feat: add support for Atomic Chat provider
- Introduced a new provider profile for Atomic Chat, allowing it to be used alongside existing providers.
- Updated `package.json` to include a new development script for launching Atomic Chat.
- Modified `smart_router.py` to recognize Atomic Chat as a local provider that does not require an API key.
- Enhanced provider discovery and launch scripts to handle Atomic Chat, including model listing and connection checks.
- Added tests to ensure proper environment setup and behavior for Atomic Chat profiles.

This update expands the functionality of the application to support local LLMs via Atomic Chat, improving versatility for users.
This commit is contained in:
@@ -10,6 +10,7 @@ import {
|
||||
recommendOllamaModel,
|
||||
} from '../src/utils/providerRecommendation.ts'
|
||||
import {
|
||||
buildAtomicChatProfileEnv,
|
||||
buildCodexProfileEnv,
|
||||
buildGeminiProfileEnv,
|
||||
buildOllamaProfileEnv,
|
||||
@@ -20,8 +21,11 @@ import {
|
||||
type ProviderProfile,
|
||||
} from '../src/utils/providerProfile.ts'
|
||||
import {
|
||||
getAtomicChatChatBaseUrl,
|
||||
getOllamaChatBaseUrl,
|
||||
hasLocalAtomicChat,
|
||||
hasLocalOllama,
|
||||
listAtomicChatModels,
|
||||
listOllamaModels,
|
||||
} from './provider-discovery.ts'
|
||||
|
||||
@@ -34,7 +38,7 @@ function parseArg(name: string): string | null {
|
||||
|
||||
function parseProviderArg(): ProviderProfile | 'auto' {
|
||||
const p = parseArg('--provider')?.toLowerCase()
|
||||
if (p === 'openai' || p === 'ollama' || p === 'codex' || p === 'gemini') return p
|
||||
if (p === 'openai' || p === 'ollama' || p === 'codex' || p === 'gemini' || p === 'atomic-chat') return p
|
||||
return 'auto'
|
||||
}
|
||||
|
||||
@@ -102,6 +106,21 @@ async function main(): Promise<void> {
|
||||
getOllamaChatBaseUrl,
|
||||
},
|
||||
)
|
||||
} else if (selected === 'atomic-chat') {
|
||||
const model = argModel || (await listAtomicChatModels(argBaseUrl || undefined))[0]
|
||||
if (!model) {
|
||||
if (!(await hasLocalAtomicChat(argBaseUrl || undefined))) {
|
||||
console.error('Atomic Chat is not running (could not connect to 127.0.0.1:1337).\n Download from https://atomic.chat/ and launch the application.')
|
||||
} else {
|
||||
console.error('Atomic Chat is running but no model is loaded. Open Atomic Chat and download or start a model first.')
|
||||
}
|
||||
process.exit(1)
|
||||
}
|
||||
|
||||
env = buildAtomicChatProfileEnv(model, {
|
||||
baseUrl: argBaseUrl,
|
||||
getAtomicChatChatBaseUrl,
|
||||
})
|
||||
} else if (selected === 'codex') {
|
||||
const builtEnv = buildCodexProfileEnv({
|
||||
model: argModel,
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
import type { OllamaModelDescriptor } from '../src/utils/providerRecommendation.ts'
|
||||
|
||||
export const DEFAULT_OLLAMA_BASE_URL = 'http://localhost:11434'
|
||||
export const DEFAULT_ATOMIC_CHAT_BASE_URL = 'http://127.0.0.1:1337'
|
||||
|
||||
function withTimeoutSignal(timeoutMs: number): {
|
||||
signal: AbortSignal
|
||||
@@ -93,6 +94,61 @@ export async function listOllamaModels(
|
||||
}
|
||||
}
|
||||
|
||||
// ── Atomic Chat discovery (Apple Silicon local LLMs at 127.0.0.1:1337) ──────
|
||||
|
||||
export function getAtomicChatApiBaseUrl(baseUrl?: string): string {
|
||||
const raw = baseUrl || process.env.ATOMIC_CHAT_BASE_URL || DEFAULT_ATOMIC_CHAT_BASE_URL
|
||||
return trimTrailingSlash(raw)
|
||||
}
|
||||
|
||||
export function getAtomicChatChatBaseUrl(baseUrl?: string): string {
|
||||
return `${getAtomicChatApiBaseUrl(baseUrl)}/v1`
|
||||
}
|
||||
|
||||
export async function hasLocalAtomicChat(baseUrl?: string): Promise<boolean> {
|
||||
const { signal, clear } = withTimeoutSignal(1200)
|
||||
try {
|
||||
const response = await fetch(`${getAtomicChatChatBaseUrl(baseUrl)}/models`, {
|
||||
method: 'GET',
|
||||
signal,
|
||||
})
|
||||
return response.ok
|
||||
} catch {
|
||||
return false
|
||||
} finally {
|
||||
clear()
|
||||
}
|
||||
}
|
||||
|
||||
export async function listAtomicChatModels(
|
||||
baseUrl?: string,
|
||||
): Promise<string[]> {
|
||||
const { signal, clear } = withTimeoutSignal(5000)
|
||||
try {
|
||||
const response = await fetch(`${getAtomicChatChatBaseUrl(baseUrl)}/models`, {
|
||||
method: 'GET',
|
||||
signal,
|
||||
})
|
||||
if (!response.ok) {
|
||||
return []
|
||||
}
|
||||
|
||||
const data = await response.json() as {
|
||||
data?: Array<{ id?: string }>
|
||||
}
|
||||
|
||||
return (data.data ?? [])
|
||||
.filter(model => Boolean(model.id))
|
||||
.map(model => model.id!)
|
||||
} catch {
|
||||
return []
|
||||
} finally {
|
||||
clear()
|
||||
}
|
||||
}
|
||||
|
||||
// ── Ollama benchmarking ─────────────────────────────────────────────────────
|
||||
|
||||
export async function benchmarkOllamaModel(
|
||||
modelName: string,
|
||||
baseUrl?: string,
|
||||
|
||||
@@ -16,8 +16,11 @@ import {
|
||||
type ProviderProfile,
|
||||
} from '../src/utils/providerProfile.ts'
|
||||
import {
|
||||
getAtomicChatChatBaseUrl,
|
||||
getOllamaChatBaseUrl,
|
||||
hasLocalAtomicChat,
|
||||
hasLocalOllama,
|
||||
listAtomicChatModels,
|
||||
listOllamaModels,
|
||||
} from './provider-discovery.ts'
|
||||
|
||||
@@ -48,7 +51,7 @@ function parseLaunchOptions(argv: string[]): LaunchOptions {
|
||||
continue
|
||||
}
|
||||
|
||||
if ((lower === 'auto' || lower === 'openai' || lower === 'ollama' || lower === 'codex' || lower === 'gemini') && requestedProfile === 'auto') {
|
||||
if ((lower === 'auto' || lower === 'openai' || lower === 'ollama' || lower === 'codex' || lower === 'gemini' || lower === 'atomic-chat') && requestedProfile === 'auto') {
|
||||
requestedProfile = lower as ProviderProfile | 'auto'
|
||||
continue
|
||||
}
|
||||
@@ -79,7 +82,7 @@ function loadPersistedProfile(): ProfileFile | null {
|
||||
if (!existsSync(path)) return null
|
||||
try {
|
||||
const parsed = JSON.parse(readFileSync(path, 'utf8')) as ProfileFile
|
||||
if (parsed.profile === 'openai' || parsed.profile === 'ollama' || parsed.profile === 'codex' || parsed.profile === 'gemini') {
|
||||
if (parsed.profile === 'openai' || parsed.profile === 'ollama' || parsed.profile === 'codex' || parsed.profile === 'gemini' || parsed.profile === 'atomic-chat') {
|
||||
return parsed
|
||||
}
|
||||
return null
|
||||
@@ -96,6 +99,11 @@ async function resolveOllamaDefaultModel(
|
||||
return recommended?.name ?? null
|
||||
}
|
||||
|
||||
async function resolveAtomicChatDefaultModel(): Promise<string | null> {
|
||||
const models = await listAtomicChatModels()
|
||||
return models[0] ?? null
|
||||
}
|
||||
|
||||
// Thin convenience wrapper: runs `command` with no extra argv entries,
// forwarding the given environment; resolves with the process exit code.
function runCommand(command: string, env: NodeJS.ProcessEnv): Promise<number> {
  return runProcess(command, [], env)
}
|
||||
@@ -132,6 +140,10 @@ function printSummary(profile: ProviderProfile, env: NodeJS.ProcessEnv): void {
|
||||
console.log(`OPENAI_BASE_URL=${env.OPENAI_BASE_URL}`)
|
||||
console.log(`OPENAI_MODEL=${env.OPENAI_MODEL}`)
|
||||
console.log(`CODEX_API_KEY_SET=${Boolean(resolveCodexApiCredentials(env).apiKey)}`)
|
||||
} else if (profile === 'atomic-chat') {
|
||||
console.log(`OPENAI_BASE_URL=${env.OPENAI_BASE_URL}`)
|
||||
console.log(`OPENAI_MODEL=${env.OPENAI_MODEL}`)
|
||||
console.log('OPENAI_API_KEY_SET=false (local provider, no key required)')
|
||||
} else {
|
||||
console.log(`OPENAI_BASE_URL=${env.OPENAI_BASE_URL}`)
|
||||
console.log(`OPENAI_MODEL=${env.OPENAI_MODEL}`)
|
||||
@@ -143,7 +155,7 @@ async function main(): Promise<void> {
|
||||
const options = parseLaunchOptions(process.argv.slice(2))
|
||||
const requestedProfile = options.requestedProfile
|
||||
if (!requestedProfile) {
|
||||
console.error('Usage: bun run scripts/provider-launch.ts [openai|ollama|codex|gemini|auto] [--fast] [--goal <latency|balanced|coding>] [-- <cli args>]')
|
||||
console.error('Usage: bun run scripts/provider-launch.ts [openai|ollama|codex|gemini|atomic-chat|auto] [--fast] [--goal <latency|balanced|coding>] [-- <cli args>]')
|
||||
process.exit(1)
|
||||
}
|
||||
|
||||
@@ -175,12 +187,30 @@ async function main(): Promise<void> {
|
||||
}
|
||||
}
|
||||
|
||||
let resolvedAtomicChatModel: string | null = null
|
||||
if (
|
||||
profile === 'atomic-chat' &&
|
||||
(persisted?.profile !== 'atomic-chat' || !persisted?.env?.OPENAI_MODEL)
|
||||
) {
|
||||
if (!(await hasLocalAtomicChat())) {
|
||||
console.error('Atomic Chat is not running (could not connect to 127.0.0.1:1337).\n Download from https://atomic.chat/ and launch the application.')
|
||||
process.exit(1)
|
||||
}
|
||||
resolvedAtomicChatModel = await resolveAtomicChatDefaultModel()
|
||||
if (!resolvedAtomicChatModel) {
|
||||
console.error('Atomic Chat is running but no model is loaded. Open Atomic Chat and download or start a model first.')
|
||||
process.exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
const env = await buildLaunchEnv({
|
||||
profile,
|
||||
persisted,
|
||||
goal: options.goal,
|
||||
getOllamaChatBaseUrl,
|
||||
resolveOllamaDefaultModel: async () => resolvedOllamaModel || 'llama3.1:8b',
|
||||
getAtomicChatChatBaseUrl,
|
||||
resolveAtomicChatDefaultModel: async () => resolvedAtomicChatModel,
|
||||
})
|
||||
if (options.fast) {
|
||||
applyFastFlags(env)
|
||||
|
||||
Reference in New Issue
Block a user