feat: add support for Atomic Chat provider
- Introduced a new provider profile for Atomic Chat, allowing it to be used alongside existing providers.
- Updated `package.json` to include a new development script for launching Atomic Chat.
- Modified `smart_router.py` to recognize Atomic Chat as a local provider that does not require an API key.
- Enhanced provider discovery and launch scripts to handle Atomic Chat, including model listing and connection checks.
- Added tests to ensure proper environment setup and behavior for Atomic Chat profiles.

This update expands the functionality of the application to support local LLMs via Atomic Chat, improving versatility for users.
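With this change, the new profile can be selected explicitly at launch, e.g. `bun run scripts/provider-launch.ts atomic-chat [--fast] [--goal <latency|balanced|coding>]`, matching the updated usage string in the diff below.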
@@ -16,8 +16,11 @@ import {
   type ProviderProfile,
 } from '../src/utils/providerProfile.ts'
 import {
+  getAtomicChatChatBaseUrl,
   getOllamaChatBaseUrl,
+  hasLocalAtomicChat,
   hasLocalOllama,
+  listAtomicChatModels,
   listOllamaModels,
 } from './provider-discovery.ts'
 
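The diff imports `type ProviderProfile` but does not show its definition. Judging from the profile literals checked throughout this script, the union in `src/utils/providerProfile.ts` presumably gains the new member, roughly:

```ts
// Presumed shape, inferred from the string literals used in this script;
// the actual definition lives in src/utils/providerProfile.ts (not shown here).
export type ProviderProfile = 'openai' | 'ollama' | 'codex' | 'gemini' | 'atomic-chat'
```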
@@ -48,7 +51,7 @@ function parseLaunchOptions(argv: string[]): LaunchOptions {
       continue
     }
 
-    if ((lower === 'auto' || lower === 'openai' || lower === 'ollama' || lower === 'codex' || lower === 'gemini') && requestedProfile === 'auto') {
+    if ((lower === 'auto' || lower === 'openai' || lower === 'ollama' || lower === 'codex' || lower === 'gemini' || lower === 'atomic-chat') && requestedProfile === 'auto') {
       requestedProfile = lower as ProviderProfile | 'auto'
       continue
     }
@@ -79,7 +82,7 @@ function loadPersistedProfile(): ProfileFile | null {
   if (!existsSync(path)) return null
   try {
     const parsed = JSON.parse(readFileSync(path, 'utf8')) as ProfileFile
-    if (parsed.profile === 'openai' || parsed.profile === 'ollama' || parsed.profile === 'codex' || parsed.profile === 'gemini') {
+    if (parsed.profile === 'openai' || parsed.profile === 'ollama' || parsed.profile === 'codex' || parsed.profile === 'gemini' || parsed.profile === 'atomic-chat') {
       return parsed
     }
     return null
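`ProfileFile` is also not part of this diff. Based on the accesses visible here (`parsed.profile`) and further down (`persisted?.profile`, `persisted?.env?.OPENAI_MODEL`), a minimal sketch of its shape might be:

```ts
// Hypothetical sketch of the persisted profile file's shape, inferred from usage:
interface ProfileFile {
  profile: ProviderProfile
  env?: Record<string, string> // e.g. { OPENAI_MODEL: 'some-model-id' }
}
```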
@@ -96,6 +99,11 @@ async function resolveOllamaDefaultModel(
   return recommended?.name ?? null
 }
 
+async function resolveAtomicChatDefaultModel(): Promise<string | null> {
+  const models = await listAtomicChatModels()
+  return models[0] ?? null
+}
+
 function runCommand(command: string, env: NodeJS.ProcessEnv): Promise<number> {
   return runProcess(command, [], env)
 }
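The three helpers imported from `provider-discovery.ts` are not shown in this diff. Assuming Atomic Chat exposes an OpenAI-compatible API on `127.0.0.1:1337` (the address the connection error below refers to), they might look roughly like this; the `/v1/models` endpoint and response shape are assumptions, not confirmed by the diff:

```ts
// Hypothetical sketches of the discovery helpers; the base URL comes from the
// launch script's error message, everything else assumes an OpenAI-compatible API.
const ATOMIC_CHAT_BASE_URL = 'http://127.0.0.1:1337/v1'

export function getAtomicChatChatBaseUrl(): string {
  return ATOMIC_CHAT_BASE_URL
}

// Connection check: can we reach the local server at all?
export async function hasLocalAtomicChat(): Promise<boolean> {
  try {
    const res = await fetch(`${ATOMIC_CHAT_BASE_URL}/models`)
    return res.ok
  } catch {
    return false
  }
}

// Model listing: return the ids of whatever models the app has loaded.
export async function listAtomicChatModels(): Promise<string[]> {
  try {
    const res = await fetch(`${ATOMIC_CHAT_BASE_URL}/models`)
    if (!res.ok) return []
    const body = (await res.json()) as { data?: Array<{ id: string }> }
    return (body.data ?? []).map((model) => model.id)
  } catch {
    return []
  }
}
```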
@@ -132,6 +140,10 @@ function printSummary(profile: ProviderProfile, env: NodeJS.ProcessEnv): void {
     console.log(`OPENAI_BASE_URL=${env.OPENAI_BASE_URL}`)
     console.log(`OPENAI_MODEL=${env.OPENAI_MODEL}`)
     console.log(`CODEX_API_KEY_SET=${Boolean(resolveCodexApiCredentials(env).apiKey)}`)
+  } else if (profile === 'atomic-chat') {
+    console.log(`OPENAI_BASE_URL=${env.OPENAI_BASE_URL}`)
+    console.log(`OPENAI_MODEL=${env.OPENAI_MODEL}`)
+    console.log('OPENAI_API_KEY_SET=false (local provider, no key required)')
   } else {
     console.log(`OPENAI_BASE_URL=${env.OPENAI_BASE_URL}`)
     console.log(`OPENAI_MODEL=${env.OPENAI_MODEL}`)
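With the new branch, launching the `atomic-chat` profile would print a summary along these lines (the first two values are placeholders for whatever the discovery helpers resolve; only the third line is fixed by this diff):

```
OPENAI_BASE_URL=<value of getAtomicChatChatBaseUrl()>
OPENAI_MODEL=<locally loaded model id>
OPENAI_API_KEY_SET=false (local provider, no key required)
```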
@@ -143,7 +155,7 @@ async function main(): Promise<void> {
   const options = parseLaunchOptions(process.argv.slice(2))
   const requestedProfile = options.requestedProfile
   if (!requestedProfile) {
-    console.error('Usage: bun run scripts/provider-launch.ts [openai|ollama|codex|gemini|auto] [--fast] [--goal <latency|balanced|coding>] [-- <cli args>]')
+    console.error('Usage: bun run scripts/provider-launch.ts [openai|ollama|codex|gemini|atomic-chat|auto] [--fast] [--goal <latency|balanced|coding>] [-- <cli args>]')
     process.exit(1)
   }
 
@@ -175,12 +187,30 @@ async function main(): Promise<void> {
     }
   }
 
+  let resolvedAtomicChatModel: string | null = null
+  if (
+    profile === 'atomic-chat' &&
+    (persisted?.profile !== 'atomic-chat' || !persisted?.env?.OPENAI_MODEL)
+  ) {
+    if (!(await hasLocalAtomicChat())) {
+      console.error('Atomic Chat is not running (could not connect to 127.0.0.1:1337).\n  Download from https://atomic.chat/ and launch the application.')
+      process.exit(1)
+    }
+    resolvedAtomicChatModel = await resolveAtomicChatDefaultModel()
+    if (!resolvedAtomicChatModel) {
+      console.error('Atomic Chat is running but no model is loaded. Open Atomic Chat and download or start a model first.')
+      process.exit(1)
+    }
+  }
+
   const env = await buildLaunchEnv({
     profile,
     persisted,
     goal: options.goal,
     getOllamaChatBaseUrl,
     resolveOllamaDefaultModel: async () => resolvedOllamaModel || 'llama3.1:8b',
+    getAtomicChatChatBaseUrl,
+    resolveAtomicChatDefaultModel: async () => resolvedAtomicChatModel,
   })
   if (options.fast) {
     applyFastFlags(env)
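`buildLaunchEnv` itself is outside this diff. Given the call site above and the `printSummary` output, its `atomic-chat` handling presumably does something like the following sketch; the function name and wiring here are hypothetical, not the repo's actual implementation:

```ts
// Hypothetical sketch of how buildLaunchEnv might handle the atomic-chat
// profile, inferred from the call site and printSummary output above.
async function applyAtomicChatEnv(
  env: NodeJS.ProcessEnv,
  persisted: { env?: Record<string, string> } | null,
  getAtomicChatChatBaseUrl: () => string,
  resolveAtomicChatDefaultModel: () => Promise<string | null>,
): Promise<void> {
  env.OPENAI_BASE_URL = getAtomicChatChatBaseUrl()
  // Prefer a previously persisted model; otherwise ask the local server.
  env.OPENAI_MODEL =
    persisted?.env?.OPENAI_MODEL ?? (await resolveAtomicChatDefaultModel()) ?? ''
  delete env.OPENAI_API_KEY // local provider, no key required
}
```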