Merge pull request #74 from Vect0rM/feature/atomic-chat-integration

feat: add support for Atomic Chat provider
Kevin Codex authored 2026-04-02 20:13:37 +08:00 · committed by GitHub
11 changed files with 552 additions and 11 deletions

View File

@@ -10,6 +10,7 @@ import {
recommendOllamaModel,
} from '../src/utils/providerRecommendation.ts'
import {
buildAtomicChatProfileEnv,
buildCodexProfileEnv,
buildGeminiProfileEnv,
buildOllamaProfileEnv,
@@ -20,8 +21,11 @@ import {
type ProviderProfile,
} from '../src/utils/providerProfile.ts'
import {
getAtomicChatChatBaseUrl,
getOllamaChatBaseUrl,
hasLocalAtomicChat,
hasLocalOllama,
listAtomicChatModels,
listOllamaModels,
} from './provider-discovery.ts'
@@ -34,7 +38,7 @@ function parseArg(name: string): string | null {
function parseProviderArg(): ProviderProfile | 'auto' {
const p = parseArg('--provider')?.toLowerCase()
- if (p === 'openai' || p === 'ollama' || p === 'codex' || p === 'gemini') return p
+ if (p === 'openai' || p === 'ollama' || p === 'codex' || p === 'gemini' || p === 'atomic-chat') return p
return 'auto'
}
@@ -102,6 +106,21 @@ async function main(): Promise<void> {
getOllamaChatBaseUrl,
},
)
} else if (selected === 'atomic-chat') {
const model = argModel || (await listAtomicChatModels(argBaseUrl || undefined))[0]
if (!model) {
if (!(await hasLocalAtomicChat(argBaseUrl || undefined))) {
console.error('Atomic Chat is not running (could not connect to 127.0.0.1:1337).\n Download from https://atomic.chat/ and launch the application.')
} else {
console.error('Atomic Chat is running but no model is loaded. Open Atomic Chat and download or start a model first.')
}
process.exit(1)
}
env = buildAtomicChatProfileEnv(model, {
baseUrl: argBaseUrl,
getAtomicChatChatBaseUrl,
})
} else if (selected === 'codex') {
const builtEnv = buildCodexProfileEnv({
model: argModel,
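Note: buildAtomicChatProfileEnv is imported from src/utils/providerProfile.ts and its body is not part of this diff. Judging from the call site above and the printSummary output in provider-launch.ts below, a minimal sketch of what it plausibly returns — everything beyond the parameter names visible at the call site is an assumption:

// Hypothetical sketch — the real builder lives in src/utils/providerProfile.ts
// and is not shown in these hunks.
function buildAtomicChatProfileEnvSketch(
  model: string,
  options: {
    baseUrl?: string | null
    getAtomicChatChatBaseUrl: (baseUrl?: string) => string
  },
): NodeJS.ProcessEnv {
  return {
    ...process.env,
    CLAUDE_CODE_USE_OPENAI: '1', // assumption: the same switch the env checks below gate on
    OPENAI_BASE_URL: options.getAtomicChatChatBaseUrl(options.baseUrl ?? undefined),
    OPENAI_MODEL: model,
    // No OPENAI_API_KEY: Atomic Chat is a local provider (see printSummary below).
  }
}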

View File: scripts/provider-discovery.ts

@@ -1,6 +1,7 @@
import type { OllamaModelDescriptor } from '../src/utils/providerRecommendation.ts'
export const DEFAULT_OLLAMA_BASE_URL = 'http://localhost:11434'
export const DEFAULT_ATOMIC_CHAT_BASE_URL = 'http://127.0.0.1:1337'
function withTimeoutSignal(timeoutMs: number): {
signal: AbortSignal
@@ -93,6 +94,69 @@ export async function listOllamaModels(
}
}
// ── Atomic Chat discovery (Apple Silicon local LLMs at 127.0.0.1:1337) ──────
export function getAtomicChatApiBaseUrl(baseUrl?: string): string {
const parsed = new URL(
baseUrl || process.env.ATOMIC_CHAT_BASE_URL || DEFAULT_ATOMIC_CHAT_BASE_URL,
)
const pathname = trimTrailingSlash(parsed.pathname)
parsed.pathname = pathname.endsWith('/v1')
? pathname.slice(0, -3) || '/'
: pathname || '/'
parsed.search = ''
parsed.hash = ''
return trimTrailingSlash(parsed.toString())
}
export function getAtomicChatChatBaseUrl(baseUrl?: string): string {
return `${getAtomicChatApiBaseUrl(baseUrl)}/v1`
}
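Worked examples of the normalization above, assuming trimTrailingSlash strips trailing slashes as its name and the surrounding Ollama code suggest — a caller may pass the base URL with or without a trailing /v1 and both resolve to the same endpoints:

getAtomicChatApiBaseUrl()                             // 'http://127.0.0.1:1337' (default)
getAtomicChatApiBaseUrl('http://127.0.0.1:1337/v1/')  // 'http://127.0.0.1:1337'
getAtomicChatChatBaseUrl('http://127.0.0.1:1337')     // 'http://127.0.0.1:1337/v1'
// ATOMIC_CHAT_BASE_URL, when set, overrides the default the same way.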
export async function hasLocalAtomicChat(baseUrl?: string): Promise<boolean> {
const { signal, clear } = withTimeoutSignal(1200)
try {
const response = await fetch(`${getAtomicChatChatBaseUrl(baseUrl)}/models`, {
method: 'GET',
signal,
})
return response.ok
} catch {
return false
} finally {
clear()
}
}
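The probe above uses a short 1200 ms timeout so a machine without Atomic Chat fails fast, while the model listing below gets a more generous 5000 ms. A usage sketch chaining the two:

// Fail fast if the app isn't up, then enumerate models (usage sketch).
const available = await hasLocalAtomicChat()
const models = available ? await listAtomicChatModels() : []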
export async function listAtomicChatModels(
baseUrl?: string,
): Promise<string[]> {
const { signal, clear } = withTimeoutSignal(5000)
try {
const response = await fetch(`${getAtomicChatChatBaseUrl(baseUrl)}/models`, {
method: 'GET',
signal,
})
if (!response.ok) {
return []
}
const data = await response.json() as {
data?: Array<{ id?: string }>
}
return (data.data ?? [])
.filter(model => Boolean(model.id))
.map(model => model.id!)
} catch {
return []
} finally {
clear()
}
}
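For reference, listAtomicChatModels parses the standard OpenAI-style GET /v1/models payload and drops entries without an id. The ids below are illustrative, not real Atomic Chat model names:

// Example payload the parser above accepts:
const examplePayload = {
  data: [
    { id: 'llama-3.2-3b-instruct' },
    { id: 'qwen2.5-coder-7b' },
    { object: 'model' }, // no id — filtered out
  ],
}
// listAtomicChatModels() would return:
// ['llama-3.2-3b-instruct', 'qwen2.5-coder-7b']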
// ── Ollama benchmarking ─────────────────────────────────────────────────────
export async function benchmarkOllamaModel(
modelName: string,
baseUrl?: string,

View File: scripts/provider-launch.ts

@@ -16,8 +16,11 @@ import {
type ProviderProfile,
} from '../src/utils/providerProfile.ts'
import {
getAtomicChatChatBaseUrl,
getOllamaChatBaseUrl,
hasLocalAtomicChat,
hasLocalOllama,
listAtomicChatModels,
listOllamaModels,
} from './provider-discovery.ts'
@@ -48,7 +51,7 @@ function parseLaunchOptions(argv: string[]): LaunchOptions {
continue
}
- if ((lower === 'auto' || lower === 'openai' || lower === 'ollama' || lower === 'codex' || lower === 'gemini') && requestedProfile === 'auto') {
+ if ((lower === 'auto' || lower === 'openai' || lower === 'ollama' || lower === 'codex' || lower === 'gemini' || lower === 'atomic-chat') && requestedProfile === 'auto') {
requestedProfile = lower as ProviderProfile | 'auto'
continue
}
@@ -79,7 +82,7 @@ function loadPersistedProfile(): ProfileFile | null {
if (!existsSync(path)) return null
try {
const parsed = JSON.parse(readFileSync(path, 'utf8')) as ProfileFile
- if (parsed.profile === 'openai' || parsed.profile === 'ollama' || parsed.profile === 'codex' || parsed.profile === 'gemini') {
+ if (parsed.profile === 'openai' || parsed.profile === 'ollama' || parsed.profile === 'codex' || parsed.profile === 'gemini' || parsed.profile === 'atomic-chat') {
return parsed
}
return null
@@ -96,6 +99,11 @@ async function resolveOllamaDefaultModel(
return recommended?.name ?? null
}
async function resolveAtomicChatDefaultModel(): Promise<string | null> {
const models = await listAtomicChatModels()
return models[0] ?? null
}
function runCommand(command: string, env: NodeJS.ProcessEnv): Promise<number> {
return runProcess(command, [], env)
}
@@ -132,6 +140,10 @@ function printSummary(profile: ProviderProfile, env: NodeJS.ProcessEnv): void {
console.log(`OPENAI_BASE_URL=${env.OPENAI_BASE_URL}`)
console.log(`OPENAI_MODEL=${env.OPENAI_MODEL}`)
console.log(`CODEX_API_KEY_SET=${Boolean(resolveCodexApiCredentials(env).apiKey)}`)
} else if (profile === 'atomic-chat') {
console.log(`OPENAI_BASE_URL=${env.OPENAI_BASE_URL}`)
console.log(`OPENAI_MODEL=${env.OPENAI_MODEL}`)
console.log('OPENAI_API_KEY_SET=false (local provider, no key required)')
} else {
console.log(`OPENAI_BASE_URL=${env.OPENAI_BASE_URL}`)
console.log(`OPENAI_MODEL=${env.OPENAI_MODEL}`)
@@ -143,7 +155,7 @@ async function main(): Promise<void> {
const options = parseLaunchOptions(process.argv.slice(2))
const requestedProfile = options.requestedProfile
if (!requestedProfile) {
- console.error('Usage: bun run scripts/provider-launch.ts [openai|ollama|codex|gemini|auto] [--fast] [--goal <latency|balanced|coding>] [-- <cli args>]')
+ console.error('Usage: bun run scripts/provider-launch.ts [openai|ollama|codex|gemini|atomic-chat|auto] [--fast] [--goal <latency|balanced|coding>] [-- <cli args>]')
process.exit(1)
}
@@ -175,12 +187,30 @@ async function main(): Promise<void> {
}
}
let resolvedAtomicChatModel: string | null = null
if (
profile === 'atomic-chat' &&
(persisted?.profile !== 'atomic-chat' || !persisted?.env?.OPENAI_MODEL)
) {
if (!(await hasLocalAtomicChat())) {
console.error('Atomic Chat is not running (could not connect to 127.0.0.1:1337).\n Download from https://atomic.chat/ and launch the application.')
process.exit(1)
}
resolvedAtomicChatModel = await resolveAtomicChatDefaultModel()
if (!resolvedAtomicChatModel) {
console.error('Atomic Chat is running but no model is loaded. Open Atomic Chat and download or start a model first.')
process.exit(1)
}
}
const env = await buildLaunchEnv({
profile,
persisted,
goal: options.goal,
getOllamaChatBaseUrl,
resolveOllamaDefaultModel: async () => resolvedOllamaModel || 'llama3.1:8b',
getAtomicChatChatBaseUrl,
resolveAtomicChatDefaultModel: async () => resolvedAtomicChatModel,
})
if (options.fast) {
applyFastFlags(env)
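After a successful launch the profile is persisted, and the persisted?.profile !== 'atomic-chat' || !persisted?.env?.OPENAI_MODEL guard above then skips discovery entirely on later runs. A persisted ProfileFile consistent with the fields these hunks actually read back might look like the following — the full shape of ProfileFile is not shown in the diff; only profile and env.OPENAI_MODEL are referenced above, the rest is assumed:

// Hypothetical persisted profile for the new provider:
const persistedExample = {
  profile: 'atomic-chat',
  env: {
    OPENAI_BASE_URL: 'http://127.0.0.1:1337/v1',
    OPENAI_MODEL: 'llama-3.2-3b-instruct', // illustrative id
  },
}

With such a file in place, bun run scripts/provider-launch.ts atomic-chat starts without re-probing 127.0.0.1:1337.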

View File

@@ -186,7 +186,7 @@ function checkOpenAIEnv(): CheckResult[] {
} else if (!key && !isLocalBaseUrl(request.baseUrl)) {
results.push(fail('OPENAI_API_KEY', 'Missing key for non-local provider URL.'))
} else if (!key) {
- results.push(pass('OPENAI_API_KEY', 'Not set (allowed for local providers like Ollama/LM Studio).'))
+ results.push(pass('OPENAI_API_KEY', 'Not set (allowed for local providers like Atomic Chat/Ollama/LM Studio).'))
} else {
results.push(pass('OPENAI_API_KEY', 'Configured.'))
}
@@ -271,6 +271,15 @@ async function checkBaseUrlReachability(): Promise<CheckResult> {
}
}
function isAtomicChatUrl(baseUrl: string): boolean {
try {
const parsed = new URL(baseUrl)
return parsed.port === '1337' && isLocalBaseUrl(baseUrl)
} catch {
return false
}
}
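The heuristic treats any local base URL on port 1337 as Atomic Chat; anything else falls through to the regular Ollama handling below (isLocalBaseUrl is the same helper used by checkOpenAIEnv above):

isAtomicChatUrl('http://127.0.0.1:1337/v1')  // true  — local host on Atomic Chat's port
isAtomicChatUrl('http://localhost:11434')    // false — Ollama's default port
isAtomicChatUrl('https://api.openai.com/v1') // false — not a local base URL
isAtomicChatUrl('not a url')                 // false — URL constructor throws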
function checkOllamaProcessorMode(): CheckResult {
if (!isTruthy(process.env.CLAUDE_CODE_USE_OPENAI) || isTruthy(process.env.CLAUDE_CODE_USE_GEMINI)) {
return pass('Ollama processor mode', 'Skipped (OpenAI-compatible mode disabled).')
@@ -281,6 +290,10 @@ function checkOllamaProcessorMode(): CheckResult {
return pass('Ollama processor mode', 'Skipped (provider URL is not local).')
}
if (isAtomicChatUrl(baseUrl)) {
return pass('Ollama processor mode', 'Skipped (Atomic Chat local provider detected, not Ollama).')
}
const result = spawnSync('ollama', ['ps'], {
cwd: process.cwd(),
encoding: 'utf8',