From ee19159c17b3de3b4a8b4a4541a6569f4261d54e Mon Sep 17 00:00:00 2001
From: Mike <71440932+Vect0rM@users.noreply.github.com>
Date: Wed, 22 Apr 2026 02:55:53 +0300
Subject: [PATCH] feat(provider): expose Atomic Chat in /provider picker with
 autodetect (#810)

Adds Atomic Chat as a first-class preset inside the in-session /provider
slash command, mirroring the Ollama auto-detect flow. Picking it probes
127.0.0.1:1337/v1/models, lists the loaded models for direct selection,
and falls back to "Enter manually" / "Back" when the server is
unreachable or no models are loaded. README updated to reflect the new
setup path.
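
The probe consumes the OpenAI-compatible model listing that Atomic Chat
already serves. With models loaded, GET /v1/models answers 200 with a
body such as

    { "data": [{ "id": "Qwen3_5-4B_Q4_K_M" }, { "id": "llama-3.1-8b-instruct" }] }

and those ids are offered directly in the picker. An empty "data" array
maps to the no-models fallback; a non-2xx response or network error maps
to the unreachable fallback (see the providerDiscovery tests below).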

Made-with: Cursor
---
 README.md                               |   2 +-
 src/components/ProviderManager.test.tsx |   1 +
 src/components/ProviderManager.tsx      | 163 ++++++++++++++++++++++++
 src/utils/providerDiscovery.test.ts     |  62 +++++++++
 src/utils/providerDiscovery.ts          |  18 +++
 src/utils/providerProfiles.test.ts      |  12 ++
 src/utils/providerProfiles.ts           |  10 ++
 7 files changed, 267 insertions(+), 1 deletion(-)

diff --git a/README.md b/README.md
index 3fde048c..3360ab46 100644
--- a/README.md
+++ b/README.md
@@ -125,7 +125,7 @@ Advanced and source-build guides:
 | Codex OAuth | `/provider` | Opens ChatGPT sign-in in your browser and stores Codex credentials securely |
 | Codex | `/provider` | Uses existing Codex CLI auth, OpenClaude secure storage, or env credentials |
 | Ollama | `/provider`, env vars, or `ollama launch` | Local inference with no API key |
-| Atomic Chat | advanced setup | Local Apple Silicon backend |
+| Atomic Chat | `/provider`, env vars, or `bun run dev:atomic-chat` | Local Model Provider; auto-detects loaded models |
 | Bedrock / Vertex / Foundry | env vars | Additional provider integrations for supported environments |
 
 ## What Works
diff --git a/src/components/ProviderManager.test.tsx b/src/components/ProviderManager.test.tsx
index cfded054..44f75adc 100644
--- a/src/components/ProviderManager.test.tsx
+++ b/src/components/ProviderManager.test.tsx
@@ -108,6 +108,7 @@ const PRESET_ORDER = [
   'Alibaba Coding Plan',
   'Alibaba Coding Plan (China)',
   'Anthropic',
+  'Atomic Chat',
   'Azure OpenAI',
   'Codex OAuth',
   'DeepSeek',
diff --git a/src/components/ProviderManager.tsx b/src/components/ProviderManager.tsx
index 5252b09c..10b40b5d 100644
--- a/src/components/ProviderManager.tsx
+++ b/src/components/ProviderManager.tsx
@@ -37,7 +37,9 @@ import {
   readGithubModelsTokenAsync,
 } from '../utils/githubModelsCredentials.js'
 import {
+  probeAtomicChatReadiness,
   probeOllamaGenerationReadiness,
+  type AtomicChatReadiness,
   type OllamaGenerationReadiness,
 } from '../utils/providerDiscovery.js'
 import {
@@ -69,6 +71,7 @@ type Screen =
   | 'menu'
   | 'select-preset'
   | 'select-ollama-model'
+  | 'select-atomic-chat-model'
   | 'codex-oauth'
   | 'form'
   | 'select-active'
@@ -89,6 +92,16 @@ type OllamaSelectionState =
     }
   | { state: 'unavailable'; message: string }
 
+type AtomicChatSelectionState =
+  | { state: 'idle' }
+  | { state: 'loading' }
+  | {
+      state: 'ready'
+      options: OptionWithDescription[]
+      defaultValue?: string
+    }
+  | { state: 'unavailable'; message: string }
+
 const FORM_STEPS: Array<{
   key: DraftField
   label: string
@@ -222,6 +235,21 @@ function getGithubProviderSummary(
   return `github-models · ${GITHUB_PROVIDER_DEFAULT_BASE_URL} · ${getGithubProviderModel(processEnv)} · ${credentialSummary}${activeSuffix}`
 }
 
+function describeAtomicChatSelectionIssue(
+  readiness: AtomicChatReadiness,
+  baseUrl: string,
+): string {
+  if (readiness.state === 'unreachable') {
+    return `Could not reach Atomic Chat at ${redactUrlForDisplay(baseUrl)}. Start the Atomic Chat app first, or enter the endpoint manually.`
+  }
+
+  if (readiness.state === 'no_models') {
+    return 'Atomic Chat is running, but no models are loaded. Download and load a model inside the Atomic Chat app first, or enter details manually.'
+  }
+
+  return ''
+}
+
 function describeOllamaSelectionIssue(
   readiness: OllamaGenerationReadiness,
   baseUrl: string,
@@ -395,6 +423,8 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
   const [ollamaSelection, setOllamaSelection] = React.useState<OllamaSelectionState>({
     state: 'idle',
   })
+  const [atomicChatSelection, setAtomicChatSelection] =
+    React.useState<AtomicChatSelectionState>({ state: 'idle' })
   // Deferred initialization: useState initializers run synchronously during
   // render, so getProviderProfiles() and getActiveProviderProfile() would block
   // the UI (sync file I/O). Defer to queueMicrotask after first render.
@@ -583,6 +613,45 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
     }
   }, [draft.baseUrl, screen])
 
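+  // Probe Atomic Chat readiness whenever this screen is entered or the
+  // draft base URL changes; `cancelled` discards results that arrive
+  // after the user has already left the screen.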
+  React.useEffect(() => {
+    if (screen !== 'select-atomic-chat-model') {
+      return
+    }
+
+    let cancelled = false
+    setAtomicChatSelection({ state: 'loading' })
+
+    void (async () => {
+      const readiness = await probeAtomicChatReadiness({
+        baseUrl: draft.baseUrl,
+      })
+      if (readiness.state !== 'ready') {
+        if (!cancelled) {
+          setAtomicChatSelection({
+            state: 'unavailable',
+            message: describeAtomicChatSelectionIssue(readiness, draft.baseUrl),
+          })
+        }
+        return
+      }
+
+      if (!cancelled) {
+        setAtomicChatSelection({
+          state: 'ready',
+          defaultValue: readiness.models[0],
+          options: readiness.models.map(model => ({
+            label: model,
+            value: model,
+          })),
+        })
+      }
+    })()
+
+    return () => {
+      cancelled = true
+    }
+  }, [draft.baseUrl, screen])
+
   function refreshProfiles(): void {
     // Defer sync I/O to next microtask to prevent UI freeze.
     // getProviderProfiles() and getActiveProviderProfile() read config files
@@ -889,6 +958,12 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
       return
     }
 
+    if (preset === 'atomic-chat') {
+      setAtomicChatSelection({ state: 'loading' })
+      setScreen('select-atomic-chat-model')
+      return
+    }
+
     setScreen('form')
   }
 
@@ -964,6 +1039,86 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
     returnToMenu()
   }
 
+  function renderAtomicChatSelection(): React.ReactNode {
+    if (
+      atomicChatSelection.state === 'loading' ||
+      atomicChatSelection.state === 'idle'
+    ) {
+      return (
+        <Box flexDirection="column">
+          <Text bold>
+            Checking Atomic Chat
+          </Text>
+          <Text>
+            Looking for loaded Atomic Chat models...
+          </Text>
+        </Box>
+      )
+    }
+
+    if (atomicChatSelection.state === 'unavailable') {
+      return (
+        <Box flexDirection="column">
+          <Text bold>
+            Atomic Chat setup
+          </Text>
+          <Text>
+            {atomicChatSelection.message}
+          </Text>
+          <Select
+            options={[
+              { label: 'Enter manually', value: 'manual' },
+              { label: 'Back', value: 'back' },
+            ]}
+            onSelect={value => {
+              if (value === 'manual') {
+                setScreen('form')
+              } else {
+                setScreen('select-preset')
+              }
+            }}
+            onCancel={() => setScreen('select-preset')}
+          />
+        </Box>
+      )
+    }
+
+    return (
+      <Box flexDirection="column">
+        <Text bold>
+          Select an Atomic Chat model
+        </Text>
+        <Select
+          options={atomicChatSelection.options}
+          defaultValue={atomicChatSelection.defaultValue}
+          onSelect={value => {
+            const nextDraft = {
+              ...draft,
+              model: value,
+            }
+            setDraft(nextDraft)
+            persistDraft(nextDraft)
+          }}
+          onCancel={() => setScreen('select-preset')}
+        />
+      </Box>
+    )
+  }
+
   function renderOllamaSelection(): React.ReactNode {
     if (ollamaSelection.state === 'loading' || ollamaSelection.state === 'idle') {
       return (
@@ -1114,6 +1269,11 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
     {
       value: 'anthropic',
       label: 'Anthropic',
       description: 'Native Claude API (x-api-key auth)',
     },
+    {
+      value: 'atomic-chat',
+      label: 'Atomic Chat',
+      description: 'Local Model Provider',
+    },
     {
       value: 'azure-openai',
       label: 'Azure OpenAI',
@@ -1473,6 +1633,9 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
     case 'select-ollama-model':
       content = renderOllamaSelection()
       break
+    case 'select-atomic-chat-model':
+      content = renderAtomicChatSelection()
+      break
     case 'codex-oauth':
       content = (
diff --git a/src/utils/providerDiscovery.test.ts b/src/utils/providerDiscovery.test.ts
--- a/src/utils/providerDiscovery.test.ts
+++ b/src/utils/providerDiscovery.test.ts
+test('atomic chat readiness reports unreachable when the server cannot be reached', async () => {
+  const { probeAtomicChatReadiness } = await loadProviderDiscoveryModule()
+
+  const calledUrls: string[] = []
+  globalThis.fetch = mock(input => {
+    const url = typeof input === 'string' ? input : input.url
+    calledUrls.push(url)
+    return Promise.resolve(new Response('unavailable', { status: 503 }))
+  }) as typeof globalThis.fetch
+
+  await expect(
+    probeAtomicChatReadiness({ baseUrl: 'http://127.0.0.1:1337' }),
+  ).resolves.toEqual({ state: 'unreachable' })
+
+  expect(calledUrls[0]).toBe('http://127.0.0.1:1337/v1/models')
+})
+
+test('atomic chat readiness reports no_models when server is reachable but empty', async () => {
+  const { probeAtomicChatReadiness } = await loadProviderDiscoveryModule()
+
+  globalThis.fetch = mock(() =>
+    Promise.resolve(
+      new Response(JSON.stringify({ data: [] }), {
+        status: 200,
+        headers: { 'Content-Type': 'application/json' },
+      }),
+    ),
+  ) as typeof globalThis.fetch
+
+  await expect(
+    probeAtomicChatReadiness({ baseUrl: 'http://127.0.0.1:1337' }),
+  ).resolves.toEqual({ state: 'no_models' })
+})
+
+test('atomic chat readiness returns loaded model ids when ready', async () => {
+  const { probeAtomicChatReadiness } = await loadProviderDiscoveryModule()
+
+  globalThis.fetch = mock(() =>
+    Promise.resolve(
+      new Response(
+        JSON.stringify({
+          data: [
+            { id: 'Qwen3_5-4B_Q4_K_M' },
+            { id: 'llama-3.1-8b-instruct' },
+          ],
+        }),
+        {
+          status: 200,
+          headers: { 'Content-Type': 'application/json' },
+        },
+      ),
+    ),
+  ) as typeof globalThis.fetch
+
+  await expect(
+    probeAtomicChatReadiness({ baseUrl: 'http://127.0.0.1:1337' }),
+  ).resolves.toEqual({
+    state: 'ready',
+    models: ['Qwen3_5-4B_Q4_K_M', 'llama-3.1-8b-instruct'],
+  })
+})
\ No newline at end of file
diff --git a/src/utils/providerDiscovery.ts b/src/utils/providerDiscovery.ts
index 7b6d309c..bd0e90c1 100644
--- a/src/utils/providerDiscovery.ts
+++ b/src/utils/providerDiscovery.ts
@@ -302,6 +302,24 @@ export async function listAtomicChatModels(
   }
 }
 
+export type AtomicChatReadiness =
+  | { state: 'unreachable' }
+  | { state: 'no_models' }
+  | { state: 'ready'; models: string[] }
+
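+// Collapse the two discovery calls into a single readiness verdict:
+// 'unreachable' when the server does not answer, 'no_models' when it
+// answers with an empty model list, 'ready' with the loaded model ids.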
+export async function probeAtomicChatReadiness(options?: {
+  baseUrl?: string
+}): Promise<AtomicChatReadiness> {
+  if (!(await hasLocalAtomicChat(options?.baseUrl))) {
+    return { state: 'unreachable' }
+  }
+  const models = await listAtomicChatModels(options?.baseUrl)
+  if (models.length === 0) {
+    return { state: 'no_models' }
+  }
+  return { state: 'ready', models }
+}
+
 export async function benchmarkOllamaModel(
   modelName: string,
   baseUrl?: string,
diff --git a/src/utils/providerProfiles.test.ts b/src/utils/providerProfiles.test.ts
index d222cadd..d27facf5 100644
--- a/src/utils/providerProfiles.test.ts
+++ b/src/utils/providerProfiles.test.ts
@@ -527,6 +527,18 @@ describe('getProviderPresetDefaults', () => {
     expect(defaults.baseUrl).toBe('http://localhost:11434/v1')
     expect(defaults.model).toBe('llama3.1:8b')
   })
+
+  test('atomic-chat preset defaults to a local Atomic Chat endpoint', async () => {
+    const { getProviderPresetDefaults } = await importFreshProviderProfileModules()
+    delete process.env.OPENAI_MODEL
+
+    const defaults = getProviderPresetDefaults('atomic-chat')
+
+    expect(defaults.provider).toBe('openai')
+    expect(defaults.name).toBe('Atomic Chat')
+    expect(defaults.baseUrl).toBe('http://127.0.0.1:1337/v1')
+    expect(defaults.requiresApiKey).toBe(false)
+  })
 })
 
 describe('setActiveProviderProfile', () => {
diff --git a/src/utils/providerProfiles.ts b/src/utils/providerProfiles.ts
index a44e3573..6da78329 100644
--- a/src/utils/providerProfiles.ts
+++ b/src/utils/providerProfiles.ts
@@ -33,6 +33,7 @@ export type ProviderPreset =
   | 'custom'
   | 'nvidia-nim'
   | 'minimax'
+  | 'atomic-chat'
 
 export type ProviderProfileInput = {
   provider?: ProviderProfile['provider']
@@ -285,6 +286,15 @@ export function getProviderPresetDefaults(
       apiKey: process.env.MINIMAX_API_KEY ?? '',
       requiresApiKey: true,
     }
+    case 'atomic-chat':
+      return {
+        provider: 'openai',
+        name: 'Atomic Chat',
+        baseUrl: 'http://127.0.0.1:1337/v1',
+        model: process.env.OPENAI_MODEL ?? 'local-model',
+        apiKey: '',
+        requiresApiKey: false,
+      }
     case 'ollama':
     default:
       return {