feat(provider): expose Atomic Chat in /provider picker with autodetect (#810)
Adds Atomic Chat as a first-class preset inside the in-session /provider slash command, mirroring the Ollama auto-detect flow. Selecting the preset probes 127.0.0.1:1337/v1/models, lists the loaded models for direct selection, and falls back to "Enter manually" / "Back" when the server is unreachable or no models are loaded. The README is updated to reflect the new setup path.

Made-with: Cursor
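A minimal sketch of that flow, assuming the helper introduced below (`probeAtomicChatReadiness` and its readiness states); the wrapper function here is hypothetical and only illustrates how the picker consumes the result:

```ts
// Hypothetical wrapper for illustration; the real wiring lives in
// ProviderManager's select-atomic-chat-model effect in the diff below.
import { probeAtomicChatReadiness } from '../utils/providerDiscovery.js'

async function loadAtomicChatModelOptions(): Promise<string[]> {
  // Probes the local Atomic Chat server (127.0.0.1:1337/v1/models by default).
  const readiness = await probeAtomicChatReadiness()
  if (readiness.state === 'ready') {
    return readiness.models // offered for direct selection in the picker
  }
  // 'unreachable' or 'no_models': the picker falls back to "Enter manually" / "Back".
  return []
}
```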
@@ -125,7 +125,7 @@ Advanced and source-build guides:
| Codex OAuth | `/provider` | Opens ChatGPT sign-in in your browser and stores Codex credentials securely |
| Codex | `/provider` | Uses existing Codex CLI auth, OpenClaude secure storage, or env credentials |
| Ollama | `/provider`, env vars, or `ollama launch` | Local inference with no API key |
-| Atomic Chat | advanced setup | Local Apple Silicon backend |
+| Atomic Chat | `/provider`, env vars, or `bun run dev:atomic-chat` | Local Model Provider; auto-detects loaded models |
| Bedrock / Vertex / Foundry | env vars | Additional provider integrations for supported environments |

## What Works
@@ -108,6 +108,7 @@ const PRESET_ORDER = [
  'Alibaba Coding Plan',
  'Alibaba Coding Plan (China)',
  'Anthropic',
  'Atomic Chat',
  'Azure OpenAI',
  'Codex OAuth',
  'DeepSeek',
@@ -37,7 +37,9 @@ import {
  readGithubModelsTokenAsync,
} from '../utils/githubModelsCredentials.js'
import {
  probeAtomicChatReadiness,
  probeOllamaGenerationReadiness,
  type AtomicChatReadiness,
  type OllamaGenerationReadiness,
} from '../utils/providerDiscovery.js'
import {
@@ -69,6 +71,7 @@ type Screen =
  | 'menu'
  | 'select-preset'
  | 'select-ollama-model'
  | 'select-atomic-chat-model'
  | 'codex-oauth'
  | 'form'
  | 'select-active'
@@ -89,6 +92,16 @@ type OllamaSelectionState =
    }
  | { state: 'unavailable'; message: string }

type AtomicChatSelectionState =
  | { state: 'idle' }
  | { state: 'loading' }
  | {
      state: 'ready'
      options: OptionWithDescription<string>[]
      defaultValue?: string
    }
  | { state: 'unavailable'; message: string }

const FORM_STEPS: Array<{
  key: DraftField
  label: string
@@ -222,6 +235,21 @@ function getGithubProviderSummary(
  return `github-models · ${GITHUB_PROVIDER_DEFAULT_BASE_URL} · ${getGithubProviderModel(processEnv)} · ${credentialSummary}${activeSuffix}`
}

function describeAtomicChatSelectionIssue(
  readiness: AtomicChatReadiness,
  baseUrl: string,
): string {
  if (readiness.state === 'unreachable') {
    return `Could not reach Atomic Chat at ${redactUrlForDisplay(baseUrl)}. Start the Atomic Chat app first, or enter the endpoint manually.`
  }

  if (readiness.state === 'no_models') {
    return 'Atomic Chat is running, but no models are loaded. Download and load a model inside the Atomic Chat app first, or enter details manually.'
  }

  return ''
}

function describeOllamaSelectionIssue(
  readiness: OllamaGenerationReadiness,
  baseUrl: string,
@@ -395,6 +423,8 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
  const [ollamaSelection, setOllamaSelection] = React.useState<OllamaSelectionState>({
    state: 'idle',
  })
  const [atomicChatSelection, setAtomicChatSelection] =
    React.useState<AtomicChatSelectionState>({ state: 'idle' })
  // Deferred initialization: useState initializers run synchronously during
  // render, so getProviderProfiles() and getActiveProviderProfile() would block
  // the UI (sync file I/O). Defer to queueMicrotask after first render.
@@ -583,6 +613,45 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
    }
  }, [draft.baseUrl, screen])

  React.useEffect(() => {
    if (screen !== 'select-atomic-chat-model') {
      return
    }

    let cancelled = false
    setAtomicChatSelection({ state: 'loading' })

    void (async () => {
      const readiness = await probeAtomicChatReadiness({
        baseUrl: draft.baseUrl,
      })
      if (readiness.state !== 'ready') {
        if (!cancelled) {
          setAtomicChatSelection({
            state: 'unavailable',
            message: describeAtomicChatSelectionIssue(readiness, draft.baseUrl),
          })
        }
        return
      }

      if (!cancelled) {
        setAtomicChatSelection({
          state: 'ready',
          defaultValue: readiness.models[0],
          options: readiness.models.map(model => ({
            label: model,
            value: model,
          })),
        })
      }
    })()

    return () => {
      cancelled = true
    }
  }, [draft.baseUrl, screen])

  function refreshProfiles(): void {
    // Defer sync I/O to next microtask to prevent UI freeze.
    // getProviderProfiles() and getActiveProviderProfile() read config files
@@ -889,6 +958,12 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
      return
    }

    if (preset === 'atomic-chat') {
      setAtomicChatSelection({ state: 'loading' })
      setScreen('select-atomic-chat-model')
      return
    }

    setScreen('form')
  }
@@ -964,6 +1039,86 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
    returnToMenu()
  }

  function renderAtomicChatSelection(): React.ReactNode {
    if (
      atomicChatSelection.state === 'loading' ||
      atomicChatSelection.state === 'idle'
    ) {
      return (
        <Box flexDirection="column" gap={1}>
          <Text color="remember" bold>
            Checking Atomic Chat
          </Text>
          <Text dimColor>Looking for loaded Atomic Chat models...</Text>
        </Box>
      )
    }

    if (atomicChatSelection.state === 'unavailable') {
      return (
        <Box flexDirection="column" gap={1}>
          <Text color="remember" bold>
            Atomic Chat setup
          </Text>
          <Text dimColor>{atomicChatSelection.message}</Text>
          <Select
            options={[
              {
                value: 'manual',
                label: 'Enter manually',
                description: 'Fill in the base URL and model yourself',
              },
              {
                value: 'back',
                label: 'Back',
                description: 'Choose another provider preset',
              },
            ]}
            onChange={(value: string) => {
              if (value === 'manual') {
                setFormStepIndex(0)
                setCursorOffset(draft.name.length)
                setScreen('form')
                return
              }
              setScreen('select-preset')
            }}
            onCancel={() => setScreen('select-preset')}
            visibleOptionCount={2}
          />
        </Box>
      )
    }

    return (
      <Box flexDirection="column" gap={1}>
        <Text color="remember" bold>
          Choose an Atomic Chat model
        </Text>
        <Text dimColor>
          Pick one of the models loaded in Atomic Chat to save into a local
          provider profile.
        </Text>
        <Select
          options={atomicChatSelection.options}
          defaultValue={atomicChatSelection.defaultValue}
          defaultFocusValue={atomicChatSelection.defaultValue}
          inlineDescriptions
          visibleOptionCount={Math.min(8, atomicChatSelection.options.length)}
          onChange={(value: string) => {
            const nextDraft = {
              ...draft,
              model: value,
            }
            setDraft(nextDraft)
            persistDraft(nextDraft)
          }}
          onCancel={() => setScreen('select-preset')}
        />
      </Box>
    )
  }

  function renderOllamaSelection(): React.ReactNode {
    if (ollamaSelection.state === 'loading' || ollamaSelection.state === 'idle') {
      return (
@@ -1114,6 +1269,11 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
        label: 'Anthropic',
        description: 'Native Claude API (x-api-key auth)',
      },
      {
        value: 'atomic-chat',
        label: 'Atomic Chat',
        description: 'Local Model Provider',
      },
      {
        value: 'azure-openai',
        label: 'Azure OpenAI',
@@ -1473,6 +1633,9 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
    case 'select-ollama-model':
      content = renderOllamaSelection()
      break
    case 'select-atomic-chat-model':
      content = renderAtomicChatSelection()
      break
    case 'codex-oauth':
      content = (
        <CodexOAuthSetup
@@ -298,4 +298,66 @@ test('ollama generation readiness reports ready when chat probe succeeds', async
    state: 'ready',
    probeModel: 'llama3.1:8b',
  })
})

test('atomic chat readiness reports unreachable when /v1/models is down', async () => {
  const { probeAtomicChatReadiness } = await loadProviderDiscoveryModule()

  const calledUrls: string[] = []
  globalThis.fetch = mock(input => {
    const url = typeof input === 'string' ? input : input.url
    calledUrls.push(url)
    return Promise.resolve(new Response('unavailable', { status: 503 }))
  }) as typeof globalThis.fetch

  await expect(
    probeAtomicChatReadiness({ baseUrl: 'http://127.0.0.1:1337' }),
  ).resolves.toEqual({ state: 'unreachable' })

  expect(calledUrls[0]).toBe('http://127.0.0.1:1337/v1/models')
})

test('atomic chat readiness reports no_models when server is reachable but empty', async () => {
  const { probeAtomicChatReadiness } = await loadProviderDiscoveryModule()

  globalThis.fetch = mock(() =>
    Promise.resolve(
      new Response(JSON.stringify({ data: [] }), {
        status: 200,
        headers: { 'Content-Type': 'application/json' },
      }),
    ),
  ) as typeof globalThis.fetch

  await expect(
    probeAtomicChatReadiness({ baseUrl: 'http://127.0.0.1:1337' }),
  ).resolves.toEqual({ state: 'no_models' })
})

test('atomic chat readiness returns loaded model ids when ready', async () => {
  const { probeAtomicChatReadiness } = await loadProviderDiscoveryModule()

  globalThis.fetch = mock(() =>
    Promise.resolve(
      new Response(
        JSON.stringify({
          data: [
            { id: 'Qwen3_5-4B_Q4_K_M' },
            { id: 'llama-3.1-8b-instruct' },
          ],
        }),
        {
          status: 200,
          headers: { 'Content-Type': 'application/json' },
        },
      ),
    ),
  ) as typeof globalThis.fetch

  await expect(
    probeAtomicChatReadiness({ baseUrl: 'http://127.0.0.1:1337' }),
  ).resolves.toEqual({
    state: 'ready',
    models: ['Qwen3_5-4B_Q4_K_M', 'llama-3.1-8b-instruct'],
  })
})
@@ -302,6 +302,24 @@ export async function listAtomicChatModels(
  }
}

export type AtomicChatReadiness =
  | { state: 'unreachable' }
  | { state: 'no_models' }
  | { state: 'ready'; models: string[] }

export async function probeAtomicChatReadiness(options?: {
  baseUrl?: string
}): Promise<AtomicChatReadiness> {
  if (!(await hasLocalAtomicChat(options?.baseUrl))) {
    return { state: 'unreachable' }
  }
  const models = await listAtomicChatModels(options?.baseUrl)
  if (models.length === 0) {
    return { state: 'no_models' }
  }
  return { state: 'ready', models }
}

export async function benchmarkOllamaModel(
  modelName: string,
  baseUrl?: string,
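For context, a rough standalone paraphrase of what this probe amounts to on the wire, assuming the OpenAI-compatible `{ data: [{ id }] }` payload the test fixtures above use; the real implementation delegates to `hasLocalAtomicChat` and `listAtomicChatModels` rather than calling fetch directly:

```ts
type Readiness =
  | { state: 'unreachable' }
  | { state: 'no_models' }
  | { state: 'ready'; models: string[] }

// Illustrative sketch only: GET {baseUrl}/v1/models and map the response onto
// the three readiness states the /provider picker branches on.
async function probeSketch(baseUrl = 'http://127.0.0.1:1337'): Promise<Readiness> {
  try {
    const res = await fetch(`${baseUrl}/v1/models`)
    if (!res.ok) return { state: 'unreachable' }
    const body = (await res.json()) as { data?: Array<{ id: string }> }
    const models = (body.data ?? []).map(model => model.id)
    return models.length === 0 ? { state: 'no_models' } : { state: 'ready', models }
  } catch {
    return { state: 'unreachable' }
  }
}
```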
@@ -527,6 +527,18 @@ describe('getProviderPresetDefaults', () => {
    expect(defaults.baseUrl).toBe('http://localhost:11434/v1')
    expect(defaults.model).toBe('llama3.1:8b')
  })

  test('atomic-chat preset defaults to a local Atomic Chat endpoint', async () => {
    const { getProviderPresetDefaults } = await importFreshProviderProfileModules()
    delete process.env.OPENAI_MODEL

    const defaults = getProviderPresetDefaults('atomic-chat')

    expect(defaults.provider).toBe('openai')
    expect(defaults.name).toBe('Atomic Chat')
    expect(defaults.baseUrl).toBe('http://127.0.0.1:1337/v1')
    expect(defaults.requiresApiKey).toBe(false)
  })
})

describe('setActiveProviderProfile', () => {
@@ -33,6 +33,7 @@ export type ProviderPreset =
  | 'custom'
  | 'nvidia-nim'
  | 'minimax'
  | 'atomic-chat'

export type ProviderProfileInput = {
  provider?: ProviderProfile['provider']
@@ -285,6 +286,15 @@ export function getProviderPresetDefaults(
        apiKey: process.env.MINIMAX_API_KEY ?? '',
        requiresApiKey: true,
      }
    case 'atomic-chat':
      return {
        provider: 'openai',
        name: 'Atomic Chat',
        baseUrl: 'http://127.0.0.1:1337/v1',
        model: process.env.OPENAI_MODEL ?? 'local-model',
        apiKey: '',
        requiresApiKey: false,
      }
    case 'ollama':
    default:
      return {