feat(provider): expose Atomic Chat in /provider picker with autodetect (#810)
Adds Atomic Chat as a first-class preset inside the in-session /provider slash command, mirroring the Ollama auto-detect flow. Selecting it probes 127.0.0.1:1337/v1/models, lists the loaded models for direct selection, and falls back to "Enter manually" / "Back" when the server is unreachable or no models are loaded. The README is updated to reflect the new setup path.

Made-with: Cursor
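For context, a minimal sketch of how a /provider picker entry might branch on the readiness probe introduced in this diff. The import path and the pickFromList menu helper are hypothetical stand-ins, not APIs from this repo; only probeAtomicChatReadiness and its three states come from the change below.

// Hypothetical sketch: './provider-discovery' and pickFromList are assumed names, not repo APIs.
import { probeAtomicChatReadiness } from './provider-discovery'

type PickerOutcome =
  | { kind: 'model'; model: string }
  | { kind: 'manual' }
  | { kind: 'back' }

export async function pickAtomicChatModel(
  pickFromList: (title: string, options: string[]) => Promise<string>,
): Promise<PickerOutcome> {
  const readiness = await probeAtomicChatReadiness({ baseUrl: 'http://127.0.0.1:1337' })

  if (readiness.state === 'ready') {
    // Loaded models are offered for direct selection, mirroring the Ollama auto-detect flow.
    const choice = await pickFromList('Select an Atomic Chat model', [
      ...readiness.models,
      'Enter manually',
      'Back',
    ])
    if (choice === 'Back') return { kind: 'back' }
    if (choice === 'Enter manually') return { kind: 'manual' }
    return { kind: 'model', model: choice }
  }

  // Both 'unreachable' and 'no_models' fall back to the manual-entry / back options.
  const title =
    readiness.state === 'unreachable'
      ? 'Atomic Chat is not reachable on 127.0.0.1:1337'
      : 'Atomic Chat is running but has no models loaded'
  const fallback = await pickFromList(title, ['Enter manually', 'Back'])
  return fallback === 'Back' ? { kind: 'back' } : { kind: 'manual' }
}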
@@ -298,4 +298,66 @@ test('ollama generation readiness reports ready when chat probe succeeds', async
    state: 'ready',
    probeModel: 'llama3.1:8b',
  })
})

test('atomic chat readiness reports unreachable when /v1/models is down', async () => {
  const { probeAtomicChatReadiness } = await loadProviderDiscoveryModule()

  const calledUrls: string[] = []
  globalThis.fetch = mock(input => {
    const url = typeof input === 'string' ? input : input.url
    calledUrls.push(url)
    return Promise.resolve(new Response('unavailable', { status: 503 }))
  }) as typeof globalThis.fetch

  await expect(
    probeAtomicChatReadiness({ baseUrl: 'http://127.0.0.1:1337' }),
  ).resolves.toEqual({ state: 'unreachable' })

  expect(calledUrls[0]).toBe('http://127.0.0.1:1337/v1/models')
})

test('atomic chat readiness reports no_models when server is reachable but empty', async () => {
  const { probeAtomicChatReadiness } = await loadProviderDiscoveryModule()

  globalThis.fetch = mock(() =>
    Promise.resolve(
      new Response(JSON.stringify({ data: [] }), {
        status: 200,
        headers: { 'Content-Type': 'application/json' },
      }),
    ),
  ) as typeof globalThis.fetch

  await expect(
    probeAtomicChatReadiness({ baseUrl: 'http://127.0.0.1:1337' }),
  ).resolves.toEqual({ state: 'no_models' })
})

test('atomic chat readiness returns loaded model ids when ready', async () => {
  const { probeAtomicChatReadiness } = await loadProviderDiscoveryModule()

  globalThis.fetch = mock(() =>
    Promise.resolve(
      new Response(
        JSON.stringify({
          data: [
            { id: 'Qwen3_5-4B_Q4_K_M' },
            { id: 'llama-3.1-8b-instruct' },
          ],
        }),
        {
          status: 200,
          headers: { 'Content-Type': 'application/json' },
        },
      ),
    ),
  ) as typeof globalThis.fetch

  await expect(
    probeAtomicChatReadiness({ baseUrl: 'http://127.0.0.1:1337' }),
  ).resolves.toEqual({
    state: 'ready',
    models: ['Qwen3_5-4B_Q4_K_M', 'llama-3.1-8b-instruct'],
  })
})
@@ -302,6 +302,24 @@ export async function listAtomicChatModels(
  }
}

export type AtomicChatReadiness =
  | { state: 'unreachable' }
  | { state: 'no_models' }
  | { state: 'ready'; models: string[] }

export async function probeAtomicChatReadiness(options?: {
  baseUrl?: string
}): Promise<AtomicChatReadiness> {
  if (!(await hasLocalAtomicChat(options?.baseUrl))) {
    return { state: 'unreachable' }
  }
  const models = await listAtomicChatModels(options?.baseUrl)
  if (models.length === 0) {
    return { state: 'no_models' }
  }
  return { state: 'ready', models }
}

export async function benchmarkOllamaModel(
  modelName: string,
  baseUrl?: string,
@@ -527,6 +527,18 @@ describe('getProviderPresetDefaults', () => {
    expect(defaults.baseUrl).toBe('http://localhost:11434/v1')
    expect(defaults.model).toBe('llama3.1:8b')
  })

  test('atomic-chat preset defaults to a local Atomic Chat endpoint', async () => {
    const { getProviderPresetDefaults } = await importFreshProviderProfileModules()
    delete process.env.OPENAI_MODEL

    const defaults = getProviderPresetDefaults('atomic-chat')

    expect(defaults.provider).toBe('openai')
    expect(defaults.name).toBe('Atomic Chat')
    expect(defaults.baseUrl).toBe('http://127.0.0.1:1337/v1')
    expect(defaults.requiresApiKey).toBe(false)
  })
})

describe('setActiveProviderProfile', () => {
@@ -33,6 +33,7 @@ export type ProviderPreset =
  | 'custom'
  | 'nvidia-nim'
  | 'minimax'
  | 'atomic-chat'

export type ProviderProfileInput = {
  provider?: ProviderProfile['provider']
@@ -285,6 +286,15 @@ export function getProviderPresetDefaults(
        apiKey: process.env.MINIMAX_API_KEY ?? '',
        requiresApiKey: true,
      }
    case 'atomic-chat':
      return {
        provider: 'openai',
        name: 'Atomic Chat',
        baseUrl: 'http://127.0.0.1:1337/v1',
        model: process.env.OPENAI_MODEL ?? 'local-model',
        apiKey: '',
        requiresApiKey: false,
      }
    case 'ollama':
    default:
      return {