feat(provider): expose Atomic Chat in /provider picker with autodetect (#810)

Adds Atomic Chat as a first-class preset inside the in-session /provider
slash command, mirroring the Ollama auto-detect flow. Picking it probes
127.0.0.1:1337/v1/models, lists loaded models for direct selection, and
falls back to "Enter manually" / "Back" when the server is unreachable
or no models are loaded. README updated to reflect the new setup path.

Made-with: Cursor
This commit is contained in:
Mike
2026-04-22 02:55:53 +03:00
committed by GitHub
parent 13de4e85df
commit ee19159c17
7 changed files with 267 additions and 1 deletions

View File

@@ -527,6 +527,18 @@ describe('getProviderPresetDefaults', () => {
expect(defaults.baseUrl).toBe('http://localhost:11434/v1')
expect(defaults.model).toBe('llama3.1:8b')
})
test('atomic-chat preset defaults to a local Atomic Chat endpoint', async () => {
// Import a fresh module instance so provider-profile state cached by
// earlier tests cannot leak into this one.
const { getProviderPresetDefaults } = await importFreshProviderProfileModules()
// Clear any ambient model override so the preset's own defaults are exercised.
delete process.env.OPENAI_MODEL
// Atomic Chat is an OpenAI-compatible local server: no API key required.
const { provider, name, baseUrl, requiresApiKey } = getProviderPresetDefaults('atomic-chat')
expect(provider).toBe('openai')
expect(name).toBe('Atomic Chat')
expect(baseUrl).toBe('http://127.0.0.1:1337/v1')
expect(requiresApiKey).toBe(false)
})
})
describe('setActiveProviderProfile', () => {