feat(provider): expose Atomic Chat in /provider picker with autodetect (#810)

Adds Atomic Chat as a first-class preset inside the in-session /provider
slash command, mirroring the Ollama auto-detect flow. Selecting it probes
http://127.0.0.1:1337/v1/models, lists the currently loaded models for direct
selection, and falls back to "Enter manually" / "Back" when the server is
unreachable or no models are loaded. README updated to reflect the new setup path.

Made-with: Cursor
This commit is contained in:
Mike
2026-04-22 02:55:53 +03:00
committed by GitHub
parent 13de4e85df
commit ee19159c17
7 changed files with 267 additions and 1 deletions

View File

@@ -33,6 +33,7 @@ export type ProviderPreset =
| 'custom'
| 'nvidia-nim'
| 'minimax'
| 'atomic-chat'
export type ProviderProfileInput = {
provider?: ProviderProfile['provider']
@@ -285,6 +286,15 @@ export function getProviderPresetDefaults(
apiKey: process.env.MINIMAX_API_KEY ?? '',
requiresApiKey: true,
}
case 'atomic-chat':
return {
provider: 'openai',
name: 'Atomic Chat',
baseUrl: 'http://127.0.0.1:1337/v1',
model: process.env.OPENAI_MODEL ?? 'local-model',
apiKey: '',
requiresApiKey: false,
}
case 'ollama':
default:
return {