feat(provider): expose Atomic Chat in /provider picker with autodetect (#810)
Adds Atomic Chat as a first-class preset inside the in-session /provider slash command, mirroring the Ollama auto-detect flow. Picking it probes 127.0.0.1:1337/v1/models, lists loaded models for direct selection, and falls back to "Enter manually" / "Back" when the server is unreachable or no models are loaded. README updated to reflect the new setup path.

Made-with: Cursor
This commit is contained in:
@@ -33,6 +33,7 @@ export type ProviderPreset =
|
||||
| 'custom'
|
||||
| 'nvidia-nim'
|
||||
| 'minimax'
|
||||
| 'atomic-chat'
|
||||
|
||||
export type ProviderProfileInput = {
|
||||
provider?: ProviderProfile['provider']
|
||||
@@ -285,6 +286,15 @@ export function getProviderPresetDefaults(
|
||||
apiKey: process.env.MINIMAX_API_KEY ?? '',
|
||||
requiresApiKey: true,
|
||||
}
|
||||
case 'atomic-chat':
|
||||
return {
|
||||
provider: 'openai',
|
||||
name: 'Atomic Chat',
|
||||
baseUrl: 'http://127.0.0.1:1337/v1',
|
||||
model: process.env.OPENAI_MODEL ?? 'local-model',
|
||||
apiKey: '',
|
||||
requiresApiKey: false,
|
||||
}
|
||||
case 'ollama':
|
||||
default:
|
||||
return {
|
||||
|
||||
Reference in New Issue
Block a user