feat: add support for Atomic Chat provider

- Introduced a new provider profile for Atomic Chat, allowing it to be used alongside existing providers.
- Updated `package.json` to include a new development script for launching Atomic Chat.
- Modified `smart_router.py` to recognize Atomic Chat as a local provider that does not require an API key.
- Enhanced provider discovery and launch scripts to handle Atomic Chat, including model listing and connection checks.
- Added tests to ensure proper environment setup and behavior for Atomic Chat profiles.

This update lets the application run against local LLMs served by Atomic Chat, giving users a second local-provider option alongside Ollama.
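A minimal sketch of how these helpers might be wired into discovery (the import path and caller name are illustrative; only `hasLocalAtomicChat` and `listAtomicChatModels` come from this commit):

// Sketch: probe for a local Atomic Chat server, then surface its models.
// './providerDiscovery' and selectAtomicChatModels are hypothetical names.
import { hasLocalAtomicChat, listAtomicChatModels } from './providerDiscovery'

async function selectAtomicChatModels(): Promise<string[]> {
  // No server on 127.0.0.1:1337 (or ATOMIC_CHAT_BASE_URL): skip the provider.
  if (!(await hasLocalAtomicChat())) {
    return []
  }
  // Otherwise list whatever model ids the server reports.
  return listAtomicChatModels()
}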
Author: Misha Skvortsov
Date: 2026-04-01 21:42:43 +03:00
Parent: b204ae722f
Commit: 577e654ae7
9 changed files with 503 additions and 7 deletions


@@ -1,6 +1,7 @@
import type { OllamaModelDescriptor } from '../src/utils/providerRecommendation.ts'

export const DEFAULT_OLLAMA_BASE_URL = 'http://localhost:11434'
export const DEFAULT_ATOMIC_CHAT_BASE_URL = 'http://127.0.0.1:1337'

function withTimeoutSignal(timeoutMs: number): {
  signal: AbortSignal
@@ -93,6 +94,61 @@ export async function listOllamaModels(
  }
}

// ── Atomic Chat discovery (Apple Silicon local LLMs at 127.0.0.1:1337) ──────
export function getAtomicChatApiBaseUrl(baseUrl?: string): string {
  const raw = baseUrl || process.env.ATOMIC_CHAT_BASE_URL || DEFAULT_ATOMIC_CHAT_BASE_URL
  return trimTrailingSlash(raw)
}

export function getAtomicChatChatBaseUrl(baseUrl?: string): string {
  return `${getAtomicChatApiBaseUrl(baseUrl)}/v1`
}
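// Example: with ATOMIC_CHAT_BASE_URL unset and no argument, this resolves to
// 'http://127.0.0.1:1337/v1', the OpenAI-compatible route the probes below query.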

// Quick liveness probe: true only if /v1/models answers within ~1.2s.
export async function hasLocalAtomicChat(baseUrl?: string): Promise<boolean> {
  const { signal, clear } = withTimeoutSignal(1200)
  try {
    const response = await fetch(`${getAtomicChatChatBaseUrl(baseUrl)}/models`, {
      method: 'GET',
      signal,
    })
    return response.ok
  } catch {
    return false
  } finally {
    clear()
  }
}

// List model ids from the OpenAI-compatible /v1/models response; empty on failure.
export async function listAtomicChatModels(
  baseUrl?: string,
): Promise<string[]> {
  const { signal, clear } = withTimeoutSignal(5000)
  try {
    const response = await fetch(`${getAtomicChatChatBaseUrl(baseUrl)}/models`, {
      method: 'GET',
      signal,
    })
    if (!response.ok) {
      return []
    }
    const data = await response.json() as {
      data?: Array<{ id?: string }>
    }
    return (data.data ?? [])
      .filter(model => Boolean(model.id))
      .map(model => model.id!)
  } catch {
    return []
  } finally {
    clear()
  }
}

// ── Ollama benchmarking ─────────────────────────────────────────────────────
export async function benchmarkOllamaModel(
  modelName: string,
  baseUrl?: string,