diff --git a/src/commands/model/model.tsx b/src/commands/model/model.tsx
index 97a833fd..77c3b489 100644
--- a/src/commands/model/model.tsx
+++ b/src/commands/model/model.tsx
@@ -12,6 +12,10 @@ import { isBilledAsExtraUsage } from '../../utils/extraUsage.js';
 import { clearFastModeCooldown, isFastModeAvailable, isFastModeEnabled, isFastModeSupportedByModel } from '../../utils/fastMode.js';
 import { MODEL_ALIASES } from '../../utils/model/aliases.js';
 import { checkOpus1mAccess, checkSonnet1mAccess } from '../../utils/model/check1mAccess.js';
+import type { ModelOption } from '../../utils/model/modelOptions.js';
+import { discoverOpenAICompatibleModelOptions } from '../../utils/model/openaiModelDiscovery.js';
+import { getAPIProvider } from '../../utils/model/providers.js';
+import { getActiveOpenAIModelOptionsCache, setActiveOpenAIModelOptionsCache } from '../../utils/providerProfiles.js';
 import { getDefaultMainLoopModelSetting, isOpus1mMergeEnabled, renderDefaultModelSetting } from '../../utils/model/model.js';
 import { isModelAllowed } from '../../utils/model/modelAllowlist.js';
 import { validateModel } from '../../utils/model/validateModel.js';
@@ -268,6 +272,33 @@ function _temp8(s_0) {
 function _temp7(s) {
   return s.mainLoopModel;
 }
+function haveSameModelOptions(left: ModelOption[], right: ModelOption[]): boolean {
+  if (left.length !== right.length) {
+    return false;
+  }
+  return left.every((option, index) => {
+    const other = right[index];
+    return other !== undefined && option.value === other.value && option.label === other.label && option.description === other.description && option.descriptionForModel === other.descriptionForModel;
+  });
+}
+async function refreshOpenAIModelOptionsCache(): Promise<void> {
+  if (getAPIProvider() !== 'openai') {
+    return;
+  }
+  try {
+    const discoveredOptions = await discoverOpenAICompatibleModelOptions();
+    if (discoveredOptions.length === 0) {
+      return;
+    }
+    const currentOptions = getActiveOpenAIModelOptionsCache();
+    if 
(haveSameModelOptions(currentOptions, discoveredOptions)) { + return; + } + setActiveOpenAIModelOptionsCache(discoveredOptions); + } catch { + // Keep /model usable even if endpoint discovery fails. + } +} export const call: LocalJSXCommandCall = async (onDone, _context, args) => { args = args?.trim() || ''; if (COMMON_INFO_ARGS.includes(args)) { @@ -288,6 +319,7 @@ export const call: LocalJSXCommandCall = async (onDone, _context, args) => { }); return ; } + await refreshOpenAIModelOptionsCache(); return ; }; function renderModelLabel(model: string | null): string { diff --git a/src/commands/provider/index.ts b/src/commands/provider/index.ts index 9cd14daa..2ee87a17 100644 --- a/src/commands/provider/index.ts +++ b/src/commands/provider/index.ts @@ -1,12 +1,10 @@ import type { Command } from '../../commands.js' -import { shouldInferenceConfigCommandBeImmediate } from '../../utils/immediateCommand.js' -export default { +const provider = { type: 'local-jsx', name: 'provider', - description: 'Set up and save a third-party provider profile for OpenClaude', - get immediate() { - return shouldInferenceConfigCommandBeImmediate() - }, + description: 'Manage API provider profiles', load: () => import('./provider.js'), } satisfies Command + +export default provider diff --git a/src/commands/provider/provider.tsx b/src/commands/provider/provider.tsx index 6bf8a06c..43361997 100644 --- a/src/commands/provider/provider.tsx +++ b/src/commands/provider/provider.tsx @@ -2,6 +2,7 @@ import * as React from 'react' import type { LocalJSXCommandCall, LocalJSXCommandOnDone } from '../../types/command.js' import { COMMON_HELP_ARGS, COMMON_INFO_ARGS } from '../../constants/xml.js' +import { ProviderManager } from '../../components/ProviderManager.js' import TextInput from '../../components/TextInput.js' import { Select, @@ -1289,22 +1290,34 @@ export function ProviderWizard({ } export const call: LocalJSXCommandCall = async (onDone, _context, args) => { - const normalizedArgs = 
args?.trim().toLowerCase() || '' + const trimmedArgs = args?.trim().toLowerCase() ?? '' - if (COMMON_INFO_ARGS.includes(normalizedArgs)) { - onDone(buildUsageText(), { display: 'system' }) - return null + if ( + COMMON_HELP_ARGS.includes(trimmedArgs) || + COMMON_INFO_ARGS.includes(trimmedArgs) || + trimmedArgs === 'help' || + trimmedArgs === '--help' || + trimmedArgs === '-h' + ) { + onDone( + 'Run /provider to add, edit, delete, or activate provider profiles. The active provider controls base URL, model, and API key.', + { display: 'system' }, + ) + return } - if (COMMON_HELP_ARGS.includes(normalizedArgs)) { - onDone(buildUsageText(), { display: 'system' }) - return null - } + return ( + { + const message = + result?.message ?? + (result?.action === 'saved' + ? 'Provider profile updated' + : 'Provider manager closed') - if (normalizedArgs) { - onDone('Usage: /provider', { display: 'system' }) - return null - } - - return + onDone(message, { display: 'system' }) + }} + /> + ) } diff --git a/src/components/ProviderManager.tsx b/src/components/ProviderManager.tsx new file mode 100644 index 00000000..5aa80241 --- /dev/null +++ b/src/components/ProviderManager.tsx @@ -0,0 +1,613 @@ +import figures from 'figures' +import * as React from 'react' +import { Box, Text } from '../ink.js' +import { useKeybinding } from '../keybindings/useKeybinding.js' +import type { ProviderProfile } from '../utils/config.js' +import { + addProviderProfile, + deleteProviderProfile, + getActiveProviderProfile, + getProviderPresetDefaults, + getProviderProfiles, + setActiveProviderProfile, + type ProviderPreset, + type ProviderProfileInput, + updateProviderProfile, +} from '../utils/providerProfiles.js' +import { Select } from './CustomSelect/index.js' +import { Pane } from './design-system/Pane.js' +import TextInput from './TextInput.js' + +export type ProviderManagerResult = { + action: 'saved' | 'cancelled' + activeProfileId?: string + message?: string +} + +type Props = { + mode: 
'first-run' | 'manage' + onDone: (result?: ProviderManagerResult) => void +} + +type Screen = + | 'menu' + | 'select-preset' + | 'form' + | 'select-active' + | 'select-edit' + | 'select-delete' + +type DraftField = 'name' | 'baseUrl' | 'model' | 'apiKey' + +type ProviderDraft = Record + +const FORM_STEPS: Array<{ + key: DraftField + label: string + placeholder: string + helpText: string + optional?: boolean +}> = [ + { + key: 'name', + label: 'Provider name', + placeholder: 'e.g. Ollama Home, OpenAI Work', + helpText: 'A short label shown in /provider and startup setup.', + }, + { + key: 'baseUrl', + label: 'Base URL', + placeholder: 'e.g. http://localhost:11434/v1', + helpText: 'API base URL used for this provider profile.', + }, + { + key: 'model', + label: 'Default model', + placeholder: 'e.g. llama3.1:8b', + helpText: 'Model name to use when this provider is active.', + }, + { + key: 'apiKey', + label: 'API key', + placeholder: 'Leave empty if your provider does not require one', + helpText: 'Optional. Press Enter with empty value to skip.', + optional: true, + }, +] + +function toDraft(profile: ProviderProfile): ProviderDraft { + return { + name: profile.name, + baseUrl: profile.baseUrl, + model: profile.model, + apiKey: profile.apiKey ?? '', + } +} + +function presetToDraft(preset: ProviderPreset): ProviderDraft { + const defaults = getProviderPresetDefaults(preset) + return { + name: defaults.name, + baseUrl: defaults.baseUrl, + model: defaults.model, + apiKey: defaults.apiKey ?? '', + } +} + +function profileSummary(profile: ProviderProfile, isActive: boolean): string { + const activeSuffix = isActive ? ' (active)' : '' + const keyInfo = profile.apiKey ? 'key set' : 'no key' + const providerKind = + profile.provider === 'anthropic' ? 
'anthropic' : 'openai-compatible' + return `${providerKind} · ${profile.baseUrl} · ${profile.model} · ${keyInfo}${activeSuffix}` +} + +export function ProviderManager({ mode, onDone }: Props): React.ReactNode { + const [profiles, setProfiles] = React.useState(() => getProviderProfiles()) + const [activeProfileId, setActiveProfileId] = React.useState( + () => getActiveProviderProfile()?.id, + ) + const [screen, setScreen] = React.useState( + mode === 'first-run' ? 'select-preset' : 'menu', + ) + const [editingProfileId, setEditingProfileId] = React.useState(null) + const [draftProvider, setDraftProvider] = React.useState( + 'openai', + ) + const [draft, setDraft] = React.useState(() => + presetToDraft('ollama'), + ) + const [formStepIndex, setFormStepIndex] = React.useState(0) + const [cursorOffset, setCursorOffset] = React.useState(0) + const [statusMessage, setStatusMessage] = React.useState() + const [errorMessage, setErrorMessage] = React.useState() + + const currentStep = FORM_STEPS[formStepIndex] ?? FORM_STEPS[0] + const currentStepKey = currentStep.key + const currentValue = draft[currentStepKey] + + function refreshProfiles(): void { + const nextProfiles = getProviderProfiles() + setProfiles(nextProfiles) + setActiveProfileId(getActiveProviderProfile()?.id) + } + + function closeWithCancelled(message: string): void { + onDone({ action: 'cancelled', message }) + } + + function startCreateFromPreset(preset: ProviderPreset): void { + const defaults = getProviderPresetDefaults(preset) + const nextDraft = { + name: defaults.name, + baseUrl: defaults.baseUrl, + model: defaults.model, + apiKey: defaults.apiKey ?? '', + } + setEditingProfileId(null) + setDraftProvider(defaults.provider ?? 
'openai') + setDraft(nextDraft) + setFormStepIndex(0) + setCursorOffset(nextDraft.name.length) + setErrorMessage(undefined) + setScreen('form') + } + + function startEditProfile(profileId: string): void { + const existing = profiles.find(profile => profile.id === profileId) + if (!existing) { + return + } + + const nextDraft = toDraft(existing) + setEditingProfileId(profileId) + setDraftProvider(existing.provider ?? 'openai') + setDraft(nextDraft) + setFormStepIndex(0) + setCursorOffset(nextDraft.name.length) + setErrorMessage(undefined) + setScreen('form') + } + + function persistDraft(): void { + const payload: ProviderProfileInput = { + provider: draftProvider, + name: draft.name, + baseUrl: draft.baseUrl, + model: draft.model, + apiKey: draft.apiKey, + } + + const saved = editingProfileId + ? updateProviderProfile(editingProfileId, payload) + : addProviderProfile(payload, { makeActive: true }) + + if (!saved) { + setErrorMessage('Could not save provider. Fill all required fields.') + return + } + + refreshProfiles() + setStatusMessage( + editingProfileId + ? `Updated provider: ${saved.name}` + : `Added provider: ${saved.name} (now active)`, + ) + + if (mode === 'first-run') { + onDone({ + action: 'saved', + activeProfileId: saved.id, + message: `Provider configured: ${saved.name}`, + }) + return + } + + setEditingProfileId(null) + setFormStepIndex(0) + setErrorMessage(undefined) + setScreen('menu') + } + + function handleFormSubmit(value: string): void { + const trimmed = value.trim() + + if (!currentStep.optional && trimmed.length === 0) { + setErrorMessage(`${currentStep.label} is required.`) + return + } + + const nextDraft = { + ...draft, + [currentStepKey]: trimmed, + } + + setDraft(nextDraft) + setErrorMessage(undefined) + + if (formStepIndex < FORM_STEPS.length - 1) { + const nextIndex = formStepIndex + 1 + const nextKey = FORM_STEPS[nextIndex]?.key ?? 
'name' + setFormStepIndex(nextIndex) + setCursorOffset(nextDraft[nextKey].length) + return + } + + persistDraft() + } + + function handleBackFromForm(): void { + setErrorMessage(undefined) + + if (formStepIndex > 0) { + const nextIndex = formStepIndex - 1 + const nextKey = FORM_STEPS[nextIndex]?.key ?? 'name' + setFormStepIndex(nextIndex) + setCursorOffset(draft[nextKey].length) + return + } + + if (mode === 'first-run') { + setScreen('select-preset') + return + } + + setScreen('menu') + } + + useKeybinding('confirm:no', handleBackFromForm, { + context: 'Settings', + isActive: screen === 'form', + }) + + function renderPresetSelection(): React.ReactNode { + const options = [ + { + value: 'anthropic', + label: 'Anthropic', + description: 'Native Claude API (x-api-key auth)', + }, + { + value: 'ollama', + label: 'Ollama', + description: 'Local or remote Ollama endpoint', + }, + { + value: 'openai', + label: 'OpenAI', + description: 'OpenAI API with API key', + }, + { + value: 'moonshotai', + label: 'Moonshot AI', + description: 'Kimi OpenAI-compatible endpoint', + }, + { + value: 'deepseek', + label: 'DeepSeek', + description: 'DeepSeek OpenAI-compatible endpoint', + }, + { + value: 'gemini', + label: 'Google Gemini', + description: 'Gemini OpenAI-compatible endpoint', + }, + { + value: 'together', + label: 'Together AI', + description: 'Together chat/completions endpoint', + }, + { + value: 'groq', + label: 'Groq', + description: 'Groq OpenAI-compatible endpoint', + }, + { + value: 'mistral', + label: 'Mistral', + description: 'Mistral OpenAI-compatible endpoint', + }, + { + value: 'azure-openai', + label: 'Azure OpenAI', + description: 'Azure OpenAI endpoint (model=deployment name)', + }, + { + value: 'openrouter', + label: 'OpenRouter', + description: 'OpenRouter OpenAI-compatible endpoint', + }, + { + value: 'lmstudio', + label: 'LM Studio', + description: 'Local LM Studio endpoint', + }, + { + value: 'custom', + label: 'Custom', + description: 'Any 
OpenAI-compatible provider', + }, + ...(mode === 'first-run' + ? [ + { + value: 'skip', + label: 'Skip for now', + description: 'Continue with current defaults', + }, + ] + : []), + ] + + return ( + + + {mode === 'first-run' ? 'Set up provider' : 'Choose provider preset'} + + + Pick a preset, then confirm base URL, model, and API key. + + { + setErrorMessage(undefined) + switch (value) { + case 'add': + setScreen('select-preset') + break + case 'activate': + if (profiles.length > 0) { + setScreen('select-active') + } + break + case 'edit': + if (profiles.length > 0) { + setScreen('select-edit') + } + break + case 'delete': + if (profiles.length > 0) { + setScreen('select-delete') + } + break + default: + closeWithCancelled('Provider manager closed') + break + } + }} + onCancel={() => closeWithCancelled('Provider manager closed')} + visibleOptionCount={options.length} + /> + + ) + } + + function renderProfileSelection( + title: string, + emptyMessage: string, + onSelect: (profileId: string) => void, + ): React.ReactNode { + if (profiles.length === 0) { + return ( + + + {title} + + {emptyMessage} + setScreen('menu')} + visibleOptionCount={Math.min(10, Math.max(2, options.length))} + /> + + ) + } + + let content: React.ReactNode + + switch (screen) { + case 'select-preset': + content = renderPresetSelection() + break + case 'form': + content = renderForm() + break + case 'select-active': + content = renderProfileSelection( + 'Set active provider', + 'No providers available. Add one first.', + profileId => { + const active = setActiveProviderProfile(profileId) + if (!active) { + setErrorMessage('Could not change active provider.') + setScreen('menu') + return + } + refreshProfiles() + setStatusMessage(`Active provider: ${active.name}`) + setScreen('menu') + }, + ) + break + case 'select-edit': + content = renderProfileSelection( + 'Edit provider', + 'No providers available. 
Add one first.', + profileId => { + startEditProfile(profileId) + }, + ) + break + case 'select-delete': + content = renderProfileSelection( + 'Delete provider', + 'No providers available. Add one first.', + profileId => { + const result = deleteProviderProfile(profileId) + if (!result.removed) { + setErrorMessage('Could not delete provider.') + } else { + refreshProfiles() + setStatusMessage('Provider deleted') + } + setScreen('menu') + }, + ) + break + case 'menu': + default: + content = renderMenu() + break + } + + return {content} +} diff --git a/src/utils/config.ts b/src/utils/config.ts index 452749aa..2e63fdea 100644 --- a/src/utils/config.ts +++ b/src/utils/config.ts @@ -180,6 +180,15 @@ export type DiffTool = 'terminal' | 'auto' export type OutputStyle = string +export type ProviderProfile = { + id: string + name: string + provider: 'openai' | 'anthropic' + baseUrl: string + model: string + apiKey?: string +} + export type GlobalConfig = { /** * @deprecated Use settings.apiKeyHelper instead. @@ -568,6 +577,18 @@ export type GlobalConfig = { // Additional model options for the model picker (fetched during bootstrap). additionalModelOptionsCache?: ModelOption[] + // Additional model options discovered from OpenAI-compatible endpoints. + openaiAdditionalModelOptionsCache?: ModelOption[] + + // Provider profiles managed inside the TUI. The active profile determines + // which API provider env vars are applied for the current session. + providerProfiles?: ProviderProfile[] + activeProviderProfileId?: string + + // Per-profile cache for models discovered from OpenAI-compatible endpoints. + // Keyed by provider profile id. + openaiAdditionalModelOptionsCacheByProfile?: Record + // Disk cache for /api/claude_code/organizations/metrics_enabled. // Org-level settings change rarely; persisting across processes avoids a // cold API call on every `claude -p` invocation. 
@@ -624,6 +645,8 @@ function createDefaultGlobalConfig(): GlobalConfig { cachedGrowthBookFeatures: {}, respectGitignore: true, copyFullResponse: false, + providerProfiles: [], + openaiAdditionalModelOptionsCacheByProfile: {}, } } diff --git a/src/utils/managedEnv.ts b/src/utils/managedEnv.ts index 324b2d31..0ed32a3d 100644 --- a/src/utils/managedEnv.ts +++ b/src/utils/managedEnv.ts @@ -8,6 +8,7 @@ import { } from './managedEnvConstants.js' import { clearMTLSCache } from './mtls.js' import { clearProxyCache, configureGlobalAgents } from './proxy.js' +import { applyActiveProviderProfileFromConfig } from './providerProfiles.js' import { isSettingSourceEnabled } from './settings/constants.js' import { getSettings_DEPRECATED, @@ -175,6 +176,10 @@ export function applySafeConfigEnvironmentVariables(): void { process.env[key] = value } } + + // Apply active provider profile only when startup did not explicitly + // select a provider via flags/env. Explicit startup intent should win. + applyActiveProviderProfileFromConfig() } /** @@ -189,6 +194,10 @@ export function applyConfigEnvironmentVariables(): void { Object.assign(process.env, filterSettingsEnv(getSettings_DEPRECATED()?.env)) + // Keep runtime provider/model env aligned with the active profile, except + // when an explicit provider selection is already present in process.env. 
+ applyActiveProviderProfileFromConfig() + // Clear caches so agents are rebuilt with the new env vars clearCACertsCache() clearMTLSCache() diff --git a/src/utils/model/modelOptions.ts b/src/utils/model/modelOptions.ts index 26fbf624..248a36f5 100644 --- a/src/utils/model/modelOptions.ts +++ b/src/utils/model/modelOptions.ts @@ -32,6 +32,7 @@ import { } from './model.js' import { has1mContext } from '../context.js' import { getGlobalConfig } from '../config.js' +import { getActiveOpenAIModelOptionsCache } from '../providerProfiles.js' import { getCachedOllamaModelOptions, isOllamaProvider } from './ollamaModels.js' // @[MODEL LAUNCH]: Update all the available and default model option strings below. @@ -565,8 +566,13 @@ export function getModelOptions(fastMode = false): ModelOption[] { }) } - // Append additional model options fetched during bootstrap - for (const opt of getGlobalConfig().additionalModelOptionsCache ?? []) { + const additionalOptions = + getAPIProvider() === 'openai' + ? getActiveOpenAIModelOptionsCache() + : getGlobalConfig().additionalModelOptionsCache ?? [] + + // Append additional model options fetched during bootstrap/endpoints. 
+ for (const opt of additionalOptions) { if (!options.some(existing => existing.value === opt.value)) { options.push(opt) } diff --git a/src/utils/model/openaiModelDiscovery.ts b/src/utils/model/openaiModelDiscovery.ts new file mode 100644 index 00000000..5c33da97 --- /dev/null +++ b/src/utils/model/openaiModelDiscovery.ts @@ -0,0 +1,189 @@ +import axios from 'axios' +import { logForDebugging } from '../debug.js' +import type { ModelOption } from './modelOptions.js' +import { getAPIProvider } from './providers.js' + +const DISCOVERY_TIMEOUT_MS = 5000 +const DISCOVERED_MODEL_DESCRIPTION = + 'Discovered from OpenAI-compatible endpoint' + +type OpenAIModelsResponse = { + data?: Array<{ + id?: string | null + }> +} + +type OllamaTagsResponse = { + models?: Array<{ + name?: string | null + }> +} + +function getNormalizedOpenAIBaseUrl(): string { + return ( + process.env.OPENAI_BASE_URL ?? + process.env.OPENAI_API_BASE ?? + 'https://api.openai.com/v1' + ).replace(/\/+$/, '') +} + +function isAzureOpenAIBaseUrl(baseUrl: string): boolean { + try { + const hostname = new URL(baseUrl).hostname.toLowerCase() + return ( + hostname.endsWith('.openai.azure.com') || + hostname.endsWith('.cognitiveservices.azure.com') + ) + } catch { + return false + } +} + +function getOpenAIAuthHeaders(baseUrl: string): Record { + const apiKey = process.env.OPENAI_API_KEY?.trim() + if (!apiKey) { + return {} + } + + const headers: Record = { + Authorization: `Bearer ${apiKey}`, + } + + if (isAzureOpenAIBaseUrl(baseUrl)) { + headers['api-key'] = apiKey + } + + return headers +} + +function getModelListUrls(baseUrl: string): string[] { + const primary = baseUrl.endsWith('/v1') + ? `${baseUrl}/models` + : `${baseUrl}/v1/models` + const secondary = `${baseUrl}/models` + + const apiVersion = process.env.OPENAI_API_VERSION?.trim() + const addApiVersion = + apiVersion && isAzureOpenAIBaseUrl(baseUrl) + ? 
(url: string): string => {
+          try {
+            const parsed = new URL(url)
+            parsed.searchParams.set('api-version', apiVersion)
+            return parsed.toString()
+          } catch {
+            return url
+          }
+        }
+      : (url: string): string => url
+
+  if (primary === secondary) {
+    return [addApiVersion(primary)]
+  }
+
+  return [addApiVersion(primary), addApiVersion(secondary)]
+}
+
+function getOllamaTagsUrl(baseUrl: string): string | null {
+  try {
+    const parsed = new URL(baseUrl)
+    const normalizedPath = parsed.pathname.replace(/\/+$/, '')
+    const pathPrefix = normalizedPath.endsWith('/v1')
+      ? normalizedPath.slice(0, -3)
+      : normalizedPath
+    const tagsPath = `${pathPrefix}/api/tags`.replace(/\/{2,}/g, '/')
+    return `${parsed.origin}${tagsPath}`
+  } catch {
+    return null
+  }
+}
+
+function uniqueModelNames(modelNames: string[]): string[] {
+  const seen = new Set<string>()
+  const unique: string[] = []
+
+  for (const modelName of modelNames) {
+    const trimmed = modelName.trim()
+    if (!trimmed || seen.has(trimmed)) {
+      continue
+    }
+    seen.add(trimmed)
+    unique.push(trimmed)
+  }
+
+  return unique
+}
+
+async function fetchOpenAIModels(
+  urls: string[],
+  headers: Record<string, string>,
+): Promise<string[]> {
+  for (const url of urls) {
+    try {
+      const response = await axios.get<OpenAIModelsResponse>(url, {
+        headers,
+        timeout: DISCOVERY_TIMEOUT_MS,
+      })
+      const modelNames = uniqueModelNames(
+        (response.data?.data ?? [])
+          .map(model => model.id ?? '')
+          .filter((model): model is string => model.length > 0),
+      )
+      if (modelNames.length > 0) {
+        return modelNames
+      }
+    } catch {
+      logForDebugging(`[ModelDiscovery] Failed to fetch OpenAI models from ${url}`)
+    }
+  }
+
+  return []
+}
+
+async function fetchOllamaModels(
+  url: string,
+  headers: Record<string, string>,
+): Promise<string[]> {
+  try {
+    const response = await axios.get<OllamaTagsResponse>(url, {
+      headers,
+      timeout: DISCOVERY_TIMEOUT_MS,
+    })
+    return uniqueModelNames(
+      (response.data?.models ?? [])
+        .map(model => model.name ?? 
'') + .filter((model): model is string => model.length > 0), + ) + } catch { + logForDebugging(`[ModelDiscovery] Failed to fetch Ollama models from ${url}`) + return [] + } +} + +export async function discoverOpenAICompatibleModelOptions(): Promise< + ModelOption[] +> { + if (getAPIProvider() !== 'openai') { + return [] + } + + const baseUrl = getNormalizedOpenAIBaseUrl() + const headers = getOpenAIAuthHeaders(baseUrl) + + let discoveredModelNames = await fetchOpenAIModels( + getModelListUrls(baseUrl), + headers, + ) + + if (discoveredModelNames.length === 0) { + const ollamaTagsUrl = getOllamaTagsUrl(baseUrl) + if (ollamaTagsUrl) { + discoveredModelNames = await fetchOllamaModels(ollamaTagsUrl, headers) + } + } + + return discoveredModelNames.map(modelName => ({ + value: modelName, + label: modelName, + description: DISCOVERED_MODEL_DESCRIPTION, + })) +} \ No newline at end of file diff --git a/src/utils/providerProfiles.test.ts b/src/utils/providerProfiles.test.ts new file mode 100644 index 00000000..877f923f --- /dev/null +++ b/src/utils/providerProfiles.test.ts @@ -0,0 +1,239 @@ +import { afterEach, describe, expect, test } from 'bun:test' + +import { saveGlobalConfig, type ProviderProfile } from './config.js' +import { getAPIProvider } from './model/providers.js' +import { + applyActiveProviderProfileFromConfig, + applyProviderProfileToProcessEnv, + deleteProviderProfile, + getProviderPresetDefaults, +} from './providerProfiles.js' + +const originalEnv = { ...process.env } + +const RESTORED_KEYS = [ + 'CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED', + 'CLAUDE_CODE_USE_OPENAI', + 'CLAUDE_CODE_USE_GEMINI', + 'CLAUDE_CODE_USE_GITHUB', + 'CLAUDE_CODE_USE_BEDROCK', + 'CLAUDE_CODE_USE_VERTEX', + 'CLAUDE_CODE_USE_FOUNDRY', + 'OPENAI_BASE_URL', + 'OPENAI_API_BASE', + 'OPENAI_MODEL', + 'OPENAI_API_KEY', + 'ANTHROPIC_BASE_URL', + 'ANTHROPIC_MODEL', + 'ANTHROPIC_API_KEY', +] as const + +afterEach(() => { + for (const key of RESTORED_KEYS) { + if (originalEnv[key] === 
undefined) { + delete process.env[key] + } else { + process.env[key] = originalEnv[key] + } + } + + saveGlobalConfig(current => ({ + ...current, + providerProfiles: [], + activeProviderProfileId: undefined, + openaiAdditionalModelOptionsCache: [], + openaiAdditionalModelOptionsCacheByProfile: {}, + })) +}) + +function buildProfile(overrides: Partial = {}): ProviderProfile { + return { + id: 'provider_test', + name: 'Test Provider', + provider: 'openai', + baseUrl: 'https://api.openai.com/v1', + model: 'gpt-4o', + ...overrides, + } +} + +describe('applyProviderProfileToProcessEnv', () => { + test('openai profile clears competing gemini/github flags', () => { + process.env.CLAUDE_CODE_USE_GEMINI = '1' + process.env.CLAUDE_CODE_USE_GITHUB = '1' + + applyProviderProfileToProcessEnv(buildProfile()) + + expect(process.env.CLAUDE_CODE_USE_GEMINI).toBeUndefined() + expect(process.env.CLAUDE_CODE_USE_GITHUB).toBeUndefined() + expect(process.env.CLAUDE_CODE_USE_OPENAI).toBe('1') + expect(getAPIProvider()).toBe('openai') + }) + + test('anthropic profile clears competing gemini/github flags', () => { + process.env.CLAUDE_CODE_USE_GEMINI = '1' + process.env.CLAUDE_CODE_USE_GITHUB = '1' + + applyProviderProfileToProcessEnv( + buildProfile({ + provider: 'anthropic', + baseUrl: 'https://api.anthropic.com', + model: 'claude-sonnet-4-6', + }), + ) + + expect(process.env.CLAUDE_CODE_USE_GEMINI).toBeUndefined() + expect(process.env.CLAUDE_CODE_USE_GITHUB).toBeUndefined() + expect(process.env.CLAUDE_CODE_USE_OPENAI).toBeUndefined() + expect(getAPIProvider()).toBe('firstParty') + }) +}) + +describe('applyActiveProviderProfileFromConfig', () => { + test('does not override explicit startup provider selection', () => { + process.env.CLAUDE_CODE_USE_OPENAI = '1' + process.env.OPENAI_BASE_URL = 'http://localhost:11434/v1' + process.env.OPENAI_MODEL = 'qwen2.5:3b' + + const applied = applyActiveProviderProfileFromConfig({ + providerProfiles: [ + buildProfile({ + id: 'saved_openai', + baseUrl: 
'https://api.openai.com/v1', + model: 'gpt-4o', + }), + ], + activeProviderProfileId: 'saved_openai', + } as any) + + expect(applied).toBeUndefined() + expect(process.env.OPENAI_BASE_URL).toBe('http://localhost:11434/v1') + expect(process.env.OPENAI_MODEL).toBe('qwen2.5:3b') + }) + + test('does not override explicit startup selection when profile marker is stale', () => { + process.env.CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED = '1' + process.env.CLAUDE_CODE_USE_OPENAI = '1' + process.env.OPENAI_BASE_URL = 'http://localhost:11434/v1' + process.env.OPENAI_MODEL = 'qwen2.5:3b' + + const applied = applyActiveProviderProfileFromConfig({ + providerProfiles: [ + buildProfile({ + id: 'saved_openai', + baseUrl: 'https://api.openai.com/v1', + model: 'gpt-4o', + }), + ], + activeProviderProfileId: 'saved_openai', + } as any) + + expect(applied).toBeUndefined() + expect(process.env.CLAUDE_CODE_USE_OPENAI).toBe('1') + expect(process.env.OPENAI_BASE_URL).toBe('http://localhost:11434/v1') + expect(process.env.OPENAI_MODEL).toBe('qwen2.5:3b') + }) + + test('applies active profile when no explicit provider is selected', () => { + delete process.env.CLAUDE_CODE_USE_OPENAI + delete process.env.CLAUDE_CODE_USE_GEMINI + delete process.env.CLAUDE_CODE_USE_GITHUB + delete process.env.CLAUDE_CODE_USE_BEDROCK + delete process.env.CLAUDE_CODE_USE_VERTEX + delete process.env.CLAUDE_CODE_USE_FOUNDRY + + process.env.OPENAI_BASE_URL = 'http://localhost:11434/v1' + process.env.OPENAI_MODEL = 'qwen2.5:3b' + + const applied = applyActiveProviderProfileFromConfig({ + providerProfiles: [ + buildProfile({ + id: 'saved_openai', + baseUrl: 'https://api.openai.com/v1', + model: 'gpt-4o', + }), + ], + activeProviderProfileId: 'saved_openai', + } as any) + + expect(applied?.id).toBe('saved_openai') + expect(process.env.CLAUDE_CODE_USE_OPENAI).toBe('1') + expect(process.env.OPENAI_BASE_URL).toBe('https://api.openai.com/v1') + expect(process.env.OPENAI_MODEL).toBe('gpt-4o') + }) +}) + 
+describe('getProviderPresetDefaults', () => { + test('ollama preset defaults to a local Ollama model', () => { + delete process.env.OPENAI_MODEL + + const defaults = getProviderPresetDefaults('ollama') + + expect(defaults.baseUrl).toBe('http://localhost:11434/v1') + expect(defaults.model).toBe('llama3.1:8b') + }) +}) + +describe('deleteProviderProfile', () => { + test('deleting final profile clears provider env when active profile applied it', () => { + applyProviderProfileToProcessEnv( + buildProfile({ + id: 'only_profile', + baseUrl: 'https://api.openai.com/v1', + model: 'gpt-4o', + apiKey: 'sk-test', + }), + ) + + saveGlobalConfig(current => ({ + ...current, + providerProfiles: [buildProfile({ id: 'only_profile' })], + activeProviderProfileId: 'only_profile', + })) + + const result = deleteProviderProfile('only_profile') + + expect(result.removed).toBe(true) + expect(result.activeProfileId).toBeUndefined() + + expect(process.env.CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED).toBeUndefined() + + expect(process.env.CLAUDE_CODE_USE_OPENAI).toBeUndefined() + expect(process.env.CLAUDE_CODE_USE_GEMINI).toBeUndefined() + expect(process.env.CLAUDE_CODE_USE_GITHUB).toBeUndefined() + expect(process.env.CLAUDE_CODE_USE_BEDROCK).toBeUndefined() + expect(process.env.CLAUDE_CODE_USE_VERTEX).toBeUndefined() + expect(process.env.CLAUDE_CODE_USE_FOUNDRY).toBeUndefined() + + expect(process.env.OPENAI_BASE_URL).toBeUndefined() + expect(process.env.OPENAI_API_BASE).toBeUndefined() + expect(process.env.OPENAI_MODEL).toBeUndefined() + expect(process.env.OPENAI_API_KEY).toBeUndefined() + + expect(process.env.ANTHROPIC_BASE_URL).toBeUndefined() + expect(process.env.ANTHROPIC_MODEL).toBeUndefined() + expect(process.env.ANTHROPIC_API_KEY).toBeUndefined() + }) + + test('deleting final profile preserves explicit startup provider env', () => { + process.env.CLAUDE_CODE_USE_OPENAI = '1' + process.env.OPENAI_BASE_URL = 'http://localhost:11434/v1' + process.env.OPENAI_MODEL = 'qwen2.5:3b' + + 
saveGlobalConfig(current => ({ + ...current, + providerProfiles: [buildProfile({ id: 'only_profile' })], + activeProviderProfileId: 'only_profile', + })) + + const result = deleteProviderProfile('only_profile') + + expect(result.removed).toBe(true) + expect(result.activeProfileId).toBeUndefined() + + expect(process.env.CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED).toBeUndefined() + expect(process.env.CLAUDE_CODE_USE_OPENAI).toBe('1') + expect(process.env.OPENAI_BASE_URL).toBe('http://localhost:11434/v1') + expect(process.env.OPENAI_MODEL).toBe('qwen2.5:3b') + }) +}) diff --git a/src/utils/providerProfiles.ts b/src/utils/providerProfiles.ts new file mode 100644 index 00000000..c778cb40 --- /dev/null +++ b/src/utils/providerProfiles.ts @@ -0,0 +1,659 @@ +import { randomBytes } from 'crypto' +import { + getGlobalConfig, + saveGlobalConfig, + type ProviderProfile, +} from './config.js' +import type { ModelOption } from './model/modelOptions.js' + +export type ProviderPreset = + | 'anthropic' + | 'ollama' + | 'openai' + | 'moonshotai' + | 'deepseek' + | 'gemini' + | 'together' + | 'groq' + | 'mistral' + | 'azure-openai' + | 'openrouter' + | 'lmstudio' + | 'custom' + +export type ProviderProfileInput = { + provider?: ProviderProfile['provider'] + name: string + baseUrl: string + model: string + apiKey?: string +} + +export type ProviderPresetDefaults = Omit & { + provider: ProviderProfile['provider'] + requiresApiKey: boolean +} + +const DEFAULT_OLLAMA_BASE_URL = 'http://localhost:11434/v1' +const DEFAULT_OLLAMA_MODEL = 'llama3.1:8b' +const PROFILE_ENV_APPLIED_FLAG = 'CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED' + +function trimValue(value: string | undefined): string { + return value?.trim() ?? '' +} + +function trimOrUndefined(value: string | undefined): string | undefined { + const trimmed = trimValue(value) + return trimmed.length > 0 ? 
trimmed : undefined +} + +function normalizeBaseUrl(value: string): string { + return trimValue(value).replace(/\/+$/, '') +} + +function sanitizeProfile(profile: ProviderProfile): ProviderProfile | null { + const id = trimValue(profile.id) + const name = trimValue(profile.name) + const provider = profile.provider === 'anthropic' ? 'anthropic' : 'openai' + const baseUrl = normalizeBaseUrl(profile.baseUrl) + const model = trimValue(profile.model) + + if (!id || !name || !baseUrl || !model) { + return null + } + + return { + id, + name, + provider, + baseUrl, + model, + apiKey: trimOrUndefined(profile.apiKey), + } +} + +function sanitizeProfiles(profiles: ProviderProfile[] | undefined): ProviderProfile[] { + const seen = new Set() + const sanitized: ProviderProfile[] = [] + + for (const profile of profiles ?? []) { + const normalized = sanitizeProfile(profile) + if (!normalized || seen.has(normalized.id)) { + continue + } + seen.add(normalized.id) + sanitized.push(normalized) + } + + return sanitized +} + +function nextProfileId(): string { + return `provider_${randomBytes(6).toString('hex')}` +} + +function toProfile( + input: ProviderProfileInput, + id: string = nextProfileId(), +): ProviderProfile | null { + return sanitizeProfile({ + id, + provider: input.provider ?? 'openai', + name: input.name, + baseUrl: input.baseUrl, + model: input.model, + apiKey: input.apiKey, + }) +} + +function getModelCacheByProfile( + profileId: string, + config = getGlobalConfig(), +): ModelOption[] { + return config.openaiAdditionalModelOptionsCacheByProfile?.[profileId] ?? [] +} + +export function getProviderPresetDefaults( + preset: ProviderPreset, +): ProviderPresetDefaults { + switch (preset) { + case 'anthropic': + return { + provider: 'anthropic', + name: 'Anthropic', + baseUrl: process.env.ANTHROPIC_BASE_URL ?? 'https://api.anthropic.com', + model: process.env.ANTHROPIC_MODEL ?? 'claude-sonnet-4-6', + apiKey: process.env.ANTHROPIC_API_KEY ?? 
'', + requiresApiKey: true, + } + case 'openai': + return { + provider: 'openai', + name: 'OpenAI', + baseUrl: 'https://api.openai.com/v1', + model: 'gpt-5.3-codex', + apiKey: '', + requiresApiKey: true, + } + case 'moonshotai': + return { + provider: 'openai', + name: 'Moonshot AI', + baseUrl: 'https://api.moonshot.ai/v1', + model: 'kimi-k2.5', + apiKey: '', + requiresApiKey: true, + } + case 'deepseek': + return { + provider: 'openai', + name: 'DeepSeek', + baseUrl: 'https://api.deepseek.com/v1', + model: 'deepseek-chat', + apiKey: '', + requiresApiKey: true, + } + case 'gemini': + return { + provider: 'openai', + name: 'Google Gemini', + baseUrl: 'https://generativelanguage.googleapis.com/v1beta/openai', + model: 'gemini-3-flash-preview', + apiKey: '', + requiresApiKey: true, + } + case 'together': + return { + provider: 'openai', + name: 'Together AI', + baseUrl: 'https://api.together.xyz/v1', + model: 'Qwen/Qwen3.5-9B', + apiKey: '', + requiresApiKey: true, + } + case 'groq': + return { + provider: 'openai', + name: 'Groq', + baseUrl: 'https://api.groq.com/openai/v1', + model: 'llama-3.3-70b-versatile', + apiKey: '', + requiresApiKey: true, + } + case 'mistral': + return { + provider: 'openai', + name: 'Mistral', + baseUrl: 'https://api.mistral.ai/v1', + model: 'mistral-large-latest', + apiKey: '', + requiresApiKey: true, + } + case 'azure-openai': + return { + provider: 'openai', + name: 'Azure OpenAI', + baseUrl: 'https://YOUR-RESOURCE-NAME.openai.azure.com/openai/v1', + model: 'YOUR-DEPLOYMENT-NAME', + apiKey: '', + requiresApiKey: true, + } + case 'openrouter': + return { + provider: 'openai', + name: 'OpenRouter', + baseUrl: 'https://openrouter.ai/api/v1', + model: 'openai/gpt-5-mini', + apiKey: '', + requiresApiKey: true, + } + case 'lmstudio': + return { + provider: 'openai', + name: 'LM Studio', + baseUrl: 'http://localhost:1234/v1', + model: 'local-model', + apiKey: '', + requiresApiKey: false, + } + case 'custom': + return { + provider: 'openai', + 
name: 'Custom OpenAI-compatible', + baseUrl: + process.env.OPENAI_BASE_URL ?? + process.env.OPENAI_API_BASE ?? + DEFAULT_OLLAMA_BASE_URL, + model: process.env.OPENAI_MODEL ?? DEFAULT_OLLAMA_MODEL, + apiKey: process.env.OPENAI_API_KEY ?? '', + requiresApiKey: false, + } + case 'ollama': + default: + return { + provider: 'openai', + name: 'Ollama', + baseUrl: DEFAULT_OLLAMA_BASE_URL, + model: process.env.OPENAI_MODEL ?? DEFAULT_OLLAMA_MODEL, + apiKey: '', + requiresApiKey: false, + } + } +} + +export function getProviderProfiles( + config = getGlobalConfig(), +): ProviderProfile[] { + return sanitizeProfiles(config.providerProfiles) +} + +export function hasProviderProfiles(config = getGlobalConfig()): boolean { + return getProviderProfiles(config).length > 0 +} + +function hasProviderSelectionFlags( + processEnv: NodeJS.ProcessEnv = process.env, +): boolean { + return ( + processEnv.CLAUDE_CODE_USE_OPENAI !== undefined || + processEnv.CLAUDE_CODE_USE_GEMINI !== undefined || + processEnv.CLAUDE_CODE_USE_GITHUB !== undefined || + processEnv.CLAUDE_CODE_USE_BEDROCK !== undefined || + processEnv.CLAUDE_CODE_USE_VERTEX !== undefined || + processEnv.CLAUDE_CODE_USE_FOUNDRY !== undefined + ) +} + +function sameOptionalEnvValue( + left: string | undefined, + right: string | undefined, +): boolean { + return trimOrUndefined(left) === trimOrUndefined(right) +} + +function isProcessEnvAlignedWithProfile( + processEnv: NodeJS.ProcessEnv, + profile: ProviderProfile, + options?: { + includeApiKey?: boolean + }, +): boolean { + const includeApiKey = options?.includeApiKey ?? 
true + + if (processEnv[PROFILE_ENV_APPLIED_FLAG] !== '1') { + return false + } + + if (profile.provider === 'anthropic') { + return ( + !hasProviderSelectionFlags(processEnv) && + sameOptionalEnvValue(processEnv.ANTHROPIC_BASE_URL, profile.baseUrl) && + sameOptionalEnvValue(processEnv.ANTHROPIC_MODEL, profile.model) && + (!includeApiKey || + sameOptionalEnvValue(processEnv.ANTHROPIC_API_KEY, profile.apiKey)) + ) + } + + return ( + processEnv.CLAUDE_CODE_USE_OPENAI !== undefined && + processEnv.CLAUDE_CODE_USE_GEMINI === undefined && + processEnv.CLAUDE_CODE_USE_GITHUB === undefined && + processEnv.CLAUDE_CODE_USE_BEDROCK === undefined && + processEnv.CLAUDE_CODE_USE_VERTEX === undefined && + processEnv.CLAUDE_CODE_USE_FOUNDRY === undefined && + sameOptionalEnvValue(processEnv.OPENAI_BASE_URL, profile.baseUrl) && + sameOptionalEnvValue(processEnv.OPENAI_MODEL, profile.model) && + (!includeApiKey || + sameOptionalEnvValue(processEnv.OPENAI_API_KEY, profile.apiKey)) + ) +} + +export function getActiveProviderProfile( + config = getGlobalConfig(), +): ProviderProfile | undefined { + const profiles = getProviderProfiles(config) + if (profiles.length === 0) { + return undefined + } + + const activeId = trimOrUndefined(config.activeProviderProfileId) + return profiles.find(profile => profile.id === activeId) ?? 
profiles[0] +} + +export function clearProviderProfileEnvFromProcessEnv( + processEnv: NodeJS.ProcessEnv = process.env, +): void { + delete processEnv.CLAUDE_CODE_USE_OPENAI + delete processEnv.CLAUDE_CODE_USE_GEMINI + delete processEnv.CLAUDE_CODE_USE_GITHUB + delete processEnv.CLAUDE_CODE_USE_BEDROCK + delete processEnv.CLAUDE_CODE_USE_VERTEX + delete processEnv.CLAUDE_CODE_USE_FOUNDRY + + delete processEnv.OPENAI_BASE_URL + delete processEnv.OPENAI_API_BASE + delete processEnv.OPENAI_MODEL + delete processEnv.OPENAI_API_KEY + + delete processEnv.ANTHROPIC_BASE_URL + delete processEnv.ANTHROPIC_MODEL + delete processEnv.ANTHROPIC_API_KEY + delete processEnv[PROFILE_ENV_APPLIED_FLAG] +} + +export function applyProviderProfileToProcessEnv(profile: ProviderProfile): void { + clearProviderProfileEnvFromProcessEnv() + process.env[PROFILE_ENV_APPLIED_FLAG] = '1' + + process.env.ANTHROPIC_MODEL = profile.model + if (profile.provider === 'anthropic') { + process.env.ANTHROPIC_BASE_URL = profile.baseUrl + + if (profile.apiKey) { + process.env.ANTHROPIC_API_KEY = profile.apiKey + } else { + delete process.env.ANTHROPIC_API_KEY + } + + delete process.env.OPENAI_BASE_URL + delete process.env.OPENAI_API_BASE + delete process.env.OPENAI_MODEL + delete process.env.OPENAI_API_KEY + return + } + + process.env.CLAUDE_CODE_USE_OPENAI = '1' + process.env.OPENAI_BASE_URL = profile.baseUrl + process.env.OPENAI_MODEL = profile.model + + if (profile.apiKey) { + process.env.OPENAI_API_KEY = profile.apiKey + } else { + delete process.env.OPENAI_API_KEY + } +} + +export function applyActiveProviderProfileFromConfig( + config = getGlobalConfig(), + options?: { + processEnv?: NodeJS.ProcessEnv + force?: boolean + }, +): ProviderProfile | undefined { + const processEnv = options?.processEnv ?? 
process.env + const activeProfile = getActiveProviderProfile(config) + if (!activeProfile) { + return undefined + } + + if (!options?.force && hasProviderSelectionFlags(processEnv)) { + // Respect explicit startup provider intent. Re-apply only when the + // current process env is already profile-managed and aligned. + if (!isProcessEnvAlignedWithProfile(processEnv, activeProfile)) { + return undefined + } + } + + applyProviderProfileToProcessEnv(activeProfile) + return activeProfile +} + +export function addProviderProfile( + input: ProviderProfileInput, + options?: { makeActive?: boolean }, +): ProviderProfile | null { + const profile = toProfile(input) + if (!profile) { + return null + } + + const makeActive = options?.makeActive ?? true + + saveGlobalConfig(current => { + const currentProfiles = getProviderProfiles(current) + const nextProfiles = [...currentProfiles, profile] + const currentActive = trimOrUndefined(current.activeProviderProfileId) + const nextActiveId = + makeActive || !currentActive || !nextProfiles.some(p => p.id === currentActive) + ? 
profile.id + : currentActive + + return { + ...current, + providerProfiles: nextProfiles, + activeProviderProfileId: nextActiveId, + } + }) + + const activeProfile = getActiveProviderProfile() + if (activeProfile?.id === profile.id) { + applyProviderProfileToProcessEnv(profile) + clearActiveOpenAIModelOptionsCache() + } + + return profile +} + +export function updateProviderProfile( + profileId: string, + input: ProviderProfileInput, +): ProviderProfile | null { + const updatedProfile = toProfile(input, profileId) + if (!updatedProfile) { + return null + } + + let wasUpdated = false + let shouldApply = false + + saveGlobalConfig(current => { + const currentProfiles = getProviderProfiles(current) + const profileIndex = currentProfiles.findIndex( + profile => profile.id === profileId, + ) + + if (profileIndex < 0) { + return current + } + + wasUpdated = true + + const nextProfiles = [...currentProfiles] + nextProfiles[profileIndex] = updatedProfile + + const cacheByProfile = { + ...(current.openaiAdditionalModelOptionsCacheByProfile ?? {}), + } + delete cacheByProfile[profileId] + + const currentActive = trimOrUndefined(current.activeProviderProfileId) + const nextActiveId = + currentActive && nextProfiles.some(profile => profile.id === currentActive) + ? currentActive + : nextProfiles[0]?.id + + shouldApply = nextActiveId === profileId + + return { + ...current, + providerProfiles: nextProfiles, + activeProviderProfileId: nextActiveId, + openaiAdditionalModelOptionsCacheByProfile: cacheByProfile, + openaiAdditionalModelOptionsCache: shouldApply + ? 
[] + : current.openaiAdditionalModelOptionsCache, + } + }) + + if (!wasUpdated) { + return null + } + + if (shouldApply) { + applyProviderProfileToProcessEnv(updatedProfile) + } + + return updatedProfile +} + +export function setActiveProviderProfile( + profileId: string, +): ProviderProfile | null { + const current = getGlobalConfig() + const profiles = getProviderProfiles(current) + const activeProfile = profiles.find(profile => profile.id === profileId) + + if (!activeProfile) { + return null + } + + saveGlobalConfig(config => ({ + ...config, + activeProviderProfileId: profileId, + openaiAdditionalModelOptionsCache: getModelCacheByProfile(profileId, config), + })) + + applyProviderProfileToProcessEnv(activeProfile) + return activeProfile +} + +export function deleteProviderProfile(profileId: string): { + removed: boolean + activeProfileId?: string +} { + let removed = false + let deletedProfile: ProviderProfile | undefined + let nextActiveProfile: ProviderProfile | undefined + + saveGlobalConfig(current => { + const currentProfiles = getProviderProfiles(current) + const existing = currentProfiles.find(profile => profile.id === profileId) + + if (!existing) { + return current + } + + removed = true + deletedProfile = existing + + const nextProfiles = currentProfiles.filter(profile => profile.id !== profileId) + const currentActive = trimOrUndefined(current.activeProviderProfileId) + const activeWasDeleted = + !currentActive || currentActive === profileId || + !nextProfiles.some(profile => profile.id === currentActive) + + const nextActiveId = activeWasDeleted ? nextProfiles[0]?.id : currentActive + + if (nextActiveId) { + nextActiveProfile = + nextProfiles.find(profile => profile.id === nextActiveId) ?? nextProfiles[0] + } + + const cacheByProfile = { + ...(current.openaiAdditionalModelOptionsCacheByProfile ?? 
{}), + } + delete cacheByProfile[profileId] + + return { + ...current, + providerProfiles: nextProfiles, + activeProviderProfileId: nextActiveId, + openaiAdditionalModelOptionsCacheByProfile: cacheByProfile, + openaiAdditionalModelOptionsCache: nextActiveId + ? getModelCacheByProfile(nextActiveId, { + ...current, + openaiAdditionalModelOptionsCacheByProfile: cacheByProfile, + }) + : [], + } + }) + + if (nextActiveProfile) { + applyProviderProfileToProcessEnv(nextActiveProfile) + } else if ( + deletedProfile && + isProcessEnvAlignedWithProfile(process.env, deletedProfile, { + includeApiKey: false, + }) + ) { + clearProviderProfileEnvFromProcessEnv() + } + + return { + removed, + activeProfileId: nextActiveProfile?.id, + } +} + +export function getActiveOpenAIModelOptionsCache( + config = getGlobalConfig(), +): ModelOption[] { + const activeProfile = getActiveProviderProfile(config) + + if (!activeProfile) { + return config.openaiAdditionalModelOptionsCache ?? [] + } + + const cached = config.openaiAdditionalModelOptionsCacheByProfile?.[ + activeProfile.id + ] + if (cached) { + return cached + } + + // Backward compatibility for users who have only the legacy single cache. + if ( + Object.keys(config.openaiAdditionalModelOptionsCacheByProfile ?? {}).length === + 0 + ) { + return config.openaiAdditionalModelOptionsCache ?? [] + } + + return [] +} + +export function setActiveOpenAIModelOptionsCache(options: ModelOption[]): void { + const activeProfile = getActiveProviderProfile() + + if (!activeProfile) { + saveGlobalConfig(current => ({ + ...current, + openaiAdditionalModelOptionsCache: options, + })) + return + } + + saveGlobalConfig(current => ({ + ...current, + openaiAdditionalModelOptionsCache: options, + openaiAdditionalModelOptionsCacheByProfile: { + ...(current.openaiAdditionalModelOptionsCacheByProfile ?? 
{}), + [activeProfile.id]: options, + }, + })) +} + +export function clearActiveOpenAIModelOptionsCache(): void { + const activeProfile = getActiveProviderProfile() + + if (!activeProfile) { + saveGlobalConfig(current => ({ + ...current, + openaiAdditionalModelOptionsCache: [], + })) + return + } + + saveGlobalConfig(current => { + const cacheByProfile = { + ...(current.openaiAdditionalModelOptionsCacheByProfile ?? {}), + } + delete cacheByProfile[activeProfile.id] + + return { + ...current, + openaiAdditionalModelOptionsCache: [], + openaiAdditionalModelOptionsCacheByProfile: cacheByProfile, + } + }) +}