feat(provider): align provider and model workflows (#324)

* feat(provider): align provider and model workflows

* fix(provider): clear gemini/github flags and use local ollama default

* fix(provider): preserve explicit startup provider selection

* fix(provider): clear env when deleting last profile

* chore(provider): apply review nits in ProviderManager

* fix(provider): preserve explicit env on last-profile delete

* fix(provider): preserve explicit env when profile marker is stale

---------

Co-authored-by: Gitlawb <gitlawb@users.noreply.github.com>
This commit is contained in:
Agent_J
2026-04-04 17:59:45 +05:30
committed by GitHub
parent a0bdab24c0
commit ef881b247f
10 changed files with 1803 additions and 22 deletions

View File

@@ -180,6 +180,15 @@ export type DiffTool = 'terminal' | 'auto'
export type OutputStyle = string
// A saved provider connection: which API wire protocol to use plus the
// endpoint, default model, and optional credential that get exported into
// process.env when the profile is active.
export type ProviderProfile = {
// Stable identifier (e.g. "provider_<hex>"); referenced by activeProviderProfileId.
id: string
// Human-readable label shown in the provider picker.
name: string
// API protocol family this profile speaks.
provider: 'openai' | 'anthropic'
// Endpoint base URL; trailing slashes are stripped during sanitization.
baseUrl: string
// Default model id requested from this provider.
model: string
// Optional API key; omitted for local endpoints (e.g. Ollama) that need no auth.
apiKey?: string
}
export type GlobalConfig = {
/**
* @deprecated Use settings.apiKeyHelper instead.
@@ -568,6 +577,18 @@ export type GlobalConfig = {
// Additional model options for the model picker (fetched during bootstrap).
additionalModelOptionsCache?: ModelOption[]
// Additional model options discovered from OpenAI-compatible endpoints.
openaiAdditionalModelOptionsCache?: ModelOption[]
// Provider profiles managed inside the TUI. The active profile determines
// which API provider env vars are applied for the current session.
providerProfiles?: ProviderProfile[]
activeProviderProfileId?: string
// Per-profile cache for models discovered from OpenAI-compatible endpoints.
// Keyed by provider profile id.
openaiAdditionalModelOptionsCacheByProfile?: Record<string, ModelOption[]>
// Disk cache for /api/claude_code/organizations/metrics_enabled.
// Org-level settings change rarely; persisting across processes avoids a
// cold API call on every `claude -p` invocation.
@@ -624,6 +645,8 @@ function createDefaultGlobalConfig(): GlobalConfig {
cachedGrowthBookFeatures: {},
respectGitignore: true,
copyFullResponse: false,
providerProfiles: [],
openaiAdditionalModelOptionsCacheByProfile: {},
}
}

View File

@@ -8,6 +8,7 @@ import {
} from './managedEnvConstants.js'
import { clearMTLSCache } from './mtls.js'
import { clearProxyCache, configureGlobalAgents } from './proxy.js'
import { applyActiveProviderProfileFromConfig } from './providerProfiles.js'
import { isSettingSourceEnabled } from './settings/constants.js'
import {
getSettings_DEPRECATED,
@@ -175,6 +176,10 @@ export function applySafeConfigEnvironmentVariables(): void {
process.env[key] = value
}
}
// Apply active provider profile only when startup did not explicitly
// select a provider via flags/env. Explicit startup intent should win.
applyActiveProviderProfileFromConfig()
}
/**
@@ -189,6 +194,10 @@ export function applyConfigEnvironmentVariables(): void {
Object.assign(process.env, filterSettingsEnv(getSettings_DEPRECATED()?.env))
// Keep runtime provider/model env aligned with the active profile, except
// when an explicit provider selection is already present in process.env.
applyActiveProviderProfileFromConfig()
// Clear caches so agents are rebuilt with the new env vars
clearCACertsCache()
clearMTLSCache()

View File

@@ -32,6 +32,7 @@ import {
} from './model.js'
import { has1mContext } from '../context.js'
import { getGlobalConfig } from '../config.js'
import { getActiveOpenAIModelOptionsCache } from '../providerProfiles.js'
import { getCachedOllamaModelOptions, isOllamaProvider } from './ollamaModels.js'
// @[MODEL LAUNCH]: Update all the available and default model option strings below.
@@ -565,8 +566,13 @@ export function getModelOptions(fastMode = false): ModelOption[] {
})
}
// Append additional model options fetched during bootstrap
for (const opt of getGlobalConfig().additionalModelOptionsCache ?? []) {
const additionalOptions =
getAPIProvider() === 'openai'
? getActiveOpenAIModelOptionsCache()
: getGlobalConfig().additionalModelOptionsCache ?? []
// Append additional model options fetched during bootstrap/endpoints.
for (const opt of additionalOptions) {
if (!options.some(existing => existing.value === opt.value)) {
options.push(opt)
}

View File

@@ -0,0 +1,189 @@
import axios from 'axios'
import { logForDebugging } from '../debug.js'
import type { ModelOption } from './modelOptions.js'
import { getAPIProvider } from './providers.js'
// Per-request timeout for model-discovery probes (ms).
const DISCOVERY_TIMEOUT_MS = 5000
// Description attached to every model option discovered from an endpoint.
const DISCOVERED_MODEL_DESCRIPTION =
'Discovered from OpenAI-compatible endpoint'
// Shape of an OpenAI-style `GET /models` response — only the fields we read.
type OpenAIModelsResponse = {
data?: Array<{
id?: string | null
}>
}
// Shape of an Ollama `GET /api/tags` response — only the fields we read.
type OllamaTagsResponse = {
models?: Array<{
name?: string | null
}>
}
/**
 * Resolve the OpenAI-compatible base URL from the environment.
 *
 * Falls back from OPENAI_BASE_URL to OPENAI_API_BASE to the public OpenAI
 * endpoint. Empty or whitespace-only env values are treated as unset so a
 * blank `export OPENAI_BASE_URL=` does not yield an unusable base URL
 * (the original `??` chain only skipped undefined, not blanks). Trailing
 * slashes are stripped so callers can append path segments safely.
 */
function getNormalizedOpenAIBaseUrl(): string {
  const fromEnv =
    process.env.OPENAI_BASE_URL?.trim() || process.env.OPENAI_API_BASE?.trim()
  return (fromEnv || 'https://api.openai.com/v1').replace(/\/+$/, '')
}
/**
 * True when the base URL points at an Azure-hosted OpenAI endpoint
 * (*.openai.azure.com or *.cognitiveservices.azure.com). Unparseable
 * URLs are treated as non-Azure.
 */
function isAzureOpenAIBaseUrl(baseUrl: string): boolean {
  let hostname: string
  try {
    hostname = new URL(baseUrl).hostname.toLowerCase()
  } catch {
    return false
  }
  const azureSuffixes = ['.openai.azure.com', '.cognitiveservices.azure.com']
  return azureSuffixes.some(suffix => hostname.endsWith(suffix))
}
/**
 * Build auth headers for a model-listing request from OPENAI_API_KEY.
 * Returns no headers when the key is unset or blank. Azure endpoints get
 * the key in both `Authorization` and the Azure-specific `api-key` header.
 */
function getOpenAIAuthHeaders(baseUrl: string): Record<string, string> {
  const apiKey = process.env.OPENAI_API_KEY?.trim()
  if (!apiKey) {
    return {}
  }
  const authHeaders: Record<string, string> = {
    Authorization: `Bearer ${apiKey}`,
  }
  if (isAzureOpenAIBaseUrl(baseUrl)) {
    authHeaders['api-key'] = apiKey
  }
  return authHeaders
}
/**
 * Candidate `/models` listing URLs for an OpenAI-compatible base URL.
 * Tries `<base>/v1/models` first (or just `<base>/models` when the base
 * already ends in /v1, in which case the two candidates collapse to one).
 * For Azure endpoints with OPENAI_API_VERSION set, the `api-version`
 * query parameter is appended to each candidate.
 */
function getModelListUrls(baseUrl: string): string[] {
  const candidates = baseUrl.endsWith('/v1')
    ? [`${baseUrl}/models`]
    : [`${baseUrl}/v1/models`, `${baseUrl}/models`]
  const apiVersion = process.env.OPENAI_API_VERSION?.trim()
  if (!apiVersion || !isAzureOpenAIBaseUrl(baseUrl)) {
    return candidates
  }
  return candidates.map(url => {
    try {
      const withVersion = new URL(url)
      withVersion.searchParams.set('api-version', apiVersion)
      return withVersion.toString()
    } catch {
      // Leave malformed candidates untouched; the fetch will fail and be skipped.
      return url
    }
  })
}
/**
 * Derive the Ollama tag-listing URL (`/api/tags`) from an OpenAI-compatible
 * base URL, dropping a trailing `/v1` path segment first. Returns null for
 * unparseable URLs.
 */
function getOllamaTagsUrl(baseUrl: string): string | null {
  let parsed: URL
  try {
    parsed = new URL(baseUrl)
  } catch {
    return null
  }
  const trimmedPath = parsed.pathname.replace(/\/+$/, '')
  const prefix = trimmedPath.endsWith('/v1')
    ? trimmedPath.slice(0, -'/v1'.length)
    : trimmedPath
  const tagsPath = `${prefix}/api/tags`.replace(/\/{2,}/g, '/')
  return `${parsed.origin}${tagsPath}`
}
/**
 * Trim, drop empties, and de-duplicate model names while preserving the
 * order of first appearance.
 */
function uniqueModelNames(modelNames: string[]): string[] {
  const known = new Set<string>()
  const result: string[] = []
  for (const raw of modelNames) {
    const name = raw.trim()
    if (name && !known.has(name)) {
      known.add(name)
      result.push(name)
    }
  }
  return result
}
/**
 * Try each candidate URL in order and return the first non-empty list of
 * model ids from an OpenAI-style `/models` response. A failed request is
 * logged (debug only) and the next URL is tried; a successful but empty
 * response also falls through to the next URL. Returns [] when every
 * candidate fails or yields no models.
 */
async function fetchOpenAIModels(
  urls: string[],
  headers: Record<string, string>,
): Promise<string[]> {
  for (const url of urls) {
    let payload: OpenAIModelsResponse | undefined
    try {
      const response = await axios.get<OpenAIModelsResponse>(url, {
        headers,
        timeout: DISCOVERY_TIMEOUT_MS,
      })
      payload = response.data
    } catch {
      logForDebugging(`[ModelDiscovery] Failed to fetch OpenAI models from ${url}`)
      continue
    }
    const rawIds = (payload?.data ?? [])
      .map(entry => entry.id ?? '')
      .filter((id): id is string => id.length > 0)
    const modelNames = uniqueModelNames(rawIds)
    if (modelNames.length > 0) {
      return modelNames
    }
  }
  return []
}
/**
 * Fetch model names from an Ollama `/api/tags` endpoint. Returns [] (and
 * logs at debug level) on any request failure.
 */
async function fetchOllamaModels(
  url: string,
  headers: Record<string, string>,
): Promise<string[]> {
  let tags: OllamaTagsResponse
  try {
    const response = await axios.get<OllamaTagsResponse>(url, {
      headers,
      timeout: DISCOVERY_TIMEOUT_MS,
    })
    tags = response.data
  } catch {
    logForDebugging(`[ModelDiscovery] Failed to fetch Ollama models from ${url}`)
    return []
  }
  const rawNames = (tags?.models ?? [])
    .map(entry => entry.name ?? '')
    .filter((name): name is string => name.length > 0)
  return uniqueModelNames(rawNames)
}
export async function discoverOpenAICompatibleModelOptions(): Promise<
ModelOption[]
> {
if (getAPIProvider() !== 'openai') {
return []
}
const baseUrl = getNormalizedOpenAIBaseUrl()
const headers = getOpenAIAuthHeaders(baseUrl)
let discoveredModelNames = await fetchOpenAIModels(
getModelListUrls(baseUrl),
headers,
)
if (discoveredModelNames.length === 0) {
const ollamaTagsUrl = getOllamaTagsUrl(baseUrl)
if (ollamaTagsUrl) {
discoveredModelNames = await fetchOllamaModels(ollamaTagsUrl, headers)
}
}
return discoveredModelNames.map(modelName => ({
value: modelName,
label: modelName,
description: DISCOVERED_MODEL_DESCRIPTION,
}))
}

View File

@@ -0,0 +1,239 @@
import { afterEach, describe, expect, test } from 'bun:test'
import { saveGlobalConfig, type ProviderProfile } from './config.js'
import { getAPIProvider } from './model/providers.js'
import {
applyActiveProviderProfileFromConfig,
applyProviderProfileToProcessEnv,
deleteProviderProfile,
getProviderPresetDefaults,
} from './providerProfiles.js'
// Snapshot of process.env taken at module load; used to restore any
// provider-related keys the tests mutate.
const originalEnv = { ...process.env }
// Every env key that provider-profile management may touch and that must
// therefore be restored between tests.
const RESTORED_KEYS = [
'CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED',
'CLAUDE_CODE_USE_OPENAI',
'CLAUDE_CODE_USE_GEMINI',
'CLAUDE_CODE_USE_GITHUB',
'CLAUDE_CODE_USE_BEDROCK',
'CLAUDE_CODE_USE_VERTEX',
'CLAUDE_CODE_USE_FOUNDRY',
'OPENAI_BASE_URL',
'OPENAI_API_BASE',
'OPENAI_MODEL',
'OPENAI_API_KEY',
'ANTHROPIC_BASE_URL',
'ANTHROPIC_MODEL',
'ANTHROPIC_API_KEY',
] as const
// Restore mutated env keys and wipe profile-related config after each test
// so tests stay order-independent.
afterEach(() => {
for (const key of RESTORED_KEYS) {
if (originalEnv[key] === undefined) {
delete process.env[key]
} else {
process.env[key] = originalEnv[key]
}
}
// Reset every profile-related config field touched by these tests.
saveGlobalConfig(current => ({
...current,
providerProfiles: [],
activeProviderProfileId: undefined,
openaiAdditionalModelOptionsCache: [],
openaiAdditionalModelOptionsCacheByProfile: {},
}))
})
// Minimal valid openai profile for tests; individual fields can be
// overridden per test case.
function buildProfile(overrides: Partial<ProviderProfile> = {}): ProviderProfile {
  const base: ProviderProfile = {
    id: 'provider_test',
    name: 'Test Provider',
    provider: 'openai',
    baseUrl: 'https://api.openai.com/v1',
    model: 'gpt-4o',
  }
  return { ...base, ...overrides }
}
// Applying a profile must scrub competing CLAUDE_CODE_USE_* flags and set
// (or leave unset) the openai flag according to the profile's provider.
describe('applyProviderProfileToProcessEnv', () => {
test('openai profile clears competing gemini/github flags', () => {
process.env.CLAUDE_CODE_USE_GEMINI = '1'
process.env.CLAUDE_CODE_USE_GITHUB = '1'
applyProviderProfileToProcessEnv(buildProfile())
expect(process.env.CLAUDE_CODE_USE_GEMINI).toBeUndefined()
expect(process.env.CLAUDE_CODE_USE_GITHUB).toBeUndefined()
expect(process.env.CLAUDE_CODE_USE_OPENAI).toBe('1')
expect(getAPIProvider()).toBe('openai')
})
test('anthropic profile clears competing gemini/github flags', () => {
process.env.CLAUDE_CODE_USE_GEMINI = '1'
process.env.CLAUDE_CODE_USE_GITHUB = '1'
applyProviderProfileToProcessEnv(
buildProfile({
provider: 'anthropic',
baseUrl: 'https://api.anthropic.com',
model: 'claude-sonnet-4-6',
}),
)
expect(process.env.CLAUDE_CODE_USE_GEMINI).toBeUndefined()
expect(process.env.CLAUDE_CODE_USE_GITHUB).toBeUndefined()
// With no CLAUDE_CODE_USE_* flag set, provider resolution falls back to
// the first-party (anthropic) path.
expect(process.env.CLAUDE_CODE_USE_OPENAI).toBeUndefined()
expect(getAPIProvider()).toBe('firstParty')
})
})
// Precedence contract: an explicit CLAUDE_CODE_USE_* flag present at startup
// wins over the saved active profile; the profile is applied only when no
// explicit selection exists.
describe('applyActiveProviderProfileFromConfig', () => {
test('does not override explicit startup provider selection', () => {
process.env.CLAUDE_CODE_USE_OPENAI = '1'
process.env.OPENAI_BASE_URL = 'http://localhost:11434/v1'
process.env.OPENAI_MODEL = 'qwen2.5:3b'
const applied = applyActiveProviderProfileFromConfig({
providerProfiles: [
buildProfile({
id: 'saved_openai',
baseUrl: 'https://api.openai.com/v1',
model: 'gpt-4o',
}),
],
activeProviderProfileId: 'saved_openai',
} as any)
// Returns undefined (nothing applied) and leaves the explicit env intact.
expect(applied).toBeUndefined()
expect(process.env.OPENAI_BASE_URL).toBe('http://localhost:11434/v1')
expect(process.env.OPENAI_MODEL).toBe('qwen2.5:3b')
})
test('does not override explicit startup selection when profile marker is stale', () => {
// The "profile env applied" marker is set, but the env no longer matches
// the saved profile — explicit env must still win.
process.env.CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED = '1'
process.env.CLAUDE_CODE_USE_OPENAI = '1'
process.env.OPENAI_BASE_URL = 'http://localhost:11434/v1'
process.env.OPENAI_MODEL = 'qwen2.5:3b'
const applied = applyActiveProviderProfileFromConfig({
providerProfiles: [
buildProfile({
id: 'saved_openai',
baseUrl: 'https://api.openai.com/v1',
model: 'gpt-4o',
}),
],
activeProviderProfileId: 'saved_openai',
} as any)
expect(applied).toBeUndefined()
expect(process.env.CLAUDE_CODE_USE_OPENAI).toBe('1')
expect(process.env.OPENAI_BASE_URL).toBe('http://localhost:11434/v1')
expect(process.env.OPENAI_MODEL).toBe('qwen2.5:3b')
})
test('applies active profile when no explicit provider is selected', () => {
delete process.env.CLAUDE_CODE_USE_OPENAI
delete process.env.CLAUDE_CODE_USE_GEMINI
delete process.env.CLAUDE_CODE_USE_GITHUB
delete process.env.CLAUDE_CODE_USE_BEDROCK
delete process.env.CLAUDE_CODE_USE_VERTEX
delete process.env.CLAUDE_CODE_USE_FOUNDRY
process.env.OPENAI_BASE_URL = 'http://localhost:11434/v1'
process.env.OPENAI_MODEL = 'qwen2.5:3b'
const applied = applyActiveProviderProfileFromConfig({
providerProfiles: [
buildProfile({
id: 'saved_openai',
baseUrl: 'https://api.openai.com/v1',
model: 'gpt-4o',
}),
],
activeProviderProfileId: 'saved_openai',
} as any)
// Stale OPENAI_* values (without a selection flag) are overwritten.
expect(applied?.id).toBe('saved_openai')
expect(process.env.CLAUDE_CODE_USE_OPENAI).toBe('1')
expect(process.env.OPENAI_BASE_URL).toBe('https://api.openai.com/v1')
expect(process.env.OPENAI_MODEL).toBe('gpt-4o')
})
})
// The ollama preset must default to a local endpoint/model, not cloud values.
describe('getProviderPresetDefaults', () => {
test('ollama preset defaults to a local Ollama model', () => {
// OPENAI_MODEL would otherwise seed the preset's default model.
delete process.env.OPENAI_MODEL
const defaults = getProviderPresetDefaults('ollama')
expect(defaults.baseUrl).toBe('http://localhost:11434/v1')
expect(defaults.model).toBe('llama3.1:8b')
})
})
// Deleting the last profile must clean up env that profile management wrote,
// but must never clobber env the user set explicitly at startup.
describe('deleteProviderProfile', () => {
test('deleting final profile clears provider env when active profile applied it', () => {
// Env was written by profile management (marker flag set via apply).
applyProviderProfileToProcessEnv(
buildProfile({
id: 'only_profile',
baseUrl: 'https://api.openai.com/v1',
model: 'gpt-4o',
apiKey: 'sk-test',
}),
)
saveGlobalConfig(current => ({
...current,
providerProfiles: [buildProfile({ id: 'only_profile' })],
activeProviderProfileId: 'only_profile',
}))
const result = deleteProviderProfile('only_profile')
expect(result.removed).toBe(true)
expect(result.activeProfileId).toBeUndefined()
// Every profile-managed env var, including the marker, must be gone.
expect(process.env.CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED).toBeUndefined()
expect(process.env.CLAUDE_CODE_USE_OPENAI).toBeUndefined()
expect(process.env.CLAUDE_CODE_USE_GEMINI).toBeUndefined()
expect(process.env.CLAUDE_CODE_USE_GITHUB).toBeUndefined()
expect(process.env.CLAUDE_CODE_USE_BEDROCK).toBeUndefined()
expect(process.env.CLAUDE_CODE_USE_VERTEX).toBeUndefined()
expect(process.env.CLAUDE_CODE_USE_FOUNDRY).toBeUndefined()
expect(process.env.OPENAI_BASE_URL).toBeUndefined()
expect(process.env.OPENAI_API_BASE).toBeUndefined()
expect(process.env.OPENAI_MODEL).toBeUndefined()
expect(process.env.OPENAI_API_KEY).toBeUndefined()
expect(process.env.ANTHROPIC_BASE_URL).toBeUndefined()
expect(process.env.ANTHROPIC_MODEL).toBeUndefined()
expect(process.env.ANTHROPIC_API_KEY).toBeUndefined()
})
test('deleting final profile preserves explicit startup provider env', () => {
// Env set directly (no marker flag) — must survive the delete untouched.
process.env.CLAUDE_CODE_USE_OPENAI = '1'
process.env.OPENAI_BASE_URL = 'http://localhost:11434/v1'
process.env.OPENAI_MODEL = 'qwen2.5:3b'
saveGlobalConfig(current => ({
...current,
providerProfiles: [buildProfile({ id: 'only_profile' })],
activeProviderProfileId: 'only_profile',
}))
const result = deleteProviderProfile('only_profile')
expect(result.removed).toBe(true)
expect(result.activeProfileId).toBeUndefined()
expect(process.env.CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED).toBeUndefined()
expect(process.env.CLAUDE_CODE_USE_OPENAI).toBe('1')
expect(process.env.OPENAI_BASE_URL).toBe('http://localhost:11434/v1')
expect(process.env.OPENAI_MODEL).toBe('qwen2.5:3b')
})
})

View File

@@ -0,0 +1,659 @@
import { randomBytes } from 'crypto'
import {
getGlobalConfig,
saveGlobalConfig,
type ProviderProfile,
} from './config.js'
import type { ModelOption } from './model/modelOptions.js'
// Built-in presets selectable in the provider picker; each maps to a set of
// defaults via getProviderPresetDefaults.
export type ProviderPreset =
| 'anthropic'
| 'ollama'
| 'openai'
| 'moonshotai'
| 'deepseek'
| 'gemini'
| 'together'
| 'groq'
| 'mistral'
| 'azure-openai'
| 'openrouter'
| 'lmstudio'
| 'custom'
// User-supplied fields for creating/updating a profile; provider defaults
// to 'openai' when omitted (see toProfile).
export type ProviderProfileInput = {
provider?: ProviderProfile['provider']
name: string
baseUrl: string
model: string
apiKey?: string
}
// Preset defaults: same shape as input but with provider required, plus a
// hint for the UI on whether an API key field should be mandatory.
export type ProviderPresetDefaults = Omit<ProviderProfileInput, 'provider'> & {
provider: ProviderProfile['provider']
requiresApiKey: boolean
}
// Defaults for local Ollama (OpenAI-compatible endpoint).
const DEFAULT_OLLAMA_BASE_URL = 'http://localhost:11434/v1'
const DEFAULT_OLLAMA_MODEL = 'llama3.1:8b'
// Marker env var: set to '1' whenever profile management wrote the current
// provider env, so later code can distinguish it from explicit user env.
const PROFILE_ENV_APPLIED_FLAG = 'CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED'
/** Trimmed string, or '' when the value is undefined. */
function trimValue(value: string | undefined): string {
  if (value === undefined) {
    return ''
  }
  return value.trim()
}
/** Trimmed string, or undefined when the value is missing or blank. */
function trimOrUndefined(value: string | undefined): string | undefined {
  const trimmed = value?.trim() ?? ''
  return trimmed === '' ? undefined : trimmed
}
/** Trim a base URL and strip any trailing slashes. */
function normalizeBaseUrl(value: string): string {
  const trimmed = value.trim()
  return trimmed.replace(/\/+$/, '')
}
/**
 * Normalize one profile: trim every field, strip trailing slashes from the
 * base URL, and coerce provider to 'openai' unless it is exactly
 * 'anthropic'. Returns null when any required field ends up blank.
 */
function sanitizeProfile(profile: ProviderProfile): ProviderProfile | null {
  const id = profile.id?.trim() ?? ''
  const name = profile.name?.trim() ?? ''
  const baseUrl = (profile.baseUrl?.trim() ?? '').replace(/\/+$/, '')
  const model = profile.model?.trim() ?? ''
  if (id === '' || name === '' || baseUrl === '' || model === '') {
    return null
  }
  const apiKey = profile.apiKey?.trim()
  return {
    id,
    name,
    provider: profile.provider === 'anthropic' ? 'anthropic' : 'openai',
    baseUrl,
    model,
    // Blank keys are normalized to "no key".
    apiKey: apiKey ? apiKey : undefined,
  }
}
/**
 * Sanitize a stored profile list: drop invalid entries and later duplicates
 * of the same id, preserving first-seen order.
 */
function sanitizeProfiles(profiles: ProviderProfile[] | undefined): ProviderProfile[] {
  const byId = new Map<string, ProviderProfile>()
  for (const candidate of profiles ?? []) {
    const clean = sanitizeProfile(candidate)
    if (clean && !byId.has(clean.id)) {
      byId.set(clean.id, clean)
    }
  }
  return [...byId.values()]
}
/** Generate a fresh profile id: "provider_" + 12 random hex characters. */
function nextProfileId(): string {
  const suffix = randomBytes(6).toString('hex')
  return `provider_${suffix}`
}
/**
 * Build a sanitized ProviderProfile from user input, minting a new id
 * unless one is supplied. Returns null when required fields are blank.
 */
function toProfile(
  input: ProviderProfileInput,
  id: string = nextProfileId(),
): ProviderProfile | null {
  const candidate: ProviderProfile = {
    id,
    name: input.name,
    provider: input.provider ?? 'openai',
    baseUrl: input.baseUrl,
    model: input.model,
    apiKey: input.apiKey,
  }
  return sanitizeProfile(candidate)
}
/** Cached discovered model options for a profile id ([] when none cached). */
function getModelCacheByProfile(
  profileId: string,
  config = getGlobalConfig(),
): ModelOption[] {
  const cacheByProfile = config.openaiAdditionalModelOptionsCacheByProfile
  return cacheByProfile?.[profileId] ?? []
}
/**
 * Form defaults for each built-in provider preset: provider family, base
 * URL, default model, API-key seed, and whether the UI should require a key.
 * The anthropic and custom presets seed their fields from any existing env
 * so editing starts from the user's current setup; other presets use fixed
 * public endpoints. Unknown presets fall back to the Ollama defaults.
 */
export function getProviderPresetDefaults(
preset: ProviderPreset,
): ProviderPresetDefaults {
switch (preset) {
case 'anthropic':
return {
provider: 'anthropic',
name: 'Anthropic',
// Seed from env so an existing first-party setup is preserved.
baseUrl: process.env.ANTHROPIC_BASE_URL ?? 'https://api.anthropic.com',
model: process.env.ANTHROPIC_MODEL ?? 'claude-sonnet-4-6',
apiKey: process.env.ANTHROPIC_API_KEY ?? '',
requiresApiKey: true,
}
case 'openai':
return {
provider: 'openai',
name: 'OpenAI',
baseUrl: 'https://api.openai.com/v1',
model: 'gpt-5.3-codex',
apiKey: '',
requiresApiKey: true,
}
case 'moonshotai':
return {
provider: 'openai',
name: 'Moonshot AI',
baseUrl: 'https://api.moonshot.ai/v1',
model: 'kimi-k2.5',
apiKey: '',
requiresApiKey: true,
}
case 'deepseek':
return {
provider: 'openai',
name: 'DeepSeek',
baseUrl: 'https://api.deepseek.com/v1',
model: 'deepseek-chat',
apiKey: '',
requiresApiKey: true,
}
case 'gemini':
return {
provider: 'openai',
name: 'Google Gemini',
// Gemini's OpenAI-compatibility endpoint (not the native Gemini API).
baseUrl: 'https://generativelanguage.googleapis.com/v1beta/openai',
model: 'gemini-3-flash-preview',
apiKey: '',
requiresApiKey: true,
}
case 'together':
return {
provider: 'openai',
name: 'Together AI',
baseUrl: 'https://api.together.xyz/v1',
model: 'Qwen/Qwen3.5-9B',
apiKey: '',
requiresApiKey: true,
}
case 'groq':
return {
provider: 'openai',
name: 'Groq',
baseUrl: 'https://api.groq.com/openai/v1',
model: 'llama-3.3-70b-versatile',
apiKey: '',
requiresApiKey: true,
}
case 'mistral':
return {
provider: 'openai',
name: 'Mistral',
baseUrl: 'https://api.mistral.ai/v1',
model: 'mistral-large-latest',
apiKey: '',
requiresApiKey: true,
}
case 'azure-openai':
return {
provider: 'openai',
name: 'Azure OpenAI',
// Placeholders: the user must substitute their resource and deployment.
baseUrl: 'https://YOUR-RESOURCE-NAME.openai.azure.com/openai/v1',
model: 'YOUR-DEPLOYMENT-NAME',
apiKey: '',
requiresApiKey: true,
}
case 'openrouter':
return {
provider: 'openai',
name: 'OpenRouter',
baseUrl: 'https://openrouter.ai/api/v1',
model: 'openai/gpt-5-mini',
apiKey: '',
requiresApiKey: true,
}
case 'lmstudio':
return {
provider: 'openai',
name: 'LM Studio',
baseUrl: 'http://localhost:1234/v1',
model: 'local-model',
apiKey: '',
requiresApiKey: false,
}
case 'custom':
return {
provider: 'openai',
name: 'Custom OpenAI-compatible',
// Seed from env so an existing OpenAI-compatible setup is preserved.
baseUrl:
process.env.OPENAI_BASE_URL ??
process.env.OPENAI_API_BASE ??
DEFAULT_OLLAMA_BASE_URL,
model: process.env.OPENAI_MODEL ?? DEFAULT_OLLAMA_MODEL,
apiKey: process.env.OPENAI_API_KEY ?? '',
requiresApiKey: false,
}
case 'ollama':
// Deliberate fallthrough: any unknown preset behaves like 'ollama'.
default:
return {
provider: 'openai',
name: 'Ollama',
baseUrl: DEFAULT_OLLAMA_BASE_URL,
model: process.env.OPENAI_MODEL ?? DEFAULT_OLLAMA_MODEL,
apiKey: '',
requiresApiKey: false,
}
}
}
/** All valid provider profiles from config (invalid/duplicate entries dropped). */
export function getProviderProfiles(
  config = getGlobalConfig(),
): ProviderProfile[] {
  const stored = config.providerProfiles
  return sanitizeProfiles(stored)
}
/** True when at least one valid provider profile exists in config. */
export function hasProviderProfiles(config = getGlobalConfig()): boolean {
  return getProviderProfiles(config).length !== 0
}
/**
 * True when any explicit provider-selection flag (CLAUDE_CODE_USE_*) is
 * present in the environment — i.e. startup intent that should win over
 * the saved profile. Presence is what matters, not the value.
 */
function hasProviderSelectionFlags(
  processEnv: NodeJS.ProcessEnv = process.env,
): boolean {
  const selectionFlags = [
    'CLAUDE_CODE_USE_OPENAI',
    'CLAUDE_CODE_USE_GEMINI',
    'CLAUDE_CODE_USE_GITHUB',
    'CLAUDE_CODE_USE_BEDROCK',
    'CLAUDE_CODE_USE_VERTEX',
    'CLAUDE_CODE_USE_FOUNDRY',
  ]
  return selectionFlags.some(flag => processEnv[flag] !== undefined)
}
/** Compare two optional env values, treating blank and undefined as equal. */
function sameOptionalEnvValue(
  left: string | undefined,
  right: string | undefined,
): boolean {
  const normalize = (value: string | undefined): string | undefined => {
    const trimmed = value?.trim()
    return trimmed ? trimmed : undefined
  }
  return normalize(left) === normalize(right)
}
/**
 * Whether process env currently reflects exactly the given profile: the
 * profile-applied marker flag is set AND the provider flags / base URL /
 * model (and, unless includeApiKey is false, the API key) all match.
 * Used to decide when it is safe to re-apply or clear profile-managed env.
 */
function isProcessEnvAlignedWithProfile(
  processEnv: NodeJS.ProcessEnv,
  profile: ProviderProfile,
  options?: {
    includeApiKey?: boolean
  },
): boolean {
  if (processEnv[PROFILE_ENV_APPLIED_FLAG] !== '1') {
    return false
  }
  const includeApiKey = options?.includeApiKey ?? true
  if (profile.provider === 'anthropic') {
    // Anthropic profiles set no CLAUDE_CODE_USE_* flag at all.
    if (hasProviderSelectionFlags(processEnv)) {
      return false
    }
    if (!sameOptionalEnvValue(processEnv.ANTHROPIC_BASE_URL, profile.baseUrl)) {
      return false
    }
    if (!sameOptionalEnvValue(processEnv.ANTHROPIC_MODEL, profile.model)) {
      return false
    }
    return (
      !includeApiKey ||
      sameOptionalEnvValue(processEnv.ANTHROPIC_API_KEY, profile.apiKey)
    )
  }
  // openai-style profile: exactly the OPENAI flag, no competing flags.
  const onlyOpenAIFlag =
    processEnv.CLAUDE_CODE_USE_OPENAI !== undefined &&
    processEnv.CLAUDE_CODE_USE_GEMINI === undefined &&
    processEnv.CLAUDE_CODE_USE_GITHUB === undefined &&
    processEnv.CLAUDE_CODE_USE_BEDROCK === undefined &&
    processEnv.CLAUDE_CODE_USE_VERTEX === undefined &&
    processEnv.CLAUDE_CODE_USE_FOUNDRY === undefined
  if (!onlyOpenAIFlag) {
    return false
  }
  if (!sameOptionalEnvValue(processEnv.OPENAI_BASE_URL, profile.baseUrl)) {
    return false
  }
  if (!sameOptionalEnvValue(processEnv.OPENAI_MODEL, profile.model)) {
    return false
  }
  return (
    !includeApiKey ||
    sameOptionalEnvValue(processEnv.OPENAI_API_KEY, profile.apiKey)
  )
}
/**
 * The profile selected by activeProviderProfileId, falling back to the
 * first profile when the id is blank or stale; undefined when no profiles
 * exist at all.
 */
export function getActiveProviderProfile(
  config = getGlobalConfig(),
): ProviderProfile | undefined {
  const profiles = getProviderProfiles(config)
  const [firstProfile] = profiles
  if (!firstProfile) {
    return undefined
  }
  const activeId = config.activeProviderProfileId?.trim()
  if (!activeId) {
    return firstProfile
  }
  return profiles.find(profile => profile.id === activeId) ?? firstProfile
}
/**
 * Remove every provider/model env var that profile management owns,
 * including the "profile env applied" marker flag.
 */
export function clearProviderProfileEnvFromProcessEnv(
  processEnv: NodeJS.ProcessEnv = process.env,
): void {
  const managedKeys = [
    'CLAUDE_CODE_USE_OPENAI',
    'CLAUDE_CODE_USE_GEMINI',
    'CLAUDE_CODE_USE_GITHUB',
    'CLAUDE_CODE_USE_BEDROCK',
    'CLAUDE_CODE_USE_VERTEX',
    'CLAUDE_CODE_USE_FOUNDRY',
    'OPENAI_BASE_URL',
    'OPENAI_API_BASE',
    'OPENAI_MODEL',
    'OPENAI_API_KEY',
    'ANTHROPIC_BASE_URL',
    'ANTHROPIC_MODEL',
    'ANTHROPIC_API_KEY',
    PROFILE_ENV_APPLIED_FLAG,
  ]
  for (const key of managedKeys) {
    delete processEnv[key]
  }
}
/**
 * Mutate process.env so the current process talks to the given profile's
 * provider. Starts from a clean slate (all profile-managed vars removed),
 * then sets the marker flag plus the provider-specific vars.
 */
export function applyProviderProfileToProcessEnv(profile: ProviderProfile): void {
clearProviderProfileEnvFromProcessEnv()
// Marker that later lets us distinguish profile-managed env from explicit
// user/startup env (see isProcessEnvAlignedWithProfile).
process.env[PROFILE_ENV_APPLIED_FLAG] = '1'
// NOTE(review): ANTHROPIC_MODEL is set for BOTH provider kinds, so an
// openai profile also exports its model under ANTHROPIC_MODEL — presumably
// some consumers read the model from there regardless of provider; confirm.
process.env.ANTHROPIC_MODEL = profile.model
if (profile.provider === 'anthropic') {
process.env.ANTHROPIC_BASE_URL = profile.baseUrl
if (profile.apiKey) {
process.env.ANTHROPIC_API_KEY = profile.apiKey
} else {
delete process.env.ANTHROPIC_API_KEY
}
// Redundant after the clear above, but kept as a defensive guarantee that
// no OPENAI_* leakage survives when switching to anthropic.
delete process.env.OPENAI_BASE_URL
delete process.env.OPENAI_API_BASE
delete process.env.OPENAI_MODEL
delete process.env.OPENAI_API_KEY
return
}
// openai-style profile: select the openai provider path explicitly.
process.env.CLAUDE_CODE_USE_OPENAI = '1'
process.env.OPENAI_BASE_URL = profile.baseUrl
process.env.OPENAI_MODEL = profile.model
if (profile.apiKey) {
process.env.OPENAI_API_KEY = profile.apiKey
} else {
delete process.env.OPENAI_API_KEY
}
}
/**
 * Apply the active provider profile's env vars, unless explicit startup
 * provider intent is present.
 *
 * Returns the profile that was applied, or undefined when there is no
 * active profile or when explicit CLAUDE_CODE_USE_* flags are present and
 * the current env was not itself written by profile management. Pass
 * options.force to bypass the explicit-selection check entirely.
 */
export function applyActiveProviderProfileFromConfig(
config = getGlobalConfig(),
options?: {
processEnv?: NodeJS.ProcessEnv
force?: boolean
},
): ProviderProfile | undefined {
const processEnv = options?.processEnv ?? process.env
const activeProfile = getActiveProviderProfile(config)
if (!activeProfile) {
return undefined
}
if (!options?.force && hasProviderSelectionFlags(processEnv)) {
// Respect explicit startup provider intent. Re-apply only when the
// current process env is already profile-managed and aligned.
if (!isProcessEnvAlignedWithProfile(processEnv, activeProfile)) {
return undefined
}
}
// NOTE(review): always writes to process.env even when options.processEnv
// was supplied — the custom env is only consulted for the checks above.
applyProviderProfileToProcessEnv(activeProfile)
return activeProfile
}
/**
 * Persist a new provider profile built from user input.
 *
 * Returns null (no config change) when the input fails sanitization.
 * By default (makeActive !== false) the new profile becomes active; it also
 * becomes active when no valid current active id exists. When it ends up
 * active, its env vars are applied and the discovered-model cache cleared
 * so models are re-discovered against the new endpoint.
 */
export function addProviderProfile(
input: ProviderProfileInput,
options?: { makeActive?: boolean },
): ProviderProfile | null {
const profile = toProfile(input)
if (!profile) {
return null
}
const makeActive = options?.makeActive ?? true
saveGlobalConfig(current => {
const currentProfiles = getProviderProfiles(current)
const nextProfiles = [...currentProfiles, profile]
const currentActive = trimOrUndefined(current.activeProviderProfileId)
// Activate the new profile when requested, or when the stored active id
// is missing/stale.
const nextActiveId =
makeActive || !currentActive || !nextProfiles.some(p => p.id === currentActive)
? profile.id
: currentActive
return {
...current,
providerProfiles: nextProfiles,
activeProviderProfileId: nextActiveId,
}
})
// Re-read config: only apply env if the new profile actually became active.
const activeProfile = getActiveProviderProfile()
if (activeProfile?.id === profile.id) {
applyProviderProfileToProcessEnv(profile)
clearActiveOpenAIModelOptionsCache()
}
return profile
}
/**
 * Replace an existing profile's fields (keeping its id).
 *
 * Returns null when the input fails sanitization or the id is unknown.
 * The profile's per-profile discovered-model cache is always invalidated
 * (its endpoint/model may have changed); when the updated profile is the
 * active one, the legacy single cache is cleared too and the new env vars
 * are applied to the process.
 */
export function updateProviderProfile(
profileId: string,
input: ProviderProfileInput,
): ProviderProfile | null {
const updatedProfile = toProfile(input, profileId)
if (!updatedProfile) {
return null
}
let wasUpdated = false
let shouldApply = false
saveGlobalConfig(current => {
const currentProfiles = getProviderProfiles(current)
const profileIndex = currentProfiles.findIndex(
profile => profile.id === profileId,
)
if (profileIndex < 0) {
// Unknown id: leave config untouched.
return current
}
wasUpdated = true
const nextProfiles = [...currentProfiles]
nextProfiles[profileIndex] = updatedProfile
// Drop the stale per-profile model cache for the edited profile.
const cacheByProfile = {
...(current.openaiAdditionalModelOptionsCacheByProfile ?? {}),
}
delete cacheByProfile[profileId]
// Keep the current active id when still valid; else fall back to first.
const currentActive = trimOrUndefined(current.activeProviderProfileId)
const nextActiveId =
currentActive && nextProfiles.some(profile => profile.id === currentActive)
? currentActive
: nextProfiles[0]?.id
shouldApply = nextActiveId === profileId
return {
...current,
providerProfiles: nextProfiles,
activeProviderProfileId: nextActiveId,
openaiAdditionalModelOptionsCacheByProfile: cacheByProfile,
// Clear the legacy cache only when the active profile was the one edited.
openaiAdditionalModelOptionsCache: shouldApply
? []
: current.openaiAdditionalModelOptionsCache,
}
})
if (!wasUpdated) {
return null
}
if (shouldApply) {
applyProviderProfileToProcessEnv(updatedProfile)
}
return updatedProfile
}
/**
 * Mark a profile active, promote its per-profile model cache to the legacy
 * single cache, and apply its env vars. Returns null when the id is unknown.
 */
export function setActiveProviderProfile(
  profileId: string,
): ProviderProfile | null {
  const profiles = getProviderProfiles(getGlobalConfig())
  const target = profiles.find(profile => profile.id === profileId)
  if (!target) {
    return null
  }
  saveGlobalConfig(config => ({
    ...config,
    activeProviderProfileId: profileId,
    openaiAdditionalModelOptionsCache: getModelCacheByProfile(profileId, config),
  }))
  applyProviderProfileToProcessEnv(target)
  return target
}
/**
 * Delete a profile by id and reconcile active-profile state and process env.
 *
 * When another profile remains, it (the surviving active, else the first)
 * becomes active and its env is applied. When the LAST profile is deleted,
 * profile-managed env is cleared — but only if the env was actually written
 * by profile management and still matches the deleted profile (API key
 * excluded from the match, since keys may be redacted in config); explicit
 * startup env is preserved.
 */
export function deleteProviderProfile(profileId: string): {
removed: boolean
activeProfileId?: string
} {
let removed = false
let deletedProfile: ProviderProfile | undefined
let nextActiveProfile: ProviderProfile | undefined
saveGlobalConfig(current => {
const currentProfiles = getProviderProfiles(current)
const existing = currentProfiles.find(profile => profile.id === profileId)
if (!existing) {
// Unknown id: leave config untouched and report removed=false.
return current
}
removed = true
deletedProfile = existing
const nextProfiles = currentProfiles.filter(profile => profile.id !== profileId)
const currentActive = trimOrUndefined(current.activeProviderProfileId)
// Treat a missing/stale active id the same as deleting the active profile.
const activeWasDeleted =
!currentActive || currentActive === profileId ||
!nextProfiles.some(profile => profile.id === currentActive)
const nextActiveId = activeWasDeleted ? nextProfiles[0]?.id : currentActive
if (nextActiveId) {
nextActiveProfile =
nextProfiles.find(profile => profile.id === nextActiveId) ?? nextProfiles[0]
}
// Drop the deleted profile's per-profile model cache.
const cacheByProfile = {
...(current.openaiAdditionalModelOptionsCacheByProfile ?? {}),
}
delete cacheByProfile[profileId]
return {
...current,
providerProfiles: nextProfiles,
activeProviderProfileId: nextActiveId,
openaiAdditionalModelOptionsCacheByProfile: cacheByProfile,
// Promote the new active profile's cache into the legacy single cache.
openaiAdditionalModelOptionsCache: nextActiveId
? getModelCacheByProfile(nextActiveId, {
...current,
openaiAdditionalModelOptionsCacheByProfile: cacheByProfile,
})
: [],
}
})
if (nextActiveProfile) {
applyProviderProfileToProcessEnv(nextActiveProfile)
} else if (
deletedProfile &&
isProcessEnvAlignedWithProfile(process.env, deletedProfile, {
includeApiKey: false,
})
) {
// Last profile gone AND env was profile-managed: clean up after ourselves.
clearProviderProfileEnvFromProcessEnv()
}
return {
removed,
activeProfileId: nextActiveProfile?.id,
}
}
/**
 * Discovered OpenAI-compatible model options for the active profile.
 * Falls back to the legacy single cache when there is no active profile,
 * or when no per-profile cache exists at all (pre-migration users).
 */
export function getActiveOpenAIModelOptionsCache(
  config = getGlobalConfig(),
): ModelOption[] {
  const legacyCache = config.openaiAdditionalModelOptionsCache ?? []
  const activeProfile = getActiveProviderProfile(config)
  if (!activeProfile) {
    return legacyCache
  }
  const perProfile = config.openaiAdditionalModelOptionsCacheByProfile ?? {}
  const cached = perProfile[activeProfile.id]
  if (cached) {
    return cached
  }
  // Legacy fallback only when NO per-profile cache exists anywhere; once any
  // profile has its own entry, a miss means "nothing discovered yet".
  return Object.keys(perProfile).length === 0 ? legacyCache : []
}
/**
 * Persist discovered model options: always writes the legacy single cache,
 * and additionally the active profile's per-profile entry when one exists.
 */
export function setActiveOpenAIModelOptionsCache(options: ModelOption[]): void {
  const activeProfile = getActiveProviderProfile()
  saveGlobalConfig(current => {
    if (!activeProfile) {
      return { ...current, openaiAdditionalModelOptionsCache: options }
    }
    return {
      ...current,
      openaiAdditionalModelOptionsCache: options,
      openaiAdditionalModelOptionsCacheByProfile: {
        ...(current.openaiAdditionalModelOptionsCacheByProfile ?? {}),
        [activeProfile.id]: options,
      },
    }
  })
}
export function clearActiveOpenAIModelOptionsCache(): void {
const activeProfile = getActiveProviderProfile()
if (!activeProfile) {
saveGlobalConfig(current => ({
...current,
openaiAdditionalModelOptionsCache: [],
}))
return
}
saveGlobalConfig(current => {
const cacheByProfile = {
...(current.openaiAdditionalModelOptionsCacheByProfile ?? {}),
}
delete cacheByProfile[activeProfile.id]
return {
...current,
openaiAdditionalModelOptionsCache: [],
openaiAdditionalModelOptionsCacheByProfile: cacheByProfile,
}
})
}