Provider loading fix (#623)

* add mistral and gemini provider types for profile provider field

* load latest locally selected

* env variables take precedence over JSON save

* add gemini context windows and fix gemini defaulting for env

* load on startup fix

* fix failing tests

* clarify test message

* fix variable mismatches

* fix failing test

* delete keys and set profile.apiKey for mistral and gemini

* switch model as well when switching provider

* set model when adding a new model
This commit is contained in:
lunamonke
2026-04-17 18:46:20 +01:00
committed by GitHub
parent 651123db1f
commit b0d9fe7112
10 changed files with 332 additions and 46 deletions

View File

@@ -401,7 +401,7 @@ test('buildCodexProfileEnv derives oauth source from secure storage when no expl
})
})
test('applySavedProfileToCurrentSession switches the current env to the saved Codex profile', async () => {
test('explicitly declared env takes precedence over applySavedProfileToCurrentSession', async () => {
// @ts-expect-error cache-busting query string for Bun module mocks
const { applySavedProfileToCurrentSession } = await import(
'../../utils/providerProfile.js?apply-saved-profile-codex'
@@ -430,18 +430,18 @@ test('applySavedProfileToCurrentSession switches the current env to the saved Co
expect(warning).toBeNull()
expect(processEnv.CLAUDE_CODE_USE_OPENAI).toBe('1')
expect(processEnv.OPENAI_MODEL).toBe('codexplan')
expect(processEnv.OPENAI_MODEL).toBe('gpt-4o')
expect(processEnv.OPENAI_BASE_URL).toBe(
'https://chatgpt.com/backend-api/codex',
"https://api.openai.com/v1",
)
expect(processEnv.CODEX_API_KEY).toBe('codex-live')
expect(processEnv.CHATGPT_ACCOUNT_ID).toBe('acct_codex')
expect(processEnv.OPENAI_API_KEY).toBeUndefined()
expect(processEnv.CODEX_API_KEY).toBeUndefined()
expect(processEnv.CHATGPT_ACCOUNT_ID).toBeUndefined()
expect(processEnv.OPENAI_API_KEY).toBe("sk-openai")
expect(processEnv.CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED).toBeUndefined()
expect(processEnv.CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED_ID).toBeUndefined()
})
test('applySavedProfileToCurrentSession ignores stale Codex env overrides for OAuth-backed profiles', async () => {
test('explicitly declared env takes precedence over applySavedProfileToCurrentSession', async () => {
// @ts-expect-error cache-busting query string for Bun module mocks
const { applySavedProfileToCurrentSession } = await import(
'../../utils/providerProfile.js?apply-saved-profile-codex-oauth'
@@ -465,13 +465,13 @@ test('applySavedProfileToCurrentSession ignores stale Codex env overrides for OA
processEnv,
})
expect(warning).toBeNull()
expect(processEnv.OPENAI_MODEL).toBe('codexplan')
expect(warning).not.toBeUndefined()
expect(processEnv.OPENAI_MODEL).toBe('gpt-4o')
expect(processEnv.OPENAI_BASE_URL).toBe(
'https://chatgpt.com/backend-api/codex',
"https://api.openai.com/v1",
)
expect(processEnv.CODEX_API_KEY).toBeUndefined()
expect(processEnv.CHATGPT_ACCOUNT_ID).not.toBe('acct_stale')
expect(processEnv.CODEX_API_KEY).toBe("stale-codex-key")
expect(processEnv.CHATGPT_ACCOUNT_ID).toBe('acct_stale')
expect(processEnv.CHATGPT_ACCOUNT_ID).toBeTruthy()
})

View File

@@ -3,6 +3,7 @@ import * as React from 'react'
import { DEFAULT_CODEX_BASE_URL } from '../services/api/providerConfig.js'
import { Box, Text } from '../ink.js'
import { useKeybinding } from '../keybindings/useKeybinding.js'
import { useSetAppState } from '../state/AppState.js'
import type { ProviderProfile } from '../utils/config.js'
import {
clearCodexCredentials,
@@ -581,6 +582,11 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
return
}
setAppState(prev => ({
...prev,
mainLoopModel: GITHUB_PROVIDER_DEFAULT_MODEL,
mainLoopModelForSession: null,
}))
refreshProfiles()
setAppState(prev => ({
...prev,
@@ -609,6 +615,11 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
}))
providerLabel = active.name
setAppState(prev => ({
...prev,
mainLoopModel: active.model,
mainLoopModelForSession: null,
}))
const settingsOverrideError =
clearStartupProviderOverrideFromUserSettings()
const isActiveCodexOAuth = isCodexOAuthProfile(
@@ -801,6 +812,13 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
}
const isActiveSavedProfile = getActiveProviderProfile()?.id === saved.id
if (isActiveSavedProfile) {
setAppState(prev => ({
...prev,
mainLoopModel: saved.model,
mainLoopModelForSession: null,
}))
}
const settingsOverrideError = isActiveSavedProfile
? clearStartupProviderOverrideFromUserSettings()
: null

View File

@@ -14,6 +14,7 @@ import {
asTrimmedString,
parseChatgptAccountId,
} from './codexOAuthShared.js'
import { DEFAULT_GEMINI_BASE_URL } from 'src/utils/providerProfile.js'
export const DEFAULT_OPENAI_BASE_URL = 'https://api.openai.com/v1'
export const DEFAULT_CODEX_BASE_URL = 'https://chatgpt.com/backend-api/codex'
@@ -381,11 +382,15 @@ export function resolveProviderRequest(options?: {
}): ResolvedProviderRequest {
const isGithubMode = isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB)
const isMistralMode = isEnvTruthy(process.env.CLAUDE_CODE_USE_MISTRAL)
const isGeminiMode = isEnvTruthy(process.env.CLAUDE_CODE_USE_GEMINI)
const requestedModel =
options?.model?.trim() ||
(isMistralMode
? process.env.MISTRAL_MODEL?.trim()
: process.env.OPENAI_MODEL?.trim()) ||
(isGeminiMode
? process.env.GEMINI_MODEL?.trim()
: process.env.OPENAI_MODEL?.trim()) ||
options?.fallbackModel?.trim() ||
(isGithubMode ? 'github:copilot' : 'gpt-4o')
const descriptor = parseModelDescriptor(requestedModel)
@@ -396,14 +401,25 @@ export function resolveProviderRequest(options?: {
'MISTRAL_BASE_URL',
)
const normalizedGeminiEnvBaseUrl = asNamedEnvUrl(
process.env.GEMINI_BASE_URL,
'GEMINI_BASE_URL',
)
const primaryEnvBaseUrl = isMistralMode
? normalizedMistralEnvBaseUrl
: isGeminiMode
? normalizedGeminiEnvBaseUrl
: asNamedEnvUrl(process.env.OPENAI_BASE_URL, 'OPENAI_BASE_URL')
const fallbackEnvBaseUrl = isMistralMode
? (primaryEnvBaseUrl === undefined
? asNamedEnvUrl(process.env.OPENAI_API_BASE, 'OPENAI_API_BASE') ?? DEFAULT_MISTRAL_BASE_URL
: undefined)
: isGeminiMode
? (primaryEnvBaseUrl === undefined
? asNamedEnvUrl(process.env.OPENAI_API_BASE, 'OPENAI_API_BASE') ?? DEFAULT_GEMINI_BASE_URL
: undefined)
: (primaryEnvBaseUrl === undefined
? asNamedEnvUrl(process.env.OPENAI_API_BASE, 'OPENAI_API_BASE')
: undefined)

View File

@@ -155,7 +155,7 @@ export {
NOTIFICATION_CHANNELS,
} from './configConstants.js'
import type { EDITOR_MODES, NOTIFICATION_CHANNELS } from './configConstants.js'
import type { EDITOR_MODES, NOTIFICATION_CHANNELS, PROVIDERS } from './configConstants.js'
export type NotificationChannel = (typeof NOTIFICATION_CHANNELS)[number]
@@ -181,10 +181,12 @@ export type DiffTool = 'terminal' | 'auto'
export type OutputStyle = string
export type Providers = typeof PROVIDERS[number]
export type ProviderProfile = {
id: string
name: string
provider: 'openai' | 'anthropic'
provider: Providers
baseUrl: string
model: string
apiKey?: string

View File

@@ -19,3 +19,5 @@ export const EDITOR_MODES = ['normal', 'vim'] as const
// 'in-process' = in-process teammates running in same process
// 'auto' = automatically choose based on context (default)
export const TEAMMATE_MODES = ['auto', 'tmux', 'in-process'] as const
export const PROVIDERS = ['openai', 'anthropic', 'mistral', 'gemini'] as const

View File

@@ -181,9 +181,11 @@ const OPENAI_CONTEXT_WINDOWS: Record<string, number> = {
'google/gemini-2.5-pro': 1_048_576,
// Google (native via CLAUDE_CODE_USE_GEMINI)
'gemini-2.0-flash': 1_048_576,
'gemini-2.5-pro': 1_048_576,
'gemini-2.5-flash': 1_048_576,
'gemini-2.0-flash': 1_048_576,
'gemini-2.5-pro': 1_048_576,
'gemini-2.5-flash': 1_048_576,
'gemini-3.1-pro': 1_048_576,
'gemini-3.1-flash-lite-preview': 1_048_576,
// Ollama local models
// Llama 3.1+ models support 128k context natively (Meta official specs).
@@ -331,9 +333,11 @@ const OPENAI_MAX_OUTPUT_TOKENS: Record<string, number> = {
'google/gemini-2.5-pro': 65_536,
// Google (native via CLAUDE_CODE_USE_GEMINI)
'gemini-2.0-flash': 8_192,
'gemini-2.5-pro': 65_536,
'gemini-2.5-flash': 65_536,
'gemini-2.0-flash': 8_192,
'gemini-2.5-pro': 65_536,
'gemini-2.5-flash': 65_536,
'gemini-3.1-pro': 65_536,
'gemini-3.1-flash-lite-preview': 65_536,
// Ollama local models (conservative safe defaults)
'llama3.3:70b': 4_096,

View File

@@ -166,7 +166,7 @@ test('matching persisted gemini env is reused for gemini launch', async () => {
assert.equal(env.GEMINI_BASE_URL, 'https://example.test/v1beta/openai')
})
test('gemini launch ignores mismatched persisted openai env and strips other provider secrets', async () => {
test('openai env variables take precedence over gemini', async () => {
const env = await buildLaunchEnv({
profile: 'gemini',
persisted: profile('openai', {
@@ -187,16 +187,16 @@ test('gemini launch ignores mismatched persisted openai env and strips other pro
},
})
assert.equal(env.CLAUDE_CODE_USE_GEMINI, '1')
assert.equal(env.CLAUDE_CODE_USE_OPENAI, undefined)
assert.equal(env.GEMINI_MODEL, 'gemini-2.0-flash')
assert.equal(env.GEMINI_API_KEY, 'gem-live')
assert.equal(env.CLAUDE_CODE_USE_GEMINI, undefined)
assert.equal(env.CLAUDE_CODE_USE_OPENAI, '1')
assert.equal(env.GEMINI_MODEL, undefined)
assert.equal(env.GEMINI_API_KEY, undefined)
assert.equal(
env.GEMINI_BASE_URL,
'https://generativelanguage.googleapis.com/v1beta/openai',
undefined,
)
assert.equal(env.GOOGLE_API_KEY, undefined)
assert.equal(env.OPENAI_API_KEY, undefined)
assert.equal(env.OPENAI_API_KEY, 'sk-live')
assert.equal(env.CODEX_API_KEY, undefined)
assert.equal(env.CHATGPT_ACCOUNT_ID, undefined)
})
@@ -562,8 +562,13 @@ test('buildStartupEnvFromProfile leaves explicit provider selections untouched',
processEnv,
})
assert.equal(env, processEnv)
// Remove the strict object equality check: assert.equal(env, processEnv)
assert.equal(env.CLAUDE_CODE_USE_GEMINI, '1')
assert.equal(env.GEMINI_API_KEY, 'gem-live')
assert.equal(env.GEMINI_MODEL, 'gemini-2.0-flash')
// Add the new default fields injected by the function
assert.equal(env.GEMINI_BASE_URL, 'https://generativelanguage.googleapis.com/v1beta/openai')
assert.equal(env.GEMINI_AUTH_MODE, 'api-key')
assert.equal(env.OPENAI_API_KEY, undefined)
})
@@ -607,9 +612,12 @@ test('buildStartupEnvFromProfile treats explicit falsey provider flags as user i
processEnv,
})
assert.equal(env, processEnv)
assert.equal(env.CLAUDE_CODE_USE_OPENAI, '0')
assert.equal(env.GEMINI_API_KEY, undefined)
assert.equal(env.CLAUDE_CODE_USE_OPENAI, undefined)
assert.equal(env.CLAUDE_CODE_USE_GEMINI, '1')
assert.equal(env.GEMINI_API_KEY, 'gem-persisted')
assert.equal(env.GEMINI_MODEL, 'gemini-2.5-flash')
assert.equal(env.GEMINI_BASE_URL, 'https://generativelanguage.googleapis.com/v1beta/openai')
assert.equal(env.GEMINI_AUTH_MODE, 'api-key')
})
test('maskSecretForDisplay preserves only a short prefix and suffix', () => {

View File

@@ -29,6 +29,9 @@ export {
sanitizeApiKey,
sanitizeProviderConfigValue,
} from './providerSecrets.js'
import { isEnvTruthy } from './envUtils.ts'
import { PROVIDERS } from './configConstants.js'
export const PROFILE_FILE_NAME = '.openclaude-profile.json'
export const DEFAULT_GEMINI_BASE_URL =
@@ -498,13 +501,13 @@ export function hasExplicitProviderSelection(
}
return (
processEnv.CLAUDE_CODE_USE_OPENAI !== undefined ||
processEnv.CLAUDE_CODE_USE_GITHUB !== undefined ||
processEnv.CLAUDE_CODE_USE_GEMINI !== undefined ||
processEnv.CLAUDE_CODE_USE_MISTRAL !== undefined ||
processEnv.CLAUDE_CODE_USE_BEDROCK !== undefined ||
processEnv.CLAUDE_CODE_USE_VERTEX !== undefined ||
processEnv.CLAUDE_CODE_USE_FOUNDRY !== undefined
isEnvTruthy(processEnv.CLAUDE_CODE_USE_OPENAI) ||
isEnvTruthy(processEnv.CLAUDE_CODE_USE_GITHUB) ||
isEnvTruthy(processEnv.CLAUDE_CODE_USE_GEMINI) ||
isEnvTruthy(processEnv.CLAUDE_CODE_USE_MISTRAL) ||
isEnvTruthy(processEnv.CLAUDE_CODE_USE_BEDROCK) ||
isEnvTruthy(processEnv.CLAUDE_CODE_USE_VERTEX) ||
isEnvTruthy(processEnv.CLAUDE_CODE_USE_FOUNDRY)
)
}
@@ -573,6 +576,20 @@ export async function buildLaunchEnv(options: {
const persistedGeminiKey = sanitizeApiKey(persistedEnv.GEMINI_API_KEY)
const persistedGeminiAuthMode = persistedEnv.GEMINI_AUTH_MODE
if (hasExplicitProviderSelection(processEnv)) {
for (let provider of PROVIDERS) {
if (provider === "anthropic") {
continue;
}
const env_key_name = `CLAUDE_CODE_USE_${provider.toUpperCase()}`
if (env_key_name in processEnv && isEnvTruthy(processEnv[env_key_name])) {
options.profile = provider;
}
}
}
if (options.profile === 'gemini') {
const env: NodeJS.ProcessEnv = {
...processEnv,
@@ -825,12 +842,18 @@ export async function buildStartupEnvFromProfile(options?: {
const persisted = options?.persisted ?? loadProfileFile()
// Saved /provider profiles should still win over provider-manager env that was
// auto-applied during startup. Only explicit shell/flag provider selection
// auto-applied during startup. Only an explicit shell/flag provider selection
// should bypass the persisted startup profile.
//
const profileManagedEnv = processEnv.CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED === '1'
if (hasExplicitProviderSelection(processEnv) && !profileManagedEnv) {
return processEnv
}
// If the user explicitly selected a provider via env, allow it to bypass
// the persisted profile only when we can prove it was managed by the
// persisted profile env itself.
//
// Practically: on initial startup, provider routing env vars can already
// be present due to earlier auto-application steps. We should still apply
// the persisted profile rather than returning early.
if (!persisted) {
return processEnv

View File

@@ -13,6 +13,7 @@ const RESTORED_KEYS = [
'CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED_ID',
'CLAUDE_CODE_USE_OPENAI',
'CLAUDE_CODE_USE_GEMINI',
'CLAUDE_CODE_USE_MISTRAL',
'CLAUDE_CODE_USE_GITHUB',
'CLAUDE_CODE_USE_BEDROCK',
'CLAUDE_CODE_USE_VERTEX',
@@ -24,6 +25,15 @@ const RESTORED_KEYS = [
'ANTHROPIC_BASE_URL',
'ANTHROPIC_MODEL',
'ANTHROPIC_API_KEY',
'GEMINI_BASE_URL',
'GEMINI_MODEL',
'GEMINI_API_KEY',
'GEMINI_AUTH_MODE',
'GEMINI_ACCESS_TOKEN',
'GOOGLE_API_KEY',
'MISTRAL_BASE_URL',
'MISTRAL_MODEL',
'MISTRAL_API_KEY',
] as const
type MockConfigState = {
@@ -98,6 +108,24 @@ function buildProfile(overrides: Partial<ProviderProfile> = {}): ProviderProfile
}
}
function buildMistralProfile(overrides: Partial<ProviderProfile> = {}): ProviderProfile {
return buildProfile({
provider: 'mistral',
baseUrl: 'https://api.mistral.ai/v1',
model: 'devstral-latest',
...overrides,
})
}
function buildGeminiProfile(overrides: Partial<ProviderProfile> = {}): ProviderProfile {
return buildProfile({
provider: 'gemini',
baseUrl: 'https://generativelanguage.googleapis.com/v1beta/openai',
model: 'gemini-3-flash-preview',
...overrides,
})
}
describe('applyProviderProfileToProcessEnv', () => {
test('openai profile clears competing gemini/github flags', async () => {
const { applyProviderProfileToProcessEnv } =
@@ -118,6 +146,36 @@ describe('applyProviderProfileToProcessEnv', () => {
expect(getFreshAPIProvider()).toBe('openai')
})
test('mistral profile sets CLAUDE_CODE_USE_MISTRAL and clears openai flags', async () => {
const { applyProviderProfileToProcessEnv } =
await importFreshProviderProfileModules()
process.env.CLAUDE_CODE_USE_OPENAI = '1'
applyProviderProfileToProcessEnv(buildMistralProfile())
const { getAPIProvider: getFreshAPIProvider } =
await importFreshProvidersModule()
expect(process.env.CLAUDE_CODE_USE_MISTRAL).toBe('1')
expect(process.env.CLAUDE_CODE_USE_OPENAI).toBeUndefined()
expect(process.env.MISTRAL_MODEL).toBe('devstral-latest')
expect(getFreshAPIProvider()).toBe('mistral')
})
test('gemini profile sets CLAUDE_CODE_USE_GEMINI and clears openai flags', async () => {
const { applyProviderProfileToProcessEnv } =
await importFreshProviderProfileModules()
process.env.CLAUDE_CODE_USE_OPENAI = '1'
applyProviderProfileToProcessEnv(buildGeminiProfile())
const { getAPIProvider: getFreshAPIProvider } =
await importFreshProvidersModule()
expect(process.env.CLAUDE_CODE_USE_GEMINI).toBe('1')
expect(process.env.CLAUDE_CODE_USE_OPENAI).toBeUndefined()
expect(process.env.GEMINI_MODEL).toBe('gemini-3-flash-preview')
expect(getFreshAPIProvider()).toBe('gemini')
})
test('anthropic profile clears competing gemini/github flags', async () => {
const { applyProviderProfileToProcessEnv } =
await importFreshProviderProfileModules()

View File

@@ -6,6 +6,14 @@ import {
} from './config.js'
import type { ModelOption } from './model/modelOptions.js'
import { getPrimaryModel, parseModelList } from './providerModels.js'
import {
createProfileFile,
saveProfileFile,
buildGeminiProfileEnv,
buildMistralProfileEnv,
buildOpenAIProfileEnv,
type ProviderProfile as ProviderProfileStartup,
} from './providerProfile.js'
export type ProviderPreset =
| 'anthropic'
@@ -60,7 +68,14 @@ function normalizeBaseUrl(value: string): string {
function sanitizeProfile(profile: ProviderProfile): ProviderProfile | null {
const id = trimValue(profile.id)
const name = trimValue(profile.name)
const provider = profile.provider === 'anthropic' ? 'anthropic' : 'openai'
const provider =
profile.provider === 'anthropic'
? 'anthropic'
: profile.provider === 'mistral'
? 'mistral'
: profile.provider === 'gemini'
? 'gemini'
: 'openai'
const baseUrl = normalizeBaseUrl(profile.baseUrl)
const model = trimValue(profile.model)
@@ -161,7 +176,7 @@ export function getProviderPresetDefaults(
}
case 'gemini':
return {
provider: 'openai',
provider: 'gemini',
name: 'Google Gemini',
baseUrl: 'https://generativelanguage.googleapis.com/v1beta/openai',
model: 'gemini-3-flash-preview',
@@ -170,7 +185,7 @@ export function getProviderPresetDefaults(
}
case 'mistral':
return {
provider: 'openai',
provider: 'mistral',
name: 'Mistral',
baseUrl: 'https://api.mistral.ai/v1',
model: 'devstral-latest',
@@ -317,6 +332,7 @@ function hasConflictingProviderFlagsForProfile(
return (
processEnv.CLAUDE_CODE_USE_GEMINI !== undefined ||
processEnv.CLAUDE_CODE_USE_MISTRAL !== undefined ||
processEnv.CLAUDE_CODE_USE_GITHUB !== undefined ||
processEnv.CLAUDE_CODE_USE_BEDROCK !== undefined ||
processEnv.CLAUDE_CODE_USE_VERTEX !== undefined ||
@@ -358,6 +374,38 @@ function isProcessEnvAlignedWithProfile(
)
}
if (profile.provider === 'mistral') {
return (
processEnv.CLAUDE_CODE_USE_MISTRAL !== undefined &&
processEnv.CLAUDE_CODE_USE_GEMINI === undefined &&
processEnv.CLAUDE_CODE_USE_OPENAI === undefined &&
processEnv.CLAUDE_CODE_USE_GITHUB === undefined &&
processEnv.CLAUDE_CODE_USE_BEDROCK === undefined &&
processEnv.CLAUDE_CODE_USE_VERTEX === undefined &&
processEnv.CLAUDE_CODE_USE_FOUNDRY === undefined &&
sameOptionalEnvValue(processEnv.MISTRAL_BASE_URL, profile.baseUrl) &&
sameOptionalEnvValue(processEnv.MISTRAL_MODEL, profile.model) &&
(!includeApiKey ||
sameOptionalEnvValue(processEnv.MISTRAL_API_KEY, profile.apiKey))
)
}
if (profile.provider === 'gemini') {
return (
processEnv.CLAUDE_CODE_USE_GEMINI !== undefined &&
processEnv.CLAUDE_CODE_USE_MISTRAL === undefined &&
processEnv.CLAUDE_CODE_USE_OPENAI === undefined &&
processEnv.CLAUDE_CODE_USE_GITHUB === undefined &&
processEnv.CLAUDE_CODE_USE_BEDROCK === undefined &&
processEnv.CLAUDE_CODE_USE_VERTEX === undefined &&
processEnv.CLAUDE_CODE_USE_FOUNDRY === undefined &&
sameOptionalEnvValue(processEnv.GEMINI_BASE_URL, profile.baseUrl) &&
sameOptionalEnvValue(processEnv.GEMINI_MODEL, profile.model) &&
(!includeApiKey ||
sameOptionalEnvValue(processEnv.GEMINI_API_KEY, profile.apiKey))
)
}
return (
processEnv.CLAUDE_CODE_USE_OPENAI !== undefined &&
processEnv.CLAUDE_CODE_USE_GEMINI === undefined &&
@@ -407,6 +455,17 @@ export function clearProviderProfileEnvFromProcessEnv(
delete processEnv[PROFILE_ENV_APPLIED_FLAG]
delete processEnv[PROFILE_ENV_APPLIED_ID]
delete processEnv.GEMINI_MODEL
delete processEnv.GEMINI_BASE_URL
delete processEnv.GEMINI_API_KEY
delete processEnv.GEMINI_AUTH_MODE
delete processEnv.GEMINI_ACCESS_TOKEN
delete processEnv.GOOGLE_API_KEY
delete processEnv.MISTRAL_MODEL
delete processEnv.MISTRAL_BASE_URL
delete processEnv.MISTRAL_API_KEY
// Clear provider-specific API keys
delete processEnv.MINIMAX_API_KEY
delete processEnv.NVIDIA_API_KEY
@@ -435,6 +494,40 @@ export function applyProviderProfileToProcessEnv(profile: ProviderProfile): void
return
}
if (profile.provider === 'mistral') {
process.env.CLAUDE_CODE_USE_MISTRAL = '1'
process.env.MISTRAL_BASE_URL = profile.baseUrl
process.env.MISTRAL_MODEL = profile.model
if (profile.apiKey) {
process.env.MISTRAL_API_KEY = profile.apiKey
} else {
delete process.env.MISTRAL_API_KEY
}
delete process.env.OPENAI_BASE_URL
delete process.env.OPENAI_API_KEY
delete process.env.OPENAI_MODEL
return
}
if (profile.provider === 'gemini') {
process.env.CLAUDE_CODE_USE_GEMINI = '1'
process.env.GEMINI_BASE_URL = profile.baseUrl
process.env.GEMINI_MODEL = profile.model
if (profile.apiKey) {
process.env.GEMINI_API_KEY = profile.apiKey
} else {
delete process.env.GEMINI_API_KEY
}
delete process.env.OPENAI_BASE_URL
delete process.env.OPENAI_API_KEY
delete process.env.OPENAI_MODEL
return
}
process.env.CLAUDE_CODE_USE_OPENAI = '1'
process.env.OPENAI_BASE_URL = profile.baseUrl
process.env.OPENAI_MODEL = getPrimaryModel(profile.model)
@@ -520,7 +613,7 @@ export function addProviderProfile(
const activeProfile = getActiveProviderProfile()
if (activeProfile?.id === profile.id) {
applyProviderProfileToProcessEnv(profile)
setActiveProviderProfile(profile.id)
clearActiveOpenAIModelOptionsCache()
}
@@ -699,6 +792,68 @@ export function setActiveProviderProfile(
}))
applyProviderProfileToProcessEnv(activeProfile)
// Keep startup persisted provider profile in sync so initial startup
// uses the selected provider/model.
const persistedProfile = (() => {
if (activeProfile.provider === 'anthropic') return 'openai' as const
return activeProfile.provider
})()
const profileEnv = (() => {
switch (activeProfile.provider) {
case 'gemini':
return (
buildGeminiProfileEnv({
model: activeProfile.model,
baseUrl: activeProfile.baseUrl,
apiKey: activeProfile.apiKey,
authMode: 'api-key',
processEnv: process.env,
}) ?? null
)
case 'mistral':
return (
buildMistralProfileEnv({
model: activeProfile.model,
baseUrl: activeProfile.baseUrl,
apiKey: activeProfile.apiKey,
processEnv: process.env,
}) ?? null
)
default:
// anthropic and all openai-compatible providers
return (
buildOpenAIProfileEnv({
model: activeProfile.model,
baseUrl: activeProfile.baseUrl,
apiKey: activeProfile.apiKey,
processEnv: process.env,
}) ?? null
)
}
})()
if (profileEnv) {
const startupProfile =
activeProfile.provider === 'anthropic'
? ({
profile: 'openai' as ProviderProfileStartup,
env: {
OPENAI_BASE_URL: activeProfile.baseUrl,
OPENAI_MODEL: activeProfile.model,
OPENAI_API_KEY: activeProfile.apiKey,
},
} as const)
: ({
profile: activeProfile.provider as ProviderProfileStartup,
env: profileEnv,
} as const)
const file = createProfileFile(startupProfile.profile, startupProfile.env)
saveProfileFile(file)
}
return activeProfile
}