fix: normalize /provider multi-model selection and semicolon parsing (#841)

* fix provider multi-model selection

* fix provider manager multi-model save path
This commit is contained in:
JATMN
2026-04-24 11:28:14 -07:00
committed by GitHub
parent b5f7047358
commit c4cb98a4f0
8 changed files with 439 additions and 59 deletions

View File

@@ -152,6 +152,7 @@ function createDeferred<T>(): {
function mockProviderProfilesModule(options?: { function mockProviderProfilesModule(options?: {
addProviderProfile?: (...args: unknown[]) => unknown addProviderProfile?: (...args: unknown[]) => unknown
getActiveProviderProfile?: () => unknown
getProviderProfiles?: () => unknown[] getProviderProfiles?: () => unknown[]
updateProviderProfile?: (...args: unknown[]) => unknown updateProviderProfile?: (...args: unknown[]) => unknown
setActiveProviderProfile?: (...args: unknown[]) => unknown setActiveProviderProfile?: (...args: unknown[]) => unknown
@@ -160,7 +161,7 @@ function mockProviderProfilesModule(options?: {
addProviderProfile: options?.addProviderProfile ?? (() => null), addProviderProfile: options?.addProviderProfile ?? (() => null),
applyActiveProviderProfileFromConfig: () => {}, applyActiveProviderProfileFromConfig: () => {},
deleteProviderProfile: () => ({ removed: false, activeProfileId: null }), deleteProviderProfile: () => ({ removed: false, activeProfileId: null }),
getActiveProviderProfile: () => null, getActiveProviderProfile: options?.getActiveProviderProfile ?? (() => null),
getProviderPresetDefaults: (preset: string) => getProviderPresetDefaults: (preset: string) =>
preset === 'ollama' preset === 'ollama'
? { ? {
@@ -190,6 +191,7 @@ function mockProviderManagerDependencies(
addProviderProfile?: (...args: unknown[]) => unknown addProviderProfile?: (...args: unknown[]) => unknown
applySavedProfileToCurrentSession?: (...args: unknown[]) => Promise<string | null> applySavedProfileToCurrentSession?: (...args: unknown[]) => Promise<string | null>
clearCodexCredentials?: () => { success: boolean; warning?: string } clearCodexCredentials?: () => { success: boolean; warning?: string }
getActiveProviderProfile?: () => unknown
getProviderProfiles?: () => unknown[] getProviderProfiles?: () => unknown[]
probeOllamaGenerationReadiness?: () => Promise<{ probeOllamaGenerationReadiness?: () => Promise<{
state: 'ready' | 'unreachable' | 'no_models' | 'generation_failed' state: 'ready' | 'unreachable' | 'no_models' | 'generation_failed'
@@ -229,6 +231,7 @@ function mockProviderManagerDependencies(
): void { ): void {
mockProviderProfilesModule({ mockProviderProfilesModule({
addProviderProfile: options?.addProviderProfile, addProviderProfile: options?.addProviderProfile,
getActiveProviderProfile: options?.getActiveProviderProfile,
getProviderProfiles: options?.getProviderProfiles, getProviderProfiles: options?.getProviderProfiles,
updateProviderProfile: options?.updateProviderProfile, updateProviderProfile: options?.updateProviderProfile,
setActiveProviderProfile: options?.setActiveProviderProfile, setActiveProviderProfile: options?.setActiveProviderProfile,
@@ -331,6 +334,10 @@ async function mountProviderManager(
options?: { options?: {
mode?: 'first-run' | 'manage' mode?: 'first-run' | 'manage'
onDone?: (result?: unknown) => void onDone?: (result?: unknown) => void
onChangeAppState?: (args: {
newState: unknown
oldState: unknown
}) => void
}, },
): Promise<{ ): Promise<{
stdin: PassThrough stdin: PassThrough
@@ -345,7 +352,7 @@ async function mountProviderManager(
}) })
root.render( root.render(
<AppStateProvider> <AppStateProvider onChangeAppState={options?.onChangeAppState}>
<KeybindingSetup> <KeybindingSetup>
<ProviderManager <ProviderManager
mode={options?.mode ?? 'manage'} mode={options?.mode ?? 'manage'}
@@ -907,6 +914,205 @@ test('ProviderManager keeps Codex OAuth as next-startup only when activating the
await mounted.dispose() await mounted.dispose()
}) })
test('ProviderManager activating a multi-model provider sets the session model to the primary model', async () => {
  // Reset provider-selection env vars so ambient shell config cannot leak
  // into the mounted ProviderManager.
  delete process.env.CLAUDE_CODE_SIMPLE
  delete process.env.CLAUDE_CODE_USE_GITHUB
  delete process.env.GITHUB_TOKEN
  delete process.env.GH_TOKEN
  // Saved profile whose `model` field holds two models; the first entry
  // ('gpt-5.4') is the primary model the session should switch to.
  const multiModelProfile = {
    id: 'provider_multi_model',
    provider: 'openai',
    name: 'Multi Model Provider',
    baseUrl: 'https://api.openai.com/v1',
    model: 'gpt-5.4; gpt-5.4-mini',
    apiKey: 'sk-test',
  }
  const setActiveProviderProfile = mock(() => multiModelProfile)
  // Every app-state transition is recorded so the assertions below can
  // inspect the exact mainLoopModel values that were set.
  const appStateChanges: Array<{ newState: any; oldState: any }> = []
  mockProviderManagerDependencies(
    () => undefined,
    async () => undefined,
    {
      getProviderProfiles: () => [multiModelProfile],
      setActiveProviderProfile,
    },
  )
  // Cache-busting query string forces a fresh module instance per test.
  const nonce = `${Date.now()}-${Math.random()}`
  const { ProviderManager } = await import(`./ProviderManager.js?ts=${nonce}`)
  const mounted = await mountProviderManager(ProviderManager, {
    onChangeAppState: args => {
      appStateChanges.push(args as { newState: any; oldState: any })
    },
  })
  // Wait for the manager menu to render before driving keyboard input.
  await waitForFrameOutput(
    mounted.getOutput,
    frame =>
      frame.includes('Provider manager') &&
      frame.includes('Set active provider'),
  )
  // 'j' moves the menu selection down; '\r' confirms the selection.
  mounted.stdin.write('j')
  await Bun.sleep(25)
  mounted.stdin.write('\r')
  // The provider-selection list should now show the saved profile.
  await waitForFrameOutput(
    mounted.getOutput,
    frame =>
      frame.includes('Set active provider') &&
      frame.includes('Multi Model Provider'),
  )
  await Bun.sleep(25)
  mounted.stdin.write('\r')
  await waitForCondition(() => setActiveProviderProfile.mock.calls.length > 0)
  // Activation must switch the session to the primary model only.
  await waitForCondition(() =>
    appStateChanges.some(
      ({ newState, oldState }) =>
        newState.mainLoopModel === 'gpt-5.4' &&
        oldState.mainLoopModel !== newState.mainLoopModel,
    ),
  )
  expect(setActiveProviderProfile).toHaveBeenCalledWith('provider_multi_model')
  // The per-session model override is cleared alongside the model switch.
  expect(
    appStateChanges.some(
      ({ newState }) =>
        newState.mainLoopModel === 'gpt-5.4' &&
        newState.mainLoopModelForSession === null,
    ),
  ).toBe(true)
  // The raw multi-model string must never be used as the session model.
  expect(
    appStateChanges.some(
      ({ newState }) => newState.mainLoopModel === 'gpt-5.4; gpt-5.4-mini',
    ),
  ).toBe(false)
  await mounted.dispose()
})
test('ProviderManager editing an active multi-model provider keeps app state on the primary model', async () => {
  // Reset provider-selection env vars so ambient shell config cannot leak in.
  delete process.env.CLAUDE_CODE_SIMPLE
  delete process.env.CLAUDE_CODE_USE_GITHUB
  delete process.env.GITHUB_TOKEN
  delete process.env.GH_TOKEN
  // Active profile with a two-model list; 'gpt-5.4' is the primary model.
  const multiModelProfile = {
    id: 'provider_multi_model',
    provider: 'openai',
    name: 'Multi Model Provider',
    baseUrl: 'https://api.openai.com/v1',
    model: 'gpt-5.4; gpt-5.4-mini',
    apiKey: 'sk-test',
  }
  const updateProviderProfile = mock(() => multiModelProfile)
  // Record every app-state transition for the assertions below.
  const appStateChanges: Array<{ newState: any; oldState: any }> = []
  mockProviderManagerDependencies(
    () => undefined,
    async () => undefined,
    {
      // The profile under edit is also the active profile, so saving it
      // is expected to refresh the session model.
      getActiveProviderProfile: () => multiModelProfile,
      getProviderProfiles: () => [multiModelProfile],
      updateProviderProfile,
    },
  )
  // Cache-busting query string forces a fresh module instance per test.
  const nonce = `${Date.now()}-${Math.random()}`
  const { ProviderManager } = await import(`./ProviderManager.js?ts=${nonce}`)
  const mounted = await mountProviderManager(ProviderManager, {
    onChangeAppState: args => {
      appStateChanges.push(args as { newState: any; oldState: any })
    },
  })
  await waitForFrameOutput(
    mounted.getOutput,
    frame =>
      frame.includes('Provider manager') &&
      frame.includes('Edit provider'),
  )
  // Two 'j' presses move the menu selection to "Edit provider"; '\r' confirms.
  mounted.stdin.write('j')
  await Bun.sleep(25)
  mounted.stdin.write('j')
  await Bun.sleep(25)
  mounted.stdin.write('\r')
  await waitForFrameOutput(
    mounted.getOutput,
    frame =>
      frame.includes('Edit provider') &&
      frame.includes('Multi Model Provider'),
  )
  await Bun.sleep(25)
  mounted.stdin.write('\r')
  // Walk the four-step edit wizard, accepting the existing value each time.
  await waitForFrameOutput(
    mounted.getOutput,
    frame =>
      frame.includes('Edit provider profile') &&
      frame.includes('Step 1 of 4'),
  )
  mounted.stdin.write('\r')
  await waitForFrameOutput(
    mounted.getOutput,
    frame => frame.includes('Step 2 of 4'),
  )
  mounted.stdin.write('\r')
  await waitForFrameOutput(
    mounted.getOutput,
    frame => frame.includes('Step 3 of 4'),
  )
  mounted.stdin.write('\r')
  await waitForFrameOutput(
    mounted.getOutput,
    frame => frame.includes('Step 4 of 4'),
  )
  mounted.stdin.write('\r')
  await waitForCondition(() => updateProviderProfile.mock.calls.length > 0)
  // Saving must switch the session to the primary model only.
  await waitForCondition(() =>
    appStateChanges.some(
      ({ newState, oldState }) =>
        newState.mainLoopModel === 'gpt-5.4' &&
        oldState.mainLoopModel !== newState.mainLoopModel,
    ),
  )
  // The persisted profile keeps the full multi-model string...
  expect(updateProviderProfile).toHaveBeenCalledWith(
    'provider_multi_model',
    expect.objectContaining({
      model: 'gpt-5.4; gpt-5.4-mini',
    }),
  )
  // ...while app state holds only the primary model, with no session override.
  expect(
    appStateChanges.some(
      ({ newState }) =>
        newState.mainLoopModel === 'gpt-5.4' &&
        newState.mainLoopModelForSession === null,
    ),
  ).toBe(true)
  // The raw multi-model string must never be used as the session model.
  expect(
    appStateChanges.some(
      ({ newState }) => newState.mainLoopModel === 'gpt-5.4; gpt-5.4-mini',
    ),
  ).toBe(false)
  await mounted.dispose()
})
test('ProviderManager resolves Codex OAuth state from async storage without sync reads in render flow', async () => { test('ProviderManager resolves Codex OAuth state from async storage without sync reads in render flow', async () => {
delete process.env.CLAUDE_CODE_SIMPLE delete process.env.CLAUDE_CODE_SIMPLE
delete process.env.CLAUDE_CODE_USE_GITHUB delete process.env.CLAUDE_CODE_USE_GITHUB

View File

@@ -125,8 +125,8 @@ const FORM_STEPS: Array<{
{ {
key: 'model', key: 'model',
label: 'Default model', label: 'Default model',
placeholder: 'e.g. llama3.1:8b or glm-4.7, glm-4.7-flash', placeholder: 'e.g. llama3.1:8b or glm-4.7; glm-4.7-flash',
helpText: 'Model name(s) to use. Separate multiple with commas; first is default.', helpText: 'Model name(s) to use. Separate multiple with ";" or ","; first is default.',
}, },
{ {
key: 'apiKey', key: 'apiKey',
@@ -780,19 +780,14 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
// Update the session model to the new provider's first model. // Update the session model to the new provider's first model.
// persistActiveProviderProfileModel (called by onChangeAppState) will // persistActiveProviderProfileModel (called by onChangeAppState) will
// not overwrite the multi-model list because it checks if the model // not overwrite the multi-model list because it checks if the model
// is already in the profile's comma-separated model list. // is already in the provider's configured model list.
const newModel = getPrimaryModel(active.model) const newModel = getPrimaryModel(active.model)
setAppState(prev => ({ setAppState(prev => ({
...prev, ...prev,
mainLoopModel: newModel, mainLoopModel: newModel,
}))
providerLabel = active.name
setAppState(prev => ({
...prev,
mainLoopModel: active.model,
mainLoopModelForSession: null, mainLoopModelForSession: null,
})) }))
providerLabel = active.name
const settingsOverrideError = const settingsOverrideError =
clearStartupProviderOverrideFromUserSettings() clearStartupProviderOverrideFromUserSettings()
const isActiveCodexOAuth = isCodexOAuthProfile( const isActiveCodexOAuth = isCodexOAuthProfile(
@@ -996,7 +991,7 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
if (isActiveSavedProfile) { if (isActiveSavedProfile) {
setAppState(prev => ({ setAppState(prev => ({
...prev, ...prev,
mainLoopModel: saved.model, mainLoopModel: getPrimaryModel(saved.model),
mainLoopModelForSession: null, mainLoopModelForSession: null,
})) }))
} }

View File

@@ -35,6 +35,21 @@ describe('parseModelList', () => {
]) ])
}) })
// Semicolons are an accepted separator alongside commas.
test('splits semicolon-separated models', () => {
  const models = parseModelList('glm-4.7; glm-4.7-flash')
  expect(models).toEqual(['glm-4.7', 'glm-4.7-flash'])
})

// Both separators may appear in the same field.
test('splits mixed comma- and semicolon-separated models', () => {
  const models = parseModelList('gpt-5.4; gpt-5.4-mini, o3')
  expect(models).toEqual(['gpt-5.4', 'gpt-5.4-mini', 'o3'])
})
test('returns empty array for empty string', () => { test('returns empty array for empty string', () => {
expect(parseModelList('')).toEqual([]) expect(parseModelList('')).toEqual([])
}) })
@@ -62,6 +77,10 @@ describe('getPrimaryModel', () => {
expect(getPrimaryModel('glm-4.7, glm-4.7-flash')).toBe('glm-4.7') expect(getPrimaryModel('glm-4.7, glm-4.7-flash')).toBe('glm-4.7')
}) })
// The first entry wins regardless of which separator is used.
test('returns first model from semicolon-separated list', () => {
  const primary = getPrimaryModel('glm-4.7; glm-4.7-flash')
  expect(primary).toBe('glm-4.7')
})
test('returns the only model when single model is provided', () => { test('returns the only model when single model is provided', () => {
expect(getPrimaryModel('llama3.1:8b')).toBe('llama3.1:8b') expect(getPrimaryModel('llama3.1:8b')).toBe('llama3.1:8b')
}) })
@@ -86,6 +105,10 @@ describe('hasMultipleModels', () => {
expect(hasMultipleModels('glm-4.7, glm-4.7-flash')).toBe(true) expect(hasMultipleModels('glm-4.7, glm-4.7-flash')).toBe(true)
}) })
// A semicolon-delimited field counts as multiple models.
test('returns true for semicolon-separated models', () => {
  const result = hasMultipleModels('glm-4.7; glm-4.7-flash')
  expect(result).toBe(true)
})
test('returns false for a single model', () => { test('returns false for a single model', () => {
expect(hasMultipleModels('llama3.1:8b')).toBe(false) expect(hasMultipleModels('llama3.1:8b')).toBe(false)
}) })

View File

@@ -1,28 +1,30 @@
/** /**
* Utility functions for parsing comma-separated model names in provider profiles. * Utility functions for parsing provider-profile model lists.
* *
* Example: "glm-4.7, glm-4.7-flash" -> ["glm-4.7", "glm-4.7-flash"] * Examples:
* Single model: "llama3.1:8b" -> ["llama3.1:8b"] * - "glm-4.7, glm-4.7-flash" -> ["glm-4.7", "glm-4.7-flash"]
* - "glm-4.7; glm-4.7-flash" -> ["glm-4.7", "glm-4.7-flash"]
* - "llama3.1:8b" -> ["llama3.1:8b"]
*/ */
/** /**
* Splits a comma-separated model field into an array of trimmed model names, * Splits a comma- or semicolon-separated model field into an array of trimmed
* filtering out any empty entries. * model names, filtering out any empty entries.
*/ */
export function parseModelList(modelField: string): string[] { export function parseModelList(modelField: string): string[] {
return modelField return modelField
.split(',') .split(/[;,]/)
.map((part) => part.trim()) .map((part) => part.trim())
.filter((part) => part.length > 0) .filter((part) => part.length > 0)
} }
/** /**
* Returns the first (primary) model from a comma-separated model field. * Returns the first (primary) model from a model-list field.
* Falls back to the original string if parsing yields no results. * Falls back to the trimmed original string if parsing yields no results.
*/ */
export function getPrimaryModel(modelField: string): string { export function getPrimaryModel(modelField: string): string {
const models = parseModelList(modelField) const models = parseModelList(modelField)
return models.length > 0 ? models[0] : modelField return models.length > 0 ? models[0] : modelField.trim()
} }
/** /**

View File

@@ -391,6 +391,21 @@ test('gemini profiles accept google api key fallback', () => {
}) })
}) })
// Only the first entry of a semicolon list should land in GEMINI_MODEL.
test('gemini profiles use the first model from a semicolon-separated list', () => {
  const env = buildGeminiProfileEnv({
    model: 'gemini-2.5-pro; gemini-2.5-flash',
    authMode: 'api-key',
    apiKey: 'gem-live',
    processEnv: {},
  })
  assert.deepEqual(env, {
    GEMINI_MODEL: 'gemini-2.5-pro',
    GEMINI_AUTH_MODE: 'api-key',
    GEMINI_API_KEY: 'gem-live',
  })
})
test('gemini profiles support access-token auth mode without persisting a key', () => { test('gemini profiles support access-token auth mode without persisting a key', () => {
const env = buildGeminiProfileEnv({ const env = buildGeminiProfileEnv({
authMode: 'access-token', authMode: 'access-token',
@@ -766,6 +781,21 @@ test('openai profiles ignore codex shell transport hints', () => {
}) })
}) })
// The default base URL is filled in and only the primary model is exported.
test('openai profiles use the first model from a semicolon-separated list', () => {
  const env = buildOpenAIProfileEnv({
    model: 'gpt-5.4; gpt-5.4-mini',
    goal: 'balanced',
    apiKey: 'sk-live',
    processEnv: {},
  })
  assert.deepEqual(env, {
    OPENAI_MODEL: 'gpt-5.4',
    OPENAI_BASE_URL: 'https://api.openai.com/v1',
    OPENAI_API_KEY: 'sk-live',
  })
})
test('openai profiles ignore poisoned shell model and base url values', () => { test('openai profiles ignore poisoned shell model and base url values', () => {
const env = buildOpenAIProfileEnv({ const env = buildOpenAIProfileEnv({
goal: 'balanced', goal: 'balanced',
@@ -800,6 +830,22 @@ test('startup env ignores poisoned persisted openai model and base url', async (
assert.equal(env.OPENAI_BASE_URL, 'https://api.openai.com/v1') assert.equal(env.OPENAI_BASE_URL, 'https://api.openai.com/v1')
}) })
// A persisted two-model list should surface only its first model at startup.
test('startup env normalizes a semicolon-separated persisted openai model list', async () => {
  const persisted = profile('openai', {
    OPENAI_API_KEY: 'sk-live',
    OPENAI_MODEL: 'gpt-5.4; gpt-5.4-mini',
    OPENAI_BASE_URL: 'https://api.openai.com/v1',
  })
  const env = await buildStartupEnvFromProfile({
    persisted,
    processEnv: {},
  })
  assert.equal(env.CLAUDE_CODE_USE_OPENAI, '1')
  assert.equal(env.OPENAI_MODEL, 'gpt-5.4')
  assert.equal(env.OPENAI_API_KEY, 'sk-live')
  assert.equal(env.OPENAI_BASE_URL, 'https://api.openai.com/v1')
})
test('auto profile falls back to openai when no viable ollama model exists', () => { test('auto profile falls back to openai when no viable ollama model exists', () => {
assert.equal(selectAutoProfile(null), 'openai') assert.equal(selectAutoProfile(null), 'openai')
assert.equal(selectAutoProfile('qwen2.5-coder:7b'), 'ollama') assert.equal(selectAutoProfile('qwen2.5-coder:7b'), 'ollama')

View File

@@ -22,6 +22,7 @@ import {
sanitizeApiKey, sanitizeApiKey,
sanitizeProviderConfigValue, sanitizeProviderConfigValue,
} from './providerSecrets.js' } from './providerSecrets.js'
import { getPrimaryModel } from './providerModels.js'
export { export {
maskSecretForDisplay, maskSecretForDisplay,
@@ -147,6 +148,18 @@ function resolveProfileFilePath(options?: ProfileFileLocation): string {
return resolve(options?.cwd ?? process.cwd(), PROFILE_FILE_NAME) return resolve(options?.cwd ?? process.cwd(), PROFILE_FILE_NAME)
} }
/**
 * Collapses a profile `model` field down to its primary model name.
 * Returns undefined for null/undefined/blank input, or when the primary
 * model resolves to an empty string.
 */
function normalizeProfileModel(
  value: string | undefined | null,
): string | undefined {
  if (value == null) {
    return undefined
  }
  const trimmed = value.trim()
  if (trimmed.length === 0) {
    return undefined
  }
  const primary = getPrimaryModel(trimmed)
  return primary === '' ? undefined : primary
}
export function isProviderProfile(value: unknown): value is ProviderProfile { export function isProviderProfile(value: unknown): value is ProviderProfile {
return ( return (
value === 'openai' || value === 'openai' ||
@@ -207,8 +220,12 @@ export function buildNvidiaNimProfileEnv(options: {
sanitizeProviderConfigValue(processEnv.OPENAI_BASE_URL, secretSource) || sanitizeProviderConfigValue(processEnv.OPENAI_BASE_URL, secretSource) ||
defaultBaseUrl, defaultBaseUrl,
OPENAI_MODEL: OPENAI_MODEL:
sanitizeProviderConfigValue(options.model, secretSource) || normalizeProfileModel(
sanitizeProviderConfigValue(processEnv.OPENAI_MODEL, secretSource) || sanitizeProviderConfigValue(options.model, secretSource),
) ||
normalizeProfileModel(
sanitizeProviderConfigValue(processEnv.OPENAI_MODEL, secretSource),
) ||
'nvidia/llama-3.1-nemotron-70b-instruct', 'nvidia/llama-3.1-nemotron-70b-instruct',
OPENAI_API_KEY: key, OPENAI_API_KEY: key,
NVIDIA_NIM: '1', NVIDIA_NIM: '1',
@@ -237,8 +254,12 @@ export function buildMiniMaxProfileEnv(options: {
sanitizeProviderConfigValue(processEnv.OPENAI_BASE_URL, secretSource) || sanitizeProviderConfigValue(processEnv.OPENAI_BASE_URL, secretSource) ||
defaultBaseUrl, defaultBaseUrl,
OPENAI_MODEL: OPENAI_MODEL:
sanitizeProviderConfigValue(options.model, secretSource) || normalizeProfileModel(
sanitizeProviderConfigValue(processEnv.OPENAI_MODEL, secretSource) || sanitizeProviderConfigValue(options.model, secretSource),
) ||
normalizeProfileModel(
sanitizeProviderConfigValue(processEnv.OPENAI_MODEL, secretSource),
) ||
defaultModel, defaultModel,
OPENAI_API_KEY: key, OPENAI_API_KEY: key,
MINIMAX_API_KEY: key, MINIMAX_API_KEY: key,
@@ -270,8 +291,12 @@ export function buildGeminiProfileEnv(options: {
const env: ProfileEnv = { const env: ProfileEnv = {
GEMINI_AUTH_MODE: authMode, GEMINI_AUTH_MODE: authMode,
GEMINI_MODEL: GEMINI_MODEL:
sanitizeProviderConfigValue(options.model, secretSource) || normalizeProfileModel(
sanitizeProviderConfigValue(processEnv.GEMINI_MODEL, secretSource) || sanitizeProviderConfigValue(options.model, secretSource),
) ||
normalizeProfileModel(
sanitizeProviderConfigValue(processEnv.GEMINI_MODEL, secretSource),
) ||
DEFAULT_GEMINI_MODEL, DEFAULT_GEMINI_MODEL,
} }
@@ -304,9 +329,11 @@ export function buildOpenAIProfileEnv(options: {
const defaultModel = getGoalDefaultOpenAIModel(options.goal) const defaultModel = getGoalDefaultOpenAIModel(options.goal)
const secretSource: SecretValueSource = { OPENAI_API_KEY: key } const secretSource: SecretValueSource = { OPENAI_API_KEY: key }
const shellOpenAIModel = sanitizeProviderConfigValue( const shellOpenAIModel = normalizeProfileModel(
processEnv.OPENAI_MODEL, sanitizeProviderConfigValue(
secretSource, processEnv.OPENAI_MODEL,
secretSource,
),
) )
const shellOpenAIBaseUrl = sanitizeProviderConfigValue( const shellOpenAIBaseUrl = sanitizeProviderConfigValue(
processEnv.OPENAI_BASE_URL, processEnv.OPENAI_BASE_URL,
@@ -325,7 +352,9 @@ export function buildOpenAIProfileEnv(options: {
(useShellOpenAIConfig ? shellOpenAIBaseUrl : undefined) || (useShellOpenAIConfig ? shellOpenAIBaseUrl : undefined) ||
DEFAULT_OPENAI_BASE_URL, DEFAULT_OPENAI_BASE_URL,
OPENAI_MODEL: OPENAI_MODEL:
sanitizeProviderConfigValue(options.model, secretSource) || normalizeProfileModel(
sanitizeProviderConfigValue(options.model, secretSource),
) ||
(useShellOpenAIConfig ? shellOpenAIModel : undefined) || (useShellOpenAIConfig ? shellOpenAIModel : undefined) ||
defaultModel, defaultModel,
OPENAI_API_KEY: key, OPENAI_API_KEY: key,
@@ -382,10 +411,14 @@ export function buildMistralProfileEnv(options: {
const env: ProfileEnv = { const env: ProfileEnv = {
MISTRAL_API_KEY: key, MISTRAL_API_KEY: key,
MISTRAL_MODEL: MISTRAL_MODEL:
sanitizeProviderConfigValue(options.model, { MISTRAL_API_KEY: key }) || normalizeProfileModel(
sanitizeProviderConfigValue( sanitizeProviderConfigValue(options.model, { MISTRAL_API_KEY: key }),
processEnv.MISTRAL_MODEL, ) ||
{ MISTRAL_API_KEY: key }, normalizeProfileModel(
sanitizeProviderConfigValue(
processEnv.MISTRAL_MODEL,
{ MISTRAL_API_KEY: key },
),
) || ) ||
DEFAULT_MISTRAL_MODEL, DEFAULT_MISTRAL_MODEL,
} }
@@ -578,33 +611,41 @@ export async function buildLaunchEnv(options: {
options.persisted?.profile === options.profile options.persisted?.profile === options.profile
? options.persisted.env ?? {} ? options.persisted.env ?? {}
: {} : {}
const persistedOpenAIModel = sanitizeProviderConfigValue( const persistedOpenAIModel = normalizeProfileModel(
persistedEnv.OPENAI_MODEL, sanitizeProviderConfigValue(
persistedEnv, persistedEnv.OPENAI_MODEL,
persistedEnv,
),
) )
const persistedOpenAIBaseUrl = sanitizeProviderConfigValue( const persistedOpenAIBaseUrl = sanitizeProviderConfigValue(
persistedEnv.OPENAI_BASE_URL, persistedEnv.OPENAI_BASE_URL,
persistedEnv, persistedEnv,
) )
const shellOpenAIModel = sanitizeProviderConfigValue( const shellOpenAIModel = normalizeProfileModel(
processEnv.OPENAI_MODEL, sanitizeProviderConfigValue(
processEnv as SecretValueSource, processEnv.OPENAI_MODEL,
processEnv as SecretValueSource,
),
) )
const shellOpenAIBaseUrl = sanitizeProviderConfigValue( const shellOpenAIBaseUrl = sanitizeProviderConfigValue(
processEnv.OPENAI_BASE_URL, processEnv.OPENAI_BASE_URL,
processEnv as SecretValueSource, processEnv as SecretValueSource,
) )
const persistedGeminiModel = sanitizeProviderConfigValue( const persistedGeminiModel = normalizeProfileModel(
persistedEnv.GEMINI_MODEL, sanitizeProviderConfigValue(
persistedEnv, persistedEnv.GEMINI_MODEL,
persistedEnv,
),
) )
const persistedGeminiBaseUrl = sanitizeProviderConfigValue( const persistedGeminiBaseUrl = sanitizeProviderConfigValue(
persistedEnv.GEMINI_BASE_URL, persistedEnv.GEMINI_BASE_URL,
persistedEnv, persistedEnv,
) )
const shellGeminiModel = sanitizeProviderConfigValue( const shellGeminiModel = normalizeProfileModel(
processEnv.GEMINI_MODEL, sanitizeProviderConfigValue(
processEnv as SecretValueSource, processEnv.GEMINI_MODEL,
processEnv as SecretValueSource,
),
) )
const shellGeminiBaseUrl = sanitizeProviderConfigValue( const shellGeminiBaseUrl = sanitizeProviderConfigValue(
processEnv.GEMINI_BASE_URL, processEnv.GEMINI_BASE_URL,
@@ -702,11 +743,15 @@ export async function buildLaunchEnv(options: {
delete env.CLAUDE_CODE_USE_VERTEX delete env.CLAUDE_CODE_USE_VERTEX
delete env.CLAUDE_CODE_USE_FOUNDRY delete env.CLAUDE_CODE_USE_FOUNDRY
const shellMistralModel = sanitizeProviderConfigValue( const shellMistralModel = normalizeProfileModel(
processEnv.MISTRAL_MODEL, sanitizeProviderConfigValue(
processEnv.MISTRAL_MODEL,
),
) )
const persistedMistralModel = sanitizeProviderConfigValue( const persistedMistralModel = normalizeProfileModel(
persistedEnv.MISTRAL_MODEL, sanitizeProviderConfigValue(
persistedEnv.MISTRAL_MODEL,
),
) )
const shellMistralBaseUrl = sanitizeProviderConfigValue( const shellMistralBaseUrl = sanitizeProviderConfigValue(
processEnv.MISTRAL_BASE_URL, processEnv.MISTRAL_BASE_URL,

View File

@@ -221,6 +221,23 @@ describe('applyProviderProfileToProcessEnv', () => {
expect(process.env.OPENAI_BASE_URL).toBe('https://api.openai.com/v1') expect(process.env.OPENAI_BASE_URL).toBe('https://api.openai.com/v1')
}) })
// Three semicolon-separated models; only the first should be exported.
test('openai profile with semicolon-separated multi-model string sets only first model in OPENAI_MODEL', async () => {
  const { applyProviderProfileToProcessEnv } =
    await importFreshProviderProfileModules()
  const openaiProfile = buildProfile({
    provider: 'openai',
    baseUrl: 'https://api.openai.com/v1',
    model: 'glm-4.7; glm-4.7-flash; glm-4.7-plus',
  })
  applyProviderProfileToProcessEnv(openaiProfile)
  expect(process.env.OPENAI_MODEL).toBe('glm-4.7')
  expect(String(process.env.CLAUDE_CODE_USE_OPENAI)).toBe('1')
  expect(process.env.OPENAI_BASE_URL).toBe('https://api.openai.com/v1')
})
test('anthropic profile with multi-model string sets only first model in ANTHROPIC_MODEL', async () => { test('anthropic profile with multi-model string sets only first model in ANTHROPIC_MODEL', async () => {
const { applyProviderProfileToProcessEnv } = const { applyProviderProfileToProcessEnv } =
await importFreshProviderProfileModules() await importFreshProviderProfileModules()
@@ -236,6 +253,34 @@ describe('applyProviderProfileToProcessEnv', () => {
expect(process.env.ANTHROPIC_MODEL).toBe('claude-sonnet-4-6') expect(process.env.ANTHROPIC_MODEL).toBe('claude-sonnet-4-6')
expect(process.env.ANTHROPIC_BASE_URL).toBe('https://api.anthropic.com') expect(process.env.ANTHROPIC_BASE_URL).toBe('https://api.anthropic.com')
}) })
test('gemini profile with semicolon-separated multi-model string sets only first model in GEMINI_MODEL', async () => {
const { applyProviderProfileToProcessEnv } =
await importFreshProviderProfileModules()
applyProviderProfileToProcessEnv(
buildGeminiProfile({
model: 'gemini-3-flash-preview; gemini-3-pro-preview',
}),
)
expect(process.env.GEMINI_MODEL).toBe('gemini-3-flash-preview')
expect(process.env.CLAUDE_CODE_USE_GEMINI).toBe('1')
})
test('mistral profile with semicolon-separated multi-model string sets only first model in MISTRAL_MODEL', async () => {
const { applyProviderProfileToProcessEnv } =
await importFreshProviderProfileModules()
applyProviderProfileToProcessEnv(
buildMistralProfile({
model: 'devstral-latest; mistral-medium-latest',
}),
)
expect(process.env.MISTRAL_MODEL).toBe('devstral-latest')
expect(process.env.CLAUDE_CODE_USE_MISTRAL).toBe('1')
})
}) })
describe('applyActiveProviderProfileFromConfig', () => { describe('applyActiveProviderProfileFromConfig', () => {
@@ -837,6 +882,24 @@ describe('getProfileModelOptions', () => {
]) ])
}) })
// One option per parsed model, all labelled with the provider name.
test('generates options for semicolon-separated multi-model profile', async () => {
  const { getProfileModelOptions } =
    await importFreshProviderProfileModules()
  const testProfile = buildProfile({
    name: 'Test Provider',
    model: 'glm-4.7; glm-4.7-flash; glm-4.7-plus',
  })
  const options = getProfileModelOptions(testProfile)
  const expectedModels = ['glm-4.7', 'glm-4.7-flash', 'glm-4.7-plus']
  expect(options).toEqual(
    expectedModels.map(model => ({
      value: model,
      label: model,
      description: 'Provider: Test Provider',
    })),
  )
})
test('returns single option for single-model profile', async () => { test('returns single option for single-model profile', async () => {
const { getProfileModelOptions } = const { getProfileModelOptions } =
await importFreshProviderProfileModules() await importFreshProviderProfileModules()

View File

@@ -458,7 +458,7 @@ function isProcessEnvAlignedWithProfile(
processEnv.CLAUDE_CODE_USE_VERTEX === undefined && processEnv.CLAUDE_CODE_USE_VERTEX === undefined &&
processEnv.CLAUDE_CODE_USE_FOUNDRY === undefined && processEnv.CLAUDE_CODE_USE_FOUNDRY === undefined &&
sameOptionalEnvValue(processEnv.MISTRAL_BASE_URL, profile.baseUrl) && sameOptionalEnvValue(processEnv.MISTRAL_BASE_URL, profile.baseUrl) &&
sameOptionalEnvValue(processEnv.MISTRAL_MODEL, profile.model) && sameOptionalEnvValue(processEnv.MISTRAL_MODEL, getPrimaryModel(profile.model)) &&
(!includeApiKey || (!includeApiKey ||
sameOptionalEnvValue(processEnv.MISTRAL_API_KEY, profile.apiKey)) sameOptionalEnvValue(processEnv.MISTRAL_API_KEY, profile.apiKey))
) )
@@ -474,7 +474,7 @@ function isProcessEnvAlignedWithProfile(
processEnv.CLAUDE_CODE_USE_VERTEX === undefined && processEnv.CLAUDE_CODE_USE_VERTEX === undefined &&
processEnv.CLAUDE_CODE_USE_FOUNDRY === undefined && processEnv.CLAUDE_CODE_USE_FOUNDRY === undefined &&
sameOptionalEnvValue(processEnv.GEMINI_BASE_URL, profile.baseUrl) && sameOptionalEnvValue(processEnv.GEMINI_BASE_URL, profile.baseUrl) &&
sameOptionalEnvValue(processEnv.GEMINI_MODEL, profile.model) && sameOptionalEnvValue(processEnv.GEMINI_MODEL, getPrimaryModel(profile.model)) &&
(!includeApiKey || (!includeApiKey ||
sameOptionalEnvValue(processEnv.GEMINI_API_KEY, profile.apiKey)) sameOptionalEnvValue(processEnv.GEMINI_API_KEY, profile.apiKey))
) )
@@ -578,7 +578,7 @@ export function applyProviderProfileToProcessEnv(profile: ProviderProfile): void
if (profile.provider === 'mistral') { if (profile.provider === 'mistral') {
process.env.CLAUDE_CODE_USE_MISTRAL = '1' process.env.CLAUDE_CODE_USE_MISTRAL = '1'
process.env.MISTRAL_BASE_URL = profile.baseUrl process.env.MISTRAL_BASE_URL = profile.baseUrl
process.env.MISTRAL_MODEL = profile.model process.env.MISTRAL_MODEL = getPrimaryModel(profile.model)
if (profile.apiKey) { if (profile.apiKey) {
process.env.MISTRAL_API_KEY = profile.apiKey process.env.MISTRAL_API_KEY = profile.apiKey
@@ -595,7 +595,7 @@ export function applyProviderProfileToProcessEnv(profile: ProviderProfile): void
if (profile.provider === 'gemini') { if (profile.provider === 'gemini') {
process.env.CLAUDE_CODE_USE_GEMINI = '1' process.env.CLAUDE_CODE_USE_GEMINI = '1'
process.env.GEMINI_BASE_URL = profile.baseUrl process.env.GEMINI_BASE_URL = profile.baseUrl
process.env.GEMINI_MODEL = profile.model process.env.GEMINI_MODEL = getPrimaryModel(profile.model)
if (profile.apiKey) { if (profile.apiKey) {
process.env.GEMINI_API_KEY = profile.apiKey process.env.GEMINI_API_KEY = profile.apiKey
@@ -930,7 +930,7 @@ export function setActiveProviderProfile(
case 'gemini': case 'gemini':
return ( return (
buildGeminiProfileEnv({ buildGeminiProfileEnv({
model: activeProfile.model, model: getPrimaryModel(activeProfile.model),
baseUrl: activeProfile.baseUrl, baseUrl: activeProfile.baseUrl,
apiKey: activeProfile.apiKey, apiKey: activeProfile.apiKey,
authMode: 'api-key', authMode: 'api-key',
@@ -940,7 +940,7 @@ export function setActiveProviderProfile(
case 'mistral': case 'mistral':
return ( return (
buildMistralProfileEnv({ buildMistralProfileEnv({
model: activeProfile.model, model: getPrimaryModel(activeProfile.model),
baseUrl: activeProfile.baseUrl, baseUrl: activeProfile.baseUrl,
apiKey: activeProfile.apiKey, apiKey: activeProfile.apiKey,
processEnv: process.env, processEnv: process.env,
@@ -951,7 +951,7 @@ export function setActiveProviderProfile(
? ( ? (
buildOpenAIProfileEnv({ buildOpenAIProfileEnv({
goal: 'balanced', goal: 'balanced',
model: activeProfile.model, model: getPrimaryModel(activeProfile.model),
baseUrl: activeProfile.baseUrl, baseUrl: activeProfile.baseUrl,
apiKey: activeProfile.apiKey, apiKey: activeProfile.apiKey,
processEnv: process.env, processEnv: process.env,
@@ -968,7 +968,7 @@ export function setActiveProviderProfile(
profile: 'openai' as ProviderProfileStartup, profile: 'openai' as ProviderProfileStartup,
env: { env: {
OPENAI_BASE_URL: activeProfile.baseUrl, OPENAI_BASE_URL: activeProfile.baseUrl,
OPENAI_MODEL: activeProfile.model, OPENAI_MODEL: getPrimaryModel(activeProfile.model),
OPENAI_API_KEY: activeProfile.apiKey, OPENAI_API_KEY: activeProfile.apiKey,
}, },
} as const) } as const)