fix: normalize /provider multi-model selection and semicolon parsing (#841)
* fix provider multi-model selection * fix provider manager multi-model save path
This commit is contained in:
@@ -152,6 +152,7 @@ function createDeferred<T>(): {
|
||||
|
||||
function mockProviderProfilesModule(options?: {
|
||||
addProviderProfile?: (...args: unknown[]) => unknown
|
||||
getActiveProviderProfile?: () => unknown
|
||||
getProviderProfiles?: () => unknown[]
|
||||
updateProviderProfile?: (...args: unknown[]) => unknown
|
||||
setActiveProviderProfile?: (...args: unknown[]) => unknown
|
||||
@@ -160,7 +161,7 @@ function mockProviderProfilesModule(options?: {
|
||||
addProviderProfile: options?.addProviderProfile ?? (() => null),
|
||||
applyActiveProviderProfileFromConfig: () => {},
|
||||
deleteProviderProfile: () => ({ removed: false, activeProfileId: null }),
|
||||
getActiveProviderProfile: () => null,
|
||||
getActiveProviderProfile: options?.getActiveProviderProfile ?? (() => null),
|
||||
getProviderPresetDefaults: (preset: string) =>
|
||||
preset === 'ollama'
|
||||
? {
|
||||
@@ -190,6 +191,7 @@ function mockProviderManagerDependencies(
|
||||
addProviderProfile?: (...args: unknown[]) => unknown
|
||||
applySavedProfileToCurrentSession?: (...args: unknown[]) => Promise<string | null>
|
||||
clearCodexCredentials?: () => { success: boolean; warning?: string }
|
||||
getActiveProviderProfile?: () => unknown
|
||||
getProviderProfiles?: () => unknown[]
|
||||
probeOllamaGenerationReadiness?: () => Promise<{
|
||||
state: 'ready' | 'unreachable' | 'no_models' | 'generation_failed'
|
||||
@@ -229,6 +231,7 @@ function mockProviderManagerDependencies(
|
||||
): void {
|
||||
mockProviderProfilesModule({
|
||||
addProviderProfile: options?.addProviderProfile,
|
||||
getActiveProviderProfile: options?.getActiveProviderProfile,
|
||||
getProviderProfiles: options?.getProviderProfiles,
|
||||
updateProviderProfile: options?.updateProviderProfile,
|
||||
setActiveProviderProfile: options?.setActiveProviderProfile,
|
||||
@@ -331,6 +334,10 @@ async function mountProviderManager(
|
||||
options?: {
|
||||
mode?: 'first-run' | 'manage'
|
||||
onDone?: (result?: unknown) => void
|
||||
onChangeAppState?: (args: {
|
||||
newState: unknown
|
||||
oldState: unknown
|
||||
}) => void
|
||||
},
|
||||
): Promise<{
|
||||
stdin: PassThrough
|
||||
@@ -345,7 +352,7 @@ async function mountProviderManager(
|
||||
})
|
||||
|
||||
root.render(
|
||||
<AppStateProvider>
|
||||
<AppStateProvider onChangeAppState={options?.onChangeAppState}>
|
||||
<KeybindingSetup>
|
||||
<ProviderManager
|
||||
mode={options?.mode ?? 'manage'}
|
||||
@@ -907,6 +914,205 @@ test('ProviderManager keeps Codex OAuth as next-startup only when activating the
|
||||
await mounted.dispose()
|
||||
})
|
||||
|
||||
test('ProviderManager activating a multi-model provider sets the session model to the primary model', async () => {
|
||||
delete process.env.CLAUDE_CODE_SIMPLE
|
||||
delete process.env.CLAUDE_CODE_USE_GITHUB
|
||||
delete process.env.GITHUB_TOKEN
|
||||
delete process.env.GH_TOKEN
|
||||
|
||||
const multiModelProfile = {
|
||||
id: 'provider_multi_model',
|
||||
provider: 'openai',
|
||||
name: 'Multi Model Provider',
|
||||
baseUrl: 'https://api.openai.com/v1',
|
||||
model: 'gpt-5.4; gpt-5.4-mini',
|
||||
apiKey: 'sk-test',
|
||||
}
|
||||
|
||||
const setActiveProviderProfile = mock(() => multiModelProfile)
|
||||
const appStateChanges: Array<{ newState: any; oldState: any }> = []
|
||||
|
||||
mockProviderManagerDependencies(
|
||||
() => undefined,
|
||||
async () => undefined,
|
||||
{
|
||||
getProviderProfiles: () => [multiModelProfile],
|
||||
setActiveProviderProfile,
|
||||
},
|
||||
)
|
||||
|
||||
const nonce = `${Date.now()}-${Math.random()}`
|
||||
const { ProviderManager } = await import(`./ProviderManager.js?ts=${nonce}`)
|
||||
const mounted = await mountProviderManager(ProviderManager, {
|
||||
onChangeAppState: args => {
|
||||
appStateChanges.push(args as { newState: any; oldState: any })
|
||||
},
|
||||
})
|
||||
|
||||
await waitForFrameOutput(
|
||||
mounted.getOutput,
|
||||
frame =>
|
||||
frame.includes('Provider manager') &&
|
||||
frame.includes('Set active provider'),
|
||||
)
|
||||
|
||||
mounted.stdin.write('j')
|
||||
await Bun.sleep(25)
|
||||
mounted.stdin.write('\r')
|
||||
|
||||
await waitForFrameOutput(
|
||||
mounted.getOutput,
|
||||
frame =>
|
||||
frame.includes('Set active provider') &&
|
||||
frame.includes('Multi Model Provider'),
|
||||
)
|
||||
|
||||
await Bun.sleep(25)
|
||||
mounted.stdin.write('\r')
|
||||
|
||||
await waitForCondition(() => setActiveProviderProfile.mock.calls.length > 0)
|
||||
await waitForCondition(() =>
|
||||
appStateChanges.some(
|
||||
({ newState, oldState }) =>
|
||||
newState.mainLoopModel === 'gpt-5.4' &&
|
||||
oldState.mainLoopModel !== newState.mainLoopModel,
|
||||
),
|
||||
)
|
||||
|
||||
expect(setActiveProviderProfile).toHaveBeenCalledWith('provider_multi_model')
|
||||
expect(
|
||||
appStateChanges.some(
|
||||
({ newState }) =>
|
||||
newState.mainLoopModel === 'gpt-5.4' &&
|
||||
newState.mainLoopModelForSession === null,
|
||||
),
|
||||
).toBe(true)
|
||||
expect(
|
||||
appStateChanges.some(
|
||||
({ newState }) => newState.mainLoopModel === 'gpt-5.4; gpt-5.4-mini',
|
||||
),
|
||||
).toBe(false)
|
||||
|
||||
await mounted.dispose()
|
||||
})
|
||||
|
||||
test('ProviderManager editing an active multi-model provider keeps app state on the primary model', async () => {
|
||||
delete process.env.CLAUDE_CODE_SIMPLE
|
||||
delete process.env.CLAUDE_CODE_USE_GITHUB
|
||||
delete process.env.GITHUB_TOKEN
|
||||
delete process.env.GH_TOKEN
|
||||
|
||||
const multiModelProfile = {
|
||||
id: 'provider_multi_model',
|
||||
provider: 'openai',
|
||||
name: 'Multi Model Provider',
|
||||
baseUrl: 'https://api.openai.com/v1',
|
||||
model: 'gpt-5.4; gpt-5.4-mini',
|
||||
apiKey: 'sk-test',
|
||||
}
|
||||
|
||||
const updateProviderProfile = mock(() => multiModelProfile)
|
||||
const appStateChanges: Array<{ newState: any; oldState: any }> = []
|
||||
|
||||
mockProviderManagerDependencies(
|
||||
() => undefined,
|
||||
async () => undefined,
|
||||
{
|
||||
getActiveProviderProfile: () => multiModelProfile,
|
||||
getProviderProfiles: () => [multiModelProfile],
|
||||
updateProviderProfile,
|
||||
},
|
||||
)
|
||||
|
||||
const nonce = `${Date.now()}-${Math.random()}`
|
||||
const { ProviderManager } = await import(`./ProviderManager.js?ts=${nonce}`)
|
||||
const mounted = await mountProviderManager(ProviderManager, {
|
||||
onChangeAppState: args => {
|
||||
appStateChanges.push(args as { newState: any; oldState: any })
|
||||
},
|
||||
})
|
||||
|
||||
await waitForFrameOutput(
|
||||
mounted.getOutput,
|
||||
frame =>
|
||||
frame.includes('Provider manager') &&
|
||||
frame.includes('Edit provider'),
|
||||
)
|
||||
|
||||
mounted.stdin.write('j')
|
||||
await Bun.sleep(25)
|
||||
mounted.stdin.write('j')
|
||||
await Bun.sleep(25)
|
||||
mounted.stdin.write('\r')
|
||||
|
||||
await waitForFrameOutput(
|
||||
mounted.getOutput,
|
||||
frame =>
|
||||
frame.includes('Edit provider') &&
|
||||
frame.includes('Multi Model Provider'),
|
||||
)
|
||||
|
||||
await Bun.sleep(25)
|
||||
mounted.stdin.write('\r')
|
||||
|
||||
await waitForFrameOutput(
|
||||
mounted.getOutput,
|
||||
frame =>
|
||||
frame.includes('Edit provider profile') &&
|
||||
frame.includes('Step 1 of 4'),
|
||||
)
|
||||
|
||||
mounted.stdin.write('\r')
|
||||
await waitForFrameOutput(
|
||||
mounted.getOutput,
|
||||
frame => frame.includes('Step 2 of 4'),
|
||||
)
|
||||
|
||||
mounted.stdin.write('\r')
|
||||
await waitForFrameOutput(
|
||||
mounted.getOutput,
|
||||
frame => frame.includes('Step 3 of 4'),
|
||||
)
|
||||
|
||||
mounted.stdin.write('\r')
|
||||
await waitForFrameOutput(
|
||||
mounted.getOutput,
|
||||
frame => frame.includes('Step 4 of 4'),
|
||||
)
|
||||
|
||||
mounted.stdin.write('\r')
|
||||
|
||||
await waitForCondition(() => updateProviderProfile.mock.calls.length > 0)
|
||||
await waitForCondition(() =>
|
||||
appStateChanges.some(
|
||||
({ newState, oldState }) =>
|
||||
newState.mainLoopModel === 'gpt-5.4' &&
|
||||
oldState.mainLoopModel !== newState.mainLoopModel,
|
||||
),
|
||||
)
|
||||
|
||||
expect(updateProviderProfile).toHaveBeenCalledWith(
|
||||
'provider_multi_model',
|
||||
expect.objectContaining({
|
||||
model: 'gpt-5.4; gpt-5.4-mini',
|
||||
}),
|
||||
)
|
||||
expect(
|
||||
appStateChanges.some(
|
||||
({ newState }) =>
|
||||
newState.mainLoopModel === 'gpt-5.4' &&
|
||||
newState.mainLoopModelForSession === null,
|
||||
),
|
||||
).toBe(true)
|
||||
expect(
|
||||
appStateChanges.some(
|
||||
({ newState }) => newState.mainLoopModel === 'gpt-5.4; gpt-5.4-mini',
|
||||
),
|
||||
).toBe(false)
|
||||
|
||||
await mounted.dispose()
|
||||
})
|
||||
|
||||
test('ProviderManager resolves Codex OAuth state from async storage without sync reads in render flow', async () => {
|
||||
delete process.env.CLAUDE_CODE_SIMPLE
|
||||
delete process.env.CLAUDE_CODE_USE_GITHUB
|
||||
|
||||
@@ -125,8 +125,8 @@ const FORM_STEPS: Array<{
|
||||
{
|
||||
key: 'model',
|
||||
label: 'Default model',
|
||||
placeholder: 'e.g. llama3.1:8b or glm-4.7, glm-4.7-flash',
|
||||
helpText: 'Model name(s) to use. Separate multiple with commas; first is default.',
|
||||
placeholder: 'e.g. llama3.1:8b or glm-4.7; glm-4.7-flash',
|
||||
helpText: 'Model name(s) to use. Separate multiple with ";" or ","; first is default.',
|
||||
},
|
||||
{
|
||||
key: 'apiKey',
|
||||
@@ -780,19 +780,14 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
|
||||
// Update the session model to the new provider's first model.
|
||||
// persistActiveProviderProfileModel (called by onChangeAppState) will
|
||||
// not overwrite the multi-model list because it checks if the model
|
||||
// is already in the profile's comma-separated model list.
|
||||
// is already in the provider's configured model list.
|
||||
const newModel = getPrimaryModel(active.model)
|
||||
setAppState(prev => ({
|
||||
...prev,
|
||||
mainLoopModel: newModel,
|
||||
}))
|
||||
|
||||
providerLabel = active.name
|
||||
setAppState(prev => ({
|
||||
...prev,
|
||||
mainLoopModel: active.model,
|
||||
mainLoopModelForSession: null,
|
||||
}))
|
||||
providerLabel = active.name
|
||||
const settingsOverrideError =
|
||||
clearStartupProviderOverrideFromUserSettings()
|
||||
const isActiveCodexOAuth = isCodexOAuthProfile(
|
||||
@@ -996,7 +991,7 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
|
||||
if (isActiveSavedProfile) {
|
||||
setAppState(prev => ({
|
||||
...prev,
|
||||
mainLoopModel: saved.model,
|
||||
mainLoopModel: getPrimaryModel(saved.model),
|
||||
mainLoopModelForSession: null,
|
||||
}))
|
||||
}
|
||||
|
||||
@@ -35,6 +35,21 @@ describe('parseModelList', () => {
|
||||
])
|
||||
})
|
||||
|
||||
test('splits semicolon-separated models', () => {
|
||||
expect(parseModelList('glm-4.7; glm-4.7-flash')).toEqual([
|
||||
'glm-4.7',
|
||||
'glm-4.7-flash',
|
||||
])
|
||||
})
|
||||
|
||||
test('splits mixed comma- and semicolon-separated models', () => {
|
||||
expect(parseModelList('gpt-5.4; gpt-5.4-mini, o3')).toEqual([
|
||||
'gpt-5.4',
|
||||
'gpt-5.4-mini',
|
||||
'o3',
|
||||
])
|
||||
})
|
||||
|
||||
test('returns empty array for empty string', () => {
|
||||
expect(parseModelList('')).toEqual([])
|
||||
})
|
||||
@@ -62,6 +77,10 @@ describe('getPrimaryModel', () => {
|
||||
expect(getPrimaryModel('glm-4.7, glm-4.7-flash')).toBe('glm-4.7')
|
||||
})
|
||||
|
||||
test('returns first model from semicolon-separated list', () => {
|
||||
expect(getPrimaryModel('glm-4.7; glm-4.7-flash')).toBe('glm-4.7')
|
||||
})
|
||||
|
||||
test('returns the only model when single model is provided', () => {
|
||||
expect(getPrimaryModel('llama3.1:8b')).toBe('llama3.1:8b')
|
||||
})
|
||||
@@ -86,6 +105,10 @@ describe('hasMultipleModels', () => {
|
||||
expect(hasMultipleModels('glm-4.7, glm-4.7-flash')).toBe(true)
|
||||
})
|
||||
|
||||
test('returns true for semicolon-separated models', () => {
|
||||
expect(hasMultipleModels('glm-4.7; glm-4.7-flash')).toBe(true)
|
||||
})
|
||||
|
||||
test('returns false for a single model', () => {
|
||||
expect(hasMultipleModels('llama3.1:8b')).toBe(false)
|
||||
})
|
||||
|
||||
@@ -1,28 +1,30 @@
|
||||
/**
 * Utility functions for parsing provider-profile model lists.
 *
 * Examples:
 * - "glm-4.7, glm-4.7-flash" -> ["glm-4.7", "glm-4.7-flash"]
 * - "glm-4.7; glm-4.7-flash" -> ["glm-4.7", "glm-4.7-flash"]
 * - "llama3.1:8b" -> ["llama3.1:8b"]
 */
|
||||
|
||||
/**
|
||||
* Splits a comma-separated model field into an array of trimmed model names,
|
||||
* filtering out any empty entries.
|
||||
* Splits a comma- or semicolon-separated model field into an array of trimmed
|
||||
* model names, filtering out any empty entries.
|
||||
*/
|
||||
export function parseModelList(modelField: string): string[] {
|
||||
return modelField
|
||||
.split(',')
|
||||
.split(/[;,]/)
|
||||
.map((part) => part.trim())
|
||||
.filter((part) => part.length > 0)
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the first (primary) model from a comma-separated model field.
|
||||
* Falls back to the original string if parsing yields no results.
|
||||
* Returns the first (primary) model from a model-list field.
|
||||
* Falls back to the trimmed original string if parsing yields no results.
|
||||
*/
|
||||
export function getPrimaryModel(modelField: string): string {
|
||||
const models = parseModelList(modelField)
|
||||
return models.length > 0 ? models[0] : modelField
|
||||
return models.length > 0 ? models[0] : modelField.trim()
|
||||
}
|
||||
|
||||
/**
|
||||
|
||||
@@ -391,6 +391,21 @@ test('gemini profiles accept google api key fallback', () => {
|
||||
})
|
||||
})
|
||||
|
||||
test('gemini profiles use the first model from a semicolon-separated list', () => {
|
||||
const env = buildGeminiProfileEnv({
|
||||
authMode: 'api-key',
|
||||
apiKey: 'gem-live',
|
||||
model: 'gemini-2.5-pro; gemini-2.5-flash',
|
||||
processEnv: {},
|
||||
})
|
||||
|
||||
assert.deepEqual(env, {
|
||||
GEMINI_AUTH_MODE: 'api-key',
|
||||
GEMINI_MODEL: 'gemini-2.5-pro',
|
||||
GEMINI_API_KEY: 'gem-live',
|
||||
})
|
||||
})
|
||||
|
||||
test('gemini profiles support access-token auth mode without persisting a key', () => {
|
||||
const env = buildGeminiProfileEnv({
|
||||
authMode: 'access-token',
|
||||
@@ -766,6 +781,21 @@ test('openai profiles ignore codex shell transport hints', () => {
|
||||
})
|
||||
})
|
||||
|
||||
test('openai profiles use the first model from a semicolon-separated list', () => {
|
||||
const env = buildOpenAIProfileEnv({
|
||||
goal: 'balanced',
|
||||
apiKey: 'sk-live',
|
||||
model: 'gpt-5.4; gpt-5.4-mini',
|
||||
processEnv: {},
|
||||
})
|
||||
|
||||
assert.deepEqual(env, {
|
||||
OPENAI_BASE_URL: 'https://api.openai.com/v1',
|
||||
OPENAI_MODEL: 'gpt-5.4',
|
||||
OPENAI_API_KEY: 'sk-live',
|
||||
})
|
||||
})
|
||||
|
||||
test('openai profiles ignore poisoned shell model and base url values', () => {
|
||||
const env = buildOpenAIProfileEnv({
|
||||
goal: 'balanced',
|
||||
@@ -800,6 +830,22 @@ test('startup env ignores poisoned persisted openai model and base url', async (
|
||||
assert.equal(env.OPENAI_BASE_URL, 'https://api.openai.com/v1')
|
||||
})
|
||||
|
||||
test('startup env normalizes a semicolon-separated persisted openai model list', async () => {
|
||||
const env = await buildStartupEnvFromProfile({
|
||||
persisted: profile('openai', {
|
||||
OPENAI_API_KEY: 'sk-live',
|
||||
OPENAI_MODEL: 'gpt-5.4; gpt-5.4-mini',
|
||||
OPENAI_BASE_URL: 'https://api.openai.com/v1',
|
||||
}),
|
||||
processEnv: {},
|
||||
})
|
||||
|
||||
assert.equal(env.CLAUDE_CODE_USE_OPENAI, '1')
|
||||
assert.equal(env.OPENAI_API_KEY, 'sk-live')
|
||||
assert.equal(env.OPENAI_MODEL, 'gpt-5.4')
|
||||
assert.equal(env.OPENAI_BASE_URL, 'https://api.openai.com/v1')
|
||||
})
|
||||
|
||||
test('auto profile falls back to openai when no viable ollama model exists', () => {
|
||||
assert.equal(selectAutoProfile(null), 'openai')
|
||||
assert.equal(selectAutoProfile('qwen2.5-coder:7b'), 'ollama')
|
||||
|
||||
@@ -22,6 +22,7 @@ import {
|
||||
sanitizeApiKey,
|
||||
sanitizeProviderConfigValue,
|
||||
} from './providerSecrets.js'
|
||||
import { getPrimaryModel } from './providerModels.js'
|
||||
|
||||
export {
|
||||
maskSecretForDisplay,
|
||||
@@ -147,6 +148,18 @@ function resolveProfileFilePath(options?: ProfileFileLocation): string {
|
||||
return resolve(options?.cwd ?? process.cwd(), PROFILE_FILE_NAME)
|
||||
}
|
||||
|
||||
function normalizeProfileModel(
|
||||
value: string | undefined | null,
|
||||
): string | undefined {
|
||||
const trimmed = value?.trim()
|
||||
if (!trimmed) {
|
||||
return undefined
|
||||
}
|
||||
|
||||
const primary = getPrimaryModel(trimmed)
|
||||
return primary.length > 0 ? primary : undefined
|
||||
}
|
||||
|
||||
export function isProviderProfile(value: unknown): value is ProviderProfile {
|
||||
return (
|
||||
value === 'openai' ||
|
||||
@@ -207,8 +220,12 @@ export function buildNvidiaNimProfileEnv(options: {
|
||||
sanitizeProviderConfigValue(processEnv.OPENAI_BASE_URL, secretSource) ||
|
||||
defaultBaseUrl,
|
||||
OPENAI_MODEL:
|
||||
sanitizeProviderConfigValue(options.model, secretSource) ||
|
||||
sanitizeProviderConfigValue(processEnv.OPENAI_MODEL, secretSource) ||
|
||||
normalizeProfileModel(
|
||||
sanitizeProviderConfigValue(options.model, secretSource),
|
||||
) ||
|
||||
normalizeProfileModel(
|
||||
sanitizeProviderConfigValue(processEnv.OPENAI_MODEL, secretSource),
|
||||
) ||
|
||||
'nvidia/llama-3.1-nemotron-70b-instruct',
|
||||
OPENAI_API_KEY: key,
|
||||
NVIDIA_NIM: '1',
|
||||
@@ -237,8 +254,12 @@ export function buildMiniMaxProfileEnv(options: {
|
||||
sanitizeProviderConfigValue(processEnv.OPENAI_BASE_URL, secretSource) ||
|
||||
defaultBaseUrl,
|
||||
OPENAI_MODEL:
|
||||
sanitizeProviderConfigValue(options.model, secretSource) ||
|
||||
sanitizeProviderConfigValue(processEnv.OPENAI_MODEL, secretSource) ||
|
||||
normalizeProfileModel(
|
||||
sanitizeProviderConfigValue(options.model, secretSource),
|
||||
) ||
|
||||
normalizeProfileModel(
|
||||
sanitizeProviderConfigValue(processEnv.OPENAI_MODEL, secretSource),
|
||||
) ||
|
||||
defaultModel,
|
||||
OPENAI_API_KEY: key,
|
||||
MINIMAX_API_KEY: key,
|
||||
@@ -270,8 +291,12 @@ export function buildGeminiProfileEnv(options: {
|
||||
const env: ProfileEnv = {
|
||||
GEMINI_AUTH_MODE: authMode,
|
||||
GEMINI_MODEL:
|
||||
sanitizeProviderConfigValue(options.model, secretSource) ||
|
||||
sanitizeProviderConfigValue(processEnv.GEMINI_MODEL, secretSource) ||
|
||||
normalizeProfileModel(
|
||||
sanitizeProviderConfigValue(options.model, secretSource),
|
||||
) ||
|
||||
normalizeProfileModel(
|
||||
sanitizeProviderConfigValue(processEnv.GEMINI_MODEL, secretSource),
|
||||
) ||
|
||||
DEFAULT_GEMINI_MODEL,
|
||||
}
|
||||
|
||||
@@ -304,9 +329,11 @@ export function buildOpenAIProfileEnv(options: {
|
||||
|
||||
const defaultModel = getGoalDefaultOpenAIModel(options.goal)
|
||||
const secretSource: SecretValueSource = { OPENAI_API_KEY: key }
|
||||
const shellOpenAIModel = sanitizeProviderConfigValue(
|
||||
processEnv.OPENAI_MODEL,
|
||||
secretSource,
|
||||
const shellOpenAIModel = normalizeProfileModel(
|
||||
sanitizeProviderConfigValue(
|
||||
processEnv.OPENAI_MODEL,
|
||||
secretSource,
|
||||
),
|
||||
)
|
||||
const shellOpenAIBaseUrl = sanitizeProviderConfigValue(
|
||||
processEnv.OPENAI_BASE_URL,
|
||||
@@ -325,7 +352,9 @@ export function buildOpenAIProfileEnv(options: {
|
||||
(useShellOpenAIConfig ? shellOpenAIBaseUrl : undefined) ||
|
||||
DEFAULT_OPENAI_BASE_URL,
|
||||
OPENAI_MODEL:
|
||||
sanitizeProviderConfigValue(options.model, secretSource) ||
|
||||
normalizeProfileModel(
|
||||
sanitizeProviderConfigValue(options.model, secretSource),
|
||||
) ||
|
||||
(useShellOpenAIConfig ? shellOpenAIModel : undefined) ||
|
||||
defaultModel,
|
||||
OPENAI_API_KEY: key,
|
||||
@@ -382,10 +411,14 @@ export function buildMistralProfileEnv(options: {
|
||||
const env: ProfileEnv = {
|
||||
MISTRAL_API_KEY: key,
|
||||
MISTRAL_MODEL:
|
||||
sanitizeProviderConfigValue(options.model, { MISTRAL_API_KEY: key }) ||
|
||||
sanitizeProviderConfigValue(
|
||||
processEnv.MISTRAL_MODEL,
|
||||
{ MISTRAL_API_KEY: key },
|
||||
normalizeProfileModel(
|
||||
sanitizeProviderConfigValue(options.model, { MISTRAL_API_KEY: key }),
|
||||
) ||
|
||||
normalizeProfileModel(
|
||||
sanitizeProviderConfigValue(
|
||||
processEnv.MISTRAL_MODEL,
|
||||
{ MISTRAL_API_KEY: key },
|
||||
),
|
||||
) ||
|
||||
DEFAULT_MISTRAL_MODEL,
|
||||
}
|
||||
@@ -578,33 +611,41 @@ export async function buildLaunchEnv(options: {
|
||||
options.persisted?.profile === options.profile
|
||||
? options.persisted.env ?? {}
|
||||
: {}
|
||||
const persistedOpenAIModel = sanitizeProviderConfigValue(
|
||||
persistedEnv.OPENAI_MODEL,
|
||||
persistedEnv,
|
||||
const persistedOpenAIModel = normalizeProfileModel(
|
||||
sanitizeProviderConfigValue(
|
||||
persistedEnv.OPENAI_MODEL,
|
||||
persistedEnv,
|
||||
),
|
||||
)
|
||||
const persistedOpenAIBaseUrl = sanitizeProviderConfigValue(
|
||||
persistedEnv.OPENAI_BASE_URL,
|
||||
persistedEnv,
|
||||
)
|
||||
const shellOpenAIModel = sanitizeProviderConfigValue(
|
||||
processEnv.OPENAI_MODEL,
|
||||
processEnv as SecretValueSource,
|
||||
const shellOpenAIModel = normalizeProfileModel(
|
||||
sanitizeProviderConfigValue(
|
||||
processEnv.OPENAI_MODEL,
|
||||
processEnv as SecretValueSource,
|
||||
),
|
||||
)
|
||||
const shellOpenAIBaseUrl = sanitizeProviderConfigValue(
|
||||
processEnv.OPENAI_BASE_URL,
|
||||
processEnv as SecretValueSource,
|
||||
)
|
||||
const persistedGeminiModel = sanitizeProviderConfigValue(
|
||||
persistedEnv.GEMINI_MODEL,
|
||||
persistedEnv,
|
||||
const persistedGeminiModel = normalizeProfileModel(
|
||||
sanitizeProviderConfigValue(
|
||||
persistedEnv.GEMINI_MODEL,
|
||||
persistedEnv,
|
||||
),
|
||||
)
|
||||
const persistedGeminiBaseUrl = sanitizeProviderConfigValue(
|
||||
persistedEnv.GEMINI_BASE_URL,
|
||||
persistedEnv,
|
||||
)
|
||||
const shellGeminiModel = sanitizeProviderConfigValue(
|
||||
processEnv.GEMINI_MODEL,
|
||||
processEnv as SecretValueSource,
|
||||
const shellGeminiModel = normalizeProfileModel(
|
||||
sanitizeProviderConfigValue(
|
||||
processEnv.GEMINI_MODEL,
|
||||
processEnv as SecretValueSource,
|
||||
),
|
||||
)
|
||||
const shellGeminiBaseUrl = sanitizeProviderConfigValue(
|
||||
processEnv.GEMINI_BASE_URL,
|
||||
@@ -702,11 +743,15 @@ export async function buildLaunchEnv(options: {
|
||||
delete env.CLAUDE_CODE_USE_VERTEX
|
||||
delete env.CLAUDE_CODE_USE_FOUNDRY
|
||||
|
||||
const shellMistralModel = sanitizeProviderConfigValue(
|
||||
processEnv.MISTRAL_MODEL,
|
||||
const shellMistralModel = normalizeProfileModel(
|
||||
sanitizeProviderConfigValue(
|
||||
processEnv.MISTRAL_MODEL,
|
||||
),
|
||||
)
|
||||
const persistedMistralModel = sanitizeProviderConfigValue(
|
||||
persistedEnv.MISTRAL_MODEL,
|
||||
const persistedMistralModel = normalizeProfileModel(
|
||||
sanitizeProviderConfigValue(
|
||||
persistedEnv.MISTRAL_MODEL,
|
||||
),
|
||||
)
|
||||
const shellMistralBaseUrl = sanitizeProviderConfigValue(
|
||||
processEnv.MISTRAL_BASE_URL,
|
||||
|
||||
@@ -221,6 +221,23 @@ describe('applyProviderProfileToProcessEnv', () => {
|
||||
expect(process.env.OPENAI_BASE_URL).toBe('https://api.openai.com/v1')
|
||||
})
|
||||
|
||||
test('openai profile with semicolon-separated multi-model string sets only first model in OPENAI_MODEL', async () => {
|
||||
const { applyProviderProfileToProcessEnv } =
|
||||
await importFreshProviderProfileModules()
|
||||
|
||||
applyProviderProfileToProcessEnv(
|
||||
buildProfile({
|
||||
provider: 'openai',
|
||||
baseUrl: 'https://api.openai.com/v1',
|
||||
model: 'glm-4.7; glm-4.7-flash; glm-4.7-plus',
|
||||
}),
|
||||
)
|
||||
|
||||
expect(process.env.OPENAI_MODEL).toBe('glm-4.7')
|
||||
expect(String(process.env.CLAUDE_CODE_USE_OPENAI)).toBe('1')
|
||||
expect(process.env.OPENAI_BASE_URL).toBe('https://api.openai.com/v1')
|
||||
})
|
||||
|
||||
test('anthropic profile with multi-model string sets only first model in ANTHROPIC_MODEL', async () => {
|
||||
const { applyProviderProfileToProcessEnv } =
|
||||
await importFreshProviderProfileModules()
|
||||
@@ -236,6 +253,34 @@ describe('applyProviderProfileToProcessEnv', () => {
|
||||
expect(process.env.ANTHROPIC_MODEL).toBe('claude-sonnet-4-6')
|
||||
expect(process.env.ANTHROPIC_BASE_URL).toBe('https://api.anthropic.com')
|
||||
})
|
||||
|
||||
test('gemini profile with semicolon-separated multi-model string sets only first model in GEMINI_MODEL', async () => {
|
||||
const { applyProviderProfileToProcessEnv } =
|
||||
await importFreshProviderProfileModules()
|
||||
|
||||
applyProviderProfileToProcessEnv(
|
||||
buildGeminiProfile({
|
||||
model: 'gemini-3-flash-preview; gemini-3-pro-preview',
|
||||
}),
|
||||
)
|
||||
|
||||
expect(process.env.GEMINI_MODEL).toBe('gemini-3-flash-preview')
|
||||
expect(process.env.CLAUDE_CODE_USE_GEMINI).toBe('1')
|
||||
})
|
||||
|
||||
test('mistral profile with semicolon-separated multi-model string sets only first model in MISTRAL_MODEL', async () => {
|
||||
const { applyProviderProfileToProcessEnv } =
|
||||
await importFreshProviderProfileModules()
|
||||
|
||||
applyProviderProfileToProcessEnv(
|
||||
buildMistralProfile({
|
||||
model: 'devstral-latest; mistral-medium-latest',
|
||||
}),
|
||||
)
|
||||
|
||||
expect(process.env.MISTRAL_MODEL).toBe('devstral-latest')
|
||||
expect(process.env.CLAUDE_CODE_USE_MISTRAL).toBe('1')
|
||||
})
|
||||
})
|
||||
|
||||
describe('applyActiveProviderProfileFromConfig', () => {
|
||||
@@ -837,6 +882,24 @@ describe('getProfileModelOptions', () => {
|
||||
])
|
||||
})
|
||||
|
||||
test('generates options for semicolon-separated multi-model profile', async () => {
|
||||
const { getProfileModelOptions } =
|
||||
await importFreshProviderProfileModules()
|
||||
|
||||
const options = getProfileModelOptions(
|
||||
buildProfile({
|
||||
name: 'Test Provider',
|
||||
model: 'glm-4.7; glm-4.7-flash; glm-4.7-plus',
|
||||
}),
|
||||
)
|
||||
|
||||
expect(options).toEqual([
|
||||
{ value: 'glm-4.7', label: 'glm-4.7', description: 'Provider: Test Provider' },
|
||||
{ value: 'glm-4.7-flash', label: 'glm-4.7-flash', description: 'Provider: Test Provider' },
|
||||
{ value: 'glm-4.7-plus', label: 'glm-4.7-plus', description: 'Provider: Test Provider' },
|
||||
])
|
||||
})
|
||||
|
||||
test('returns single option for single-model profile', async () => {
|
||||
const { getProfileModelOptions } =
|
||||
await importFreshProviderProfileModules()
|
||||
|
||||
@@ -458,7 +458,7 @@ function isProcessEnvAlignedWithProfile(
|
||||
processEnv.CLAUDE_CODE_USE_VERTEX === undefined &&
|
||||
processEnv.CLAUDE_CODE_USE_FOUNDRY === undefined &&
|
||||
sameOptionalEnvValue(processEnv.MISTRAL_BASE_URL, profile.baseUrl) &&
|
||||
sameOptionalEnvValue(processEnv.MISTRAL_MODEL, profile.model) &&
|
||||
sameOptionalEnvValue(processEnv.MISTRAL_MODEL, getPrimaryModel(profile.model)) &&
|
||||
(!includeApiKey ||
|
||||
sameOptionalEnvValue(processEnv.MISTRAL_API_KEY, profile.apiKey))
|
||||
)
|
||||
@@ -474,7 +474,7 @@ function isProcessEnvAlignedWithProfile(
|
||||
processEnv.CLAUDE_CODE_USE_VERTEX === undefined &&
|
||||
processEnv.CLAUDE_CODE_USE_FOUNDRY === undefined &&
|
||||
sameOptionalEnvValue(processEnv.GEMINI_BASE_URL, profile.baseUrl) &&
|
||||
sameOptionalEnvValue(processEnv.GEMINI_MODEL, profile.model) &&
|
||||
sameOptionalEnvValue(processEnv.GEMINI_MODEL, getPrimaryModel(profile.model)) &&
|
||||
(!includeApiKey ||
|
||||
sameOptionalEnvValue(processEnv.GEMINI_API_KEY, profile.apiKey))
|
||||
)
|
||||
@@ -578,7 +578,7 @@ export function applyProviderProfileToProcessEnv(profile: ProviderProfile): void
|
||||
if (profile.provider === 'mistral') {
|
||||
process.env.CLAUDE_CODE_USE_MISTRAL = '1'
|
||||
process.env.MISTRAL_BASE_URL = profile.baseUrl
|
||||
process.env.MISTRAL_MODEL = profile.model
|
||||
process.env.MISTRAL_MODEL = getPrimaryModel(profile.model)
|
||||
|
||||
if (profile.apiKey) {
|
||||
process.env.MISTRAL_API_KEY = profile.apiKey
|
||||
@@ -595,7 +595,7 @@ export function applyProviderProfileToProcessEnv(profile: ProviderProfile): void
|
||||
if (profile.provider === 'gemini') {
|
||||
process.env.CLAUDE_CODE_USE_GEMINI = '1'
|
||||
process.env.GEMINI_BASE_URL = profile.baseUrl
|
||||
process.env.GEMINI_MODEL = profile.model
|
||||
process.env.GEMINI_MODEL = getPrimaryModel(profile.model)
|
||||
|
||||
if (profile.apiKey) {
|
||||
process.env.GEMINI_API_KEY = profile.apiKey
|
||||
@@ -930,7 +930,7 @@ export function setActiveProviderProfile(
|
||||
case 'gemini':
|
||||
return (
|
||||
buildGeminiProfileEnv({
|
||||
model: activeProfile.model,
|
||||
model: getPrimaryModel(activeProfile.model),
|
||||
baseUrl: activeProfile.baseUrl,
|
||||
apiKey: activeProfile.apiKey,
|
||||
authMode: 'api-key',
|
||||
@@ -940,7 +940,7 @@ export function setActiveProviderProfile(
|
||||
case 'mistral':
|
||||
return (
|
||||
buildMistralProfileEnv({
|
||||
model: activeProfile.model,
|
||||
model: getPrimaryModel(activeProfile.model),
|
||||
baseUrl: activeProfile.baseUrl,
|
||||
apiKey: activeProfile.apiKey,
|
||||
processEnv: process.env,
|
||||
@@ -951,7 +951,7 @@ export function setActiveProviderProfile(
|
||||
? (
|
||||
buildOpenAIProfileEnv({
|
||||
goal: 'balanced',
|
||||
model: activeProfile.model,
|
||||
model: getPrimaryModel(activeProfile.model),
|
||||
baseUrl: activeProfile.baseUrl,
|
||||
apiKey: activeProfile.apiKey,
|
||||
processEnv: process.env,
|
||||
@@ -968,7 +968,7 @@ export function setActiveProviderProfile(
|
||||
profile: 'openai' as ProviderProfileStartup,
|
||||
env: {
|
||||
OPENAI_BASE_URL: activeProfile.baseUrl,
|
||||
OPENAI_MODEL: activeProfile.model,
|
||||
OPENAI_MODEL: getPrimaryModel(activeProfile.model),
|
||||
OPENAI_API_KEY: activeProfile.apiKey,
|
||||
},
|
||||
} as const)
|
||||
|
||||
Reference in New Issue
Block a user