fix: normalize /provider multi-model selection and semicolon parsing (#841)

* fix provider multi-model selection

* fix provider manager multi-model save path
This commit is contained in:
JATMN
2026-04-24 11:28:14 -07:00
committed by GitHub
parent b5f7047358
commit c4cb98a4f0
8 changed files with 439 additions and 59 deletions

View File

@@ -35,6 +35,21 @@ describe('parseModelList', () => {
])
})
test('splits semicolon-separated models', () => {
expect(parseModelList('glm-4.7; glm-4.7-flash')).toEqual([
'glm-4.7',
'glm-4.7-flash',
])
})
test('splits mixed comma- and semicolon-separated models', () => {
expect(parseModelList('gpt-5.4; gpt-5.4-mini, o3')).toEqual([
'gpt-5.4',
'gpt-5.4-mini',
'o3',
])
})
test('returns empty array for empty string', () => {
expect(parseModelList('')).toEqual([])
})
@@ -62,6 +77,10 @@ describe('getPrimaryModel', () => {
expect(getPrimaryModel('glm-4.7, glm-4.7-flash')).toBe('glm-4.7')
})
test('returns first model from semicolon-separated list', () => {
expect(getPrimaryModel('glm-4.7; glm-4.7-flash')).toBe('glm-4.7')
})
test('returns the only model when single model is provided', () => {
expect(getPrimaryModel('llama3.1:8b')).toBe('llama3.1:8b')
})
@@ -86,6 +105,10 @@ describe('hasMultipleModels', () => {
expect(hasMultipleModels('glm-4.7, glm-4.7-flash')).toBe(true)
})
test('returns true for semicolon-separated models', () => {
expect(hasMultipleModels('glm-4.7; glm-4.7-flash')).toBe(true)
})
test('returns false for a single model', () => {
expect(hasMultipleModels('llama3.1:8b')).toBe(false)
})

View File

@@ -1,28 +1,30 @@
/**
 * Utility functions for parsing provider-profile model lists.
 *
 * Examples:
 * - "glm-4.7, glm-4.7-flash" -> ["glm-4.7", "glm-4.7-flash"]
 * - "glm-4.7; glm-4.7-flash" -> ["glm-4.7", "glm-4.7-flash"]
 * - "llama3.1:8b" -> ["llama3.1:8b"]
 */
/**
 * Splits a comma- or semicolon-separated model field into an array of trimmed
 * model names, filtering out any empty entries.
 *
 * @param modelField - Raw model list, e.g. "glm-4.7; glm-4.7-flash" or "a, b".
 * @returns Trimmed, non-empty model names in their original order.
 */
export function parseModelList(modelField: string): string[] {
  return modelField
    .split(/[;,]/)
    .map((part) => part.trim())
    .filter((part) => part.length > 0)
}
/**
 * Returns the first (primary) model from a model-list field.
 * Falls back to the trimmed original string if parsing yields no results
 * (e.g. the field contains only separators or whitespace).
 *
 * @param modelField - Raw model list, comma- or semicolon-separated.
 * @returns The first parsed model name, or the trimmed input as a fallback.
 */
export function getPrimaryModel(modelField: string): string {
  const models = parseModelList(modelField)
  return models.length > 0 ? models[0] : modelField.trim()
}
/**

View File

@@ -391,6 +391,21 @@ test('gemini profiles accept google api key fallback', () => {
})
})
test('gemini profiles use the first model from a semicolon-separated list', () => {
const env = buildGeminiProfileEnv({
authMode: 'api-key',
apiKey: 'gem-live',
model: 'gemini-2.5-pro; gemini-2.5-flash',
processEnv: {},
})
assert.deepEqual(env, {
GEMINI_AUTH_MODE: 'api-key',
GEMINI_MODEL: 'gemini-2.5-pro',
GEMINI_API_KEY: 'gem-live',
})
})
test('gemini profiles support access-token auth mode without persisting a key', () => {
const env = buildGeminiProfileEnv({
authMode: 'access-token',
@@ -766,6 +781,21 @@ test('openai profiles ignore codex shell transport hints', () => {
})
})
test('openai profiles use the first model from a semicolon-separated list', () => {
const env = buildOpenAIProfileEnv({
goal: 'balanced',
apiKey: 'sk-live',
model: 'gpt-5.4; gpt-5.4-mini',
processEnv: {},
})
assert.deepEqual(env, {
OPENAI_BASE_URL: 'https://api.openai.com/v1',
OPENAI_MODEL: 'gpt-5.4',
OPENAI_API_KEY: 'sk-live',
})
})
test('openai profiles ignore poisoned shell model and base url values', () => {
const env = buildOpenAIProfileEnv({
goal: 'balanced',
@@ -800,6 +830,22 @@ test('startup env ignores poisoned persisted openai model and base url', async (
assert.equal(env.OPENAI_BASE_URL, 'https://api.openai.com/v1')
})
test('startup env normalizes a semicolon-separated persisted openai model list', async () => {
const env = await buildStartupEnvFromProfile({
persisted: profile('openai', {
OPENAI_API_KEY: 'sk-live',
OPENAI_MODEL: 'gpt-5.4; gpt-5.4-mini',
OPENAI_BASE_URL: 'https://api.openai.com/v1',
}),
processEnv: {},
})
assert.equal(env.CLAUDE_CODE_USE_OPENAI, '1')
assert.equal(env.OPENAI_API_KEY, 'sk-live')
assert.equal(env.OPENAI_MODEL, 'gpt-5.4')
assert.equal(env.OPENAI_BASE_URL, 'https://api.openai.com/v1')
})
test('auto profile falls back to openai when no viable ollama model exists', () => {
assert.equal(selectAutoProfile(null), 'openai')
assert.equal(selectAutoProfile('qwen2.5-coder:7b'), 'ollama')

View File

@@ -22,6 +22,7 @@ import {
sanitizeApiKey,
sanitizeProviderConfigValue,
} from './providerSecrets.js'
import { getPrimaryModel } from './providerModels.js'
export {
maskSecretForDisplay,
@@ -147,6 +148,18 @@ function resolveProfileFilePath(options?: ProfileFileLocation): string {
return resolve(options?.cwd ?? process.cwd(), PROFILE_FILE_NAME)
}
/**
 * Normalizes a persisted/shell model value to its primary model name.
 * Returns undefined when the value is absent, blank, or yields no usable
 * model after parsing, so callers can fall through to the next source.
 */
function normalizeProfileModel(
  value: string | undefined | null,
): string | undefined {
  if (value == null) {
    return undefined
  }
  const trimmed = value.trim()
  if (trimmed.length === 0) {
    return undefined
  }
  const primary = getPrimaryModel(trimmed)
  return primary.length === 0 ? undefined : primary
}
export function isProviderProfile(value: unknown): value is ProviderProfile {
return (
value === 'openai' ||
@@ -207,8 +220,12 @@ export function buildNvidiaNimProfileEnv(options: {
sanitizeProviderConfigValue(processEnv.OPENAI_BASE_URL, secretSource) ||
defaultBaseUrl,
OPENAI_MODEL:
sanitizeProviderConfigValue(options.model, secretSource) ||
sanitizeProviderConfigValue(processEnv.OPENAI_MODEL, secretSource) ||
normalizeProfileModel(
sanitizeProviderConfigValue(options.model, secretSource),
) ||
normalizeProfileModel(
sanitizeProviderConfigValue(processEnv.OPENAI_MODEL, secretSource),
) ||
'nvidia/llama-3.1-nemotron-70b-instruct',
OPENAI_API_KEY: key,
NVIDIA_NIM: '1',
@@ -237,8 +254,12 @@ export function buildMiniMaxProfileEnv(options: {
sanitizeProviderConfigValue(processEnv.OPENAI_BASE_URL, secretSource) ||
defaultBaseUrl,
OPENAI_MODEL:
sanitizeProviderConfigValue(options.model, secretSource) ||
sanitizeProviderConfigValue(processEnv.OPENAI_MODEL, secretSource) ||
normalizeProfileModel(
sanitizeProviderConfigValue(options.model, secretSource),
) ||
normalizeProfileModel(
sanitizeProviderConfigValue(processEnv.OPENAI_MODEL, secretSource),
) ||
defaultModel,
OPENAI_API_KEY: key,
MINIMAX_API_KEY: key,
@@ -270,8 +291,12 @@ export function buildGeminiProfileEnv(options: {
const env: ProfileEnv = {
GEMINI_AUTH_MODE: authMode,
GEMINI_MODEL:
sanitizeProviderConfigValue(options.model, secretSource) ||
sanitizeProviderConfigValue(processEnv.GEMINI_MODEL, secretSource) ||
normalizeProfileModel(
sanitizeProviderConfigValue(options.model, secretSource),
) ||
normalizeProfileModel(
sanitizeProviderConfigValue(processEnv.GEMINI_MODEL, secretSource),
) ||
DEFAULT_GEMINI_MODEL,
}
@@ -304,9 +329,11 @@ export function buildOpenAIProfileEnv(options: {
const defaultModel = getGoalDefaultOpenAIModel(options.goal)
const secretSource: SecretValueSource = { OPENAI_API_KEY: key }
const shellOpenAIModel = sanitizeProviderConfigValue(
processEnv.OPENAI_MODEL,
secretSource,
const shellOpenAIModel = normalizeProfileModel(
sanitizeProviderConfigValue(
processEnv.OPENAI_MODEL,
secretSource,
),
)
const shellOpenAIBaseUrl = sanitizeProviderConfigValue(
processEnv.OPENAI_BASE_URL,
@@ -325,7 +352,9 @@ export function buildOpenAIProfileEnv(options: {
(useShellOpenAIConfig ? shellOpenAIBaseUrl : undefined) ||
DEFAULT_OPENAI_BASE_URL,
OPENAI_MODEL:
sanitizeProviderConfigValue(options.model, secretSource) ||
normalizeProfileModel(
sanitizeProviderConfigValue(options.model, secretSource),
) ||
(useShellOpenAIConfig ? shellOpenAIModel : undefined) ||
defaultModel,
OPENAI_API_KEY: key,
@@ -382,10 +411,14 @@ export function buildMistralProfileEnv(options: {
const env: ProfileEnv = {
MISTRAL_API_KEY: key,
MISTRAL_MODEL:
sanitizeProviderConfigValue(options.model, { MISTRAL_API_KEY: key }) ||
sanitizeProviderConfigValue(
processEnv.MISTRAL_MODEL,
{ MISTRAL_API_KEY: key },
normalizeProfileModel(
sanitizeProviderConfigValue(options.model, { MISTRAL_API_KEY: key }),
) ||
normalizeProfileModel(
sanitizeProviderConfigValue(
processEnv.MISTRAL_MODEL,
{ MISTRAL_API_KEY: key },
),
) ||
DEFAULT_MISTRAL_MODEL,
}
@@ -578,33 +611,41 @@ export async function buildLaunchEnv(options: {
options.persisted?.profile === options.profile
? options.persisted.env ?? {}
: {}
const persistedOpenAIModel = sanitizeProviderConfigValue(
persistedEnv.OPENAI_MODEL,
persistedEnv,
const persistedOpenAIModel = normalizeProfileModel(
sanitizeProviderConfigValue(
persistedEnv.OPENAI_MODEL,
persistedEnv,
),
)
const persistedOpenAIBaseUrl = sanitizeProviderConfigValue(
persistedEnv.OPENAI_BASE_URL,
persistedEnv,
)
const shellOpenAIModel = sanitizeProviderConfigValue(
processEnv.OPENAI_MODEL,
processEnv as SecretValueSource,
const shellOpenAIModel = normalizeProfileModel(
sanitizeProviderConfigValue(
processEnv.OPENAI_MODEL,
processEnv as SecretValueSource,
),
)
const shellOpenAIBaseUrl = sanitizeProviderConfigValue(
processEnv.OPENAI_BASE_URL,
processEnv as SecretValueSource,
)
const persistedGeminiModel = sanitizeProviderConfigValue(
persistedEnv.GEMINI_MODEL,
persistedEnv,
const persistedGeminiModel = normalizeProfileModel(
sanitizeProviderConfigValue(
persistedEnv.GEMINI_MODEL,
persistedEnv,
),
)
const persistedGeminiBaseUrl = sanitizeProviderConfigValue(
persistedEnv.GEMINI_BASE_URL,
persistedEnv,
)
const shellGeminiModel = sanitizeProviderConfigValue(
processEnv.GEMINI_MODEL,
processEnv as SecretValueSource,
const shellGeminiModel = normalizeProfileModel(
sanitizeProviderConfigValue(
processEnv.GEMINI_MODEL,
processEnv as SecretValueSource,
),
)
const shellGeminiBaseUrl = sanitizeProviderConfigValue(
processEnv.GEMINI_BASE_URL,
@@ -702,11 +743,15 @@ export async function buildLaunchEnv(options: {
delete env.CLAUDE_CODE_USE_VERTEX
delete env.CLAUDE_CODE_USE_FOUNDRY
const shellMistralModel = sanitizeProviderConfigValue(
processEnv.MISTRAL_MODEL,
const shellMistralModel = normalizeProfileModel(
sanitizeProviderConfigValue(
processEnv.MISTRAL_MODEL,
),
)
const persistedMistralModel = sanitizeProviderConfigValue(
persistedEnv.MISTRAL_MODEL,
const persistedMistralModel = normalizeProfileModel(
sanitizeProviderConfigValue(
persistedEnv.MISTRAL_MODEL,
),
)
const shellMistralBaseUrl = sanitizeProviderConfigValue(
processEnv.MISTRAL_BASE_URL,

View File

@@ -221,6 +221,23 @@ describe('applyProviderProfileToProcessEnv', () => {
expect(process.env.OPENAI_BASE_URL).toBe('https://api.openai.com/v1')
})
test('openai profile with semicolon-separated multi-model string sets only first model in OPENAI_MODEL', async () => {
const { applyProviderProfileToProcessEnv } =
await importFreshProviderProfileModules()
applyProviderProfileToProcessEnv(
buildProfile({
provider: 'openai',
baseUrl: 'https://api.openai.com/v1',
model: 'glm-4.7; glm-4.7-flash; glm-4.7-plus',
}),
)
expect(process.env.OPENAI_MODEL).toBe('glm-4.7')
expect(String(process.env.CLAUDE_CODE_USE_OPENAI)).toBe('1')
expect(process.env.OPENAI_BASE_URL).toBe('https://api.openai.com/v1')
})
test('anthropic profile with multi-model string sets only first model in ANTHROPIC_MODEL', async () => {
const { applyProviderProfileToProcessEnv } =
await importFreshProviderProfileModules()
@@ -236,6 +253,34 @@ describe('applyProviderProfileToProcessEnv', () => {
expect(process.env.ANTHROPIC_MODEL).toBe('claude-sonnet-4-6')
expect(process.env.ANTHROPIC_BASE_URL).toBe('https://api.anthropic.com')
})
test('gemini profile with semicolon-separated multi-model string sets only first model in GEMINI_MODEL', async () => {
const { applyProviderProfileToProcessEnv } =
await importFreshProviderProfileModules()
applyProviderProfileToProcessEnv(
buildGeminiProfile({
model: 'gemini-3-flash-preview; gemini-3-pro-preview',
}),
)
expect(process.env.GEMINI_MODEL).toBe('gemini-3-flash-preview')
expect(process.env.CLAUDE_CODE_USE_GEMINI).toBe('1')
})
test('mistral profile with semicolon-separated multi-model string sets only first model in MISTRAL_MODEL', async () => {
const { applyProviderProfileToProcessEnv } =
await importFreshProviderProfileModules()
applyProviderProfileToProcessEnv(
buildMistralProfile({
model: 'devstral-latest; mistral-medium-latest',
}),
)
expect(process.env.MISTRAL_MODEL).toBe('devstral-latest')
expect(process.env.CLAUDE_CODE_USE_MISTRAL).toBe('1')
})
})
describe('applyActiveProviderProfileFromConfig', () => {
@@ -837,6 +882,24 @@ describe('getProfileModelOptions', () => {
])
})
test('generates options for semicolon-separated multi-model profile', async () => {
const { getProfileModelOptions } =
await importFreshProviderProfileModules()
const options = getProfileModelOptions(
buildProfile({
name: 'Test Provider',
model: 'glm-4.7; glm-4.7-flash; glm-4.7-plus',
}),
)
expect(options).toEqual([
{ value: 'glm-4.7', label: 'glm-4.7', description: 'Provider: Test Provider' },
{ value: 'glm-4.7-flash', label: 'glm-4.7-flash', description: 'Provider: Test Provider' },
{ value: 'glm-4.7-plus', label: 'glm-4.7-plus', description: 'Provider: Test Provider' },
])
})
test('returns single option for single-model profile', async () => {
const { getProfileModelOptions } =
await importFreshProviderProfileModules()

View File

@@ -458,7 +458,7 @@ function isProcessEnvAlignedWithProfile(
processEnv.CLAUDE_CODE_USE_VERTEX === undefined &&
processEnv.CLAUDE_CODE_USE_FOUNDRY === undefined &&
sameOptionalEnvValue(processEnv.MISTRAL_BASE_URL, profile.baseUrl) &&
sameOptionalEnvValue(processEnv.MISTRAL_MODEL, profile.model) &&
sameOptionalEnvValue(processEnv.MISTRAL_MODEL, getPrimaryModel(profile.model)) &&
(!includeApiKey ||
sameOptionalEnvValue(processEnv.MISTRAL_API_KEY, profile.apiKey))
)
@@ -474,7 +474,7 @@ function isProcessEnvAlignedWithProfile(
processEnv.CLAUDE_CODE_USE_VERTEX === undefined &&
processEnv.CLAUDE_CODE_USE_FOUNDRY === undefined &&
sameOptionalEnvValue(processEnv.GEMINI_BASE_URL, profile.baseUrl) &&
sameOptionalEnvValue(processEnv.GEMINI_MODEL, profile.model) &&
sameOptionalEnvValue(processEnv.GEMINI_MODEL, getPrimaryModel(profile.model)) &&
(!includeApiKey ||
sameOptionalEnvValue(processEnv.GEMINI_API_KEY, profile.apiKey))
)
@@ -578,7 +578,7 @@ export function applyProviderProfileToProcessEnv(profile: ProviderProfile): void
if (profile.provider === 'mistral') {
process.env.CLAUDE_CODE_USE_MISTRAL = '1'
process.env.MISTRAL_BASE_URL = profile.baseUrl
process.env.MISTRAL_MODEL = profile.model
process.env.MISTRAL_MODEL = getPrimaryModel(profile.model)
if (profile.apiKey) {
process.env.MISTRAL_API_KEY = profile.apiKey
@@ -595,7 +595,7 @@ export function applyProviderProfileToProcessEnv(profile: ProviderProfile): void
if (profile.provider === 'gemini') {
process.env.CLAUDE_CODE_USE_GEMINI = '1'
process.env.GEMINI_BASE_URL = profile.baseUrl
process.env.GEMINI_MODEL = profile.model
process.env.GEMINI_MODEL = getPrimaryModel(profile.model)
if (profile.apiKey) {
process.env.GEMINI_API_KEY = profile.apiKey
@@ -930,7 +930,7 @@ export function setActiveProviderProfile(
case 'gemini':
return (
buildGeminiProfileEnv({
model: activeProfile.model,
model: getPrimaryModel(activeProfile.model),
baseUrl: activeProfile.baseUrl,
apiKey: activeProfile.apiKey,
authMode: 'api-key',
@@ -940,7 +940,7 @@ export function setActiveProviderProfile(
case 'mistral':
return (
buildMistralProfileEnv({
model: activeProfile.model,
model: getPrimaryModel(activeProfile.model),
baseUrl: activeProfile.baseUrl,
apiKey: activeProfile.apiKey,
processEnv: process.env,
@@ -951,7 +951,7 @@ export function setActiveProviderProfile(
? (
buildOpenAIProfileEnv({
goal: 'balanced',
model: activeProfile.model,
model: getPrimaryModel(activeProfile.model),
baseUrl: activeProfile.baseUrl,
apiKey: activeProfile.apiKey,
processEnv: process.env,
@@ -968,7 +968,7 @@ export function setActiveProviderProfile(
profile: 'openai' as ProviderProfileStartup,
env: {
OPENAI_BASE_URL: activeProfile.baseUrl,
OPENAI_MODEL: activeProfile.model,
OPENAI_MODEL: getPrimaryModel(activeProfile.model),
OPENAI_API_KEY: activeProfile.apiKey,
},
} as const)