fix(model): codex/nvidia-nim/minimax now read OPENAI_MODEL env (#815)

getUserSpecifiedModelSetting() decides which env var to consult based on
the active provider. The check included openai and github but omitted
codex, nvidia-nim, and minimax — even though all three use the OpenAI
shim transport and get their model routing via CLAUDE_CODE_USE_OPENAI=1
+ OPENAI_MODEL (set by applyProviderProfileToProcessEnv).

Concrete failure: user switches from Moonshot profile (which persisted
settings.model='kimi-k2.6') to the Codex profile. The new profile
correctly writes OPENAI_MODEL=codexplan + base URL to
chatgpt.com/backend-api/codex. Startup banner reflects Codex / gpt-5.4
correctly. But at request time getUserSpecifiedModelSetting() returns
early for provider='codex' (not in the env-consult list), falls through
to the stale settings.model='kimi-k2.6', and the Codex API rejects:

  API Error 400: "The 'kimi-k2.6' model is not supported when using
  Codex with a ChatGPT account."

Fix: extract an isOpenAIShimProvider flag covering openai|codex|github|
nvidia-nim|minimax — all providers that set OPENAI_MODEL as their model
env var. The Gemini and Mistral branches stay as-is (they use
GEMINI_MODEL / MISTRAL_MODEL).

Five regression tests: one pinning the fix for each newly covered provider
(codex, nvidia-nim, minimax), plus guard tests confirming that openai and
github — which already worked — keep reading OPENAI_MODEL.

Co-authored-by: OpenClaude <openclaude@gitlawb.com>
This commit is contained in:
Kevin Codex
2026-04-22 09:01:44 +08:00
committed by GitHub
parent ee19159c17
commit 458120889f
2 changed files with 129 additions and 1 deletions

View File

@@ -0,0 +1,115 @@
import { afterEach, beforeEach, expect, test } from 'bun:test'
import { saveGlobalConfig } from '../config.js'
import { getUserSpecifiedModelSetting } from './model.js'
// Snapshot of every environment variable these tests mutate, captured once at
// module load. afterEach() restores each key to this snapshot so the tests
// cannot leak provider state into other test files in the same run.
// NOTE: kept as a plain object literal (no `as const`, no helper) so that
// `keyof typeof SAVED_ENV` in restoreEnv() stays the exact union of these keys.
const SAVED_ENV = {
// Provider-selection switches (CLAUDE_CODE_USE_*).
CLAUDE_CODE_USE_OPENAI: process.env.CLAUDE_CODE_USE_OPENAI,
CLAUDE_CODE_USE_GEMINI: process.env.CLAUDE_CODE_USE_GEMINI,
CLAUDE_CODE_USE_GITHUB: process.env.CLAUDE_CODE_USE_GITHUB,
CLAUDE_CODE_USE_MISTRAL: process.env.CLAUDE_CODE_USE_MISTRAL,
CLAUDE_CODE_USE_BEDROCK: process.env.CLAUDE_CODE_USE_BEDROCK,
CLAUDE_CODE_USE_VERTEX: process.env.CLAUDE_CODE_USE_VERTEX,
CLAUDE_CODE_USE_FOUNDRY: process.env.CLAUDE_CODE_USE_FOUNDRY,
// Per-provider flags/credentials set directly in individual tests below.
NVIDIA_NIM: process.env.NVIDIA_NIM,
MINIMAX_API_KEY: process.env.MINIMAX_API_KEY,
// OpenAI-shim routing values (model + endpoint) under test.
OPENAI_MODEL: process.env.OPENAI_MODEL,
OPENAI_BASE_URL: process.env.OPENAI_BASE_URL,
CODEX_API_KEY: process.env.CODEX_API_KEY,
CHATGPT_ACCOUNT_ID: process.env.CHATGPT_ACCOUNT_ID,
}
/**
 * Restore a single environment variable to its module-load snapshot.
 * When the snapshot holds a value, re-assign it; when the variable was
 * originally unset, delete the key so it reads as absent again.
 */
function restoreEnv(key: keyof typeof SAVED_ENV): void {
  const snapshot = SAVED_ENV[key]
  if (snapshot !== undefined) {
    process.env[key] = snapshot
  } else {
    delete process.env[key]
  }
}
beforeEach(() => {
delete process.env.CLAUDE_CODE_USE_OPENAI
delete process.env.CLAUDE_CODE_USE_GEMINI
delete process.env.CLAUDE_CODE_USE_GITHUB
delete process.env.CLAUDE_CODE_USE_MISTRAL
delete process.env.CLAUDE_CODE_USE_BEDROCK
delete process.env.CLAUDE_CODE_USE_VERTEX
delete process.env.CLAUDE_CODE_USE_FOUNDRY
delete process.env.NVIDIA_NIM
delete process.env.MINIMAX_API_KEY
delete process.env.OPENAI_MODEL
delete process.env.OPENAI_BASE_URL
delete process.env.CODEX_API_KEY
delete process.env.CHATGPT_ACCOUNT_ID
saveGlobalConfig(current => ({
...current,
model: undefined,
}))
})
// Undo whatever each test changed: put every tracked env var back to its
// module-load snapshot and clear the persisted model setting.
afterEach(() => {
  const trackedKeys = Object.keys(SAVED_ENV) as Array<keyof typeof SAVED_ENV>
  trackedKeys.forEach(restoreEnv)
  saveGlobalConfig(current => ({ ...current, model: undefined }))
})
// Regression for the provider-switch bug: a Moonshot session left
// settings.model='kimi-k2.6' persisted; switching to the Codex profile sets
// OPENAI_MODEL=codexplan plus the Codex base URL, but the old lookup ignored
// env for provider 'codex' and served the stale persisted model, which the
// Codex API rejected ("The 'kimi-k2.6' model is not supported when using
// Codex with a ChatGPT account").
test('codex provider reads OPENAI_MODEL, not stale settings.model', () => {
  saveGlobalConfig(current => ({ ...current, model: 'kimi-k2.6' }))
  Object.assign(process.env, {
    CLAUDE_CODE_USE_OPENAI: '1',
    OPENAI_BASE_URL: 'https://chatgpt.com/backend-api/codex',
    OPENAI_MODEL: 'codexplan',
    CODEX_API_KEY: 'codex-test',
    CHATGPT_ACCOUNT_ID: 'acct_test',
  })
  expect(getUserSpecifiedModelSetting()).toBe('codexplan')
})
// Same regression shape for the nvidia-nim provider (also an OpenAI-shim
// provider): env model must win over a stale persisted settings.model.
test('nvidia-nim provider reads OPENAI_MODEL, not stale settings.model', () => {
  saveGlobalConfig(current => ({ ...current, model: 'kimi-k2.6' }))
  Object.assign(process.env, {
    NVIDIA_NIM: '1',
    CLAUDE_CODE_USE_OPENAI: '1',
    OPENAI_MODEL: 'nvidia/llama-3.1-nemotron-70b-instruct',
  })
  expect(getUserSpecifiedModelSetting()).toBe(
    'nvidia/llama-3.1-nemotron-70b-instruct',
  )
})
// Same regression shape for the minimax provider: OPENAI_MODEL from the
// active profile must override the stale persisted settings.model.
test('minimax provider reads OPENAI_MODEL, not stale settings.model', () => {
  saveGlobalConfig(current => ({ ...current, model: 'kimi-k2.6' }))
  Object.assign(process.env, {
    MINIMAX_API_KEY: 'minimax-test',
    CLAUDE_CODE_USE_OPENAI: '1',
    OPENAI_MODEL: 'MiniMax-M2.5',
  })
  expect(getUserSpecifiedModelSetting()).toBe('MiniMax-M2.5')
})
// Guard: the plain openai provider already consulted OPENAI_MODEL before the
// fix; pin that behavior so the refactor cannot regress it.
test('openai provider still reads OPENAI_MODEL (regression guard)', () => {
  saveGlobalConfig(current => ({ ...current, model: 'stale-default' }))
  Object.assign(process.env, {
    CLAUDE_CODE_USE_OPENAI: '1',
    OPENAI_MODEL: 'gpt-4o',
  })
  expect(getUserSpecifiedModelSetting()).toBe('gpt-4o')
})
// Guard: github also consulted OPENAI_MODEL before the fix; pin that
// behavior so the refactor cannot regress it.
test('github provider still reads OPENAI_MODEL (regression guard)', () => {
  saveGlobalConfig(current => ({ ...current, model: 'stale-default' }))
  Object.assign(process.env, {
    CLAUDE_CODE_USE_GITHUB: '1',
    OPENAI_MODEL: 'github:copilot',
  })
  expect(getUserSpecifiedModelSetting()).toBe('github:copilot')
})

View File

@@ -91,11 +91,24 @@ export function getUserSpecifiedModelSetting(): ModelSetting | undefined {
const setting = normalizeModelSetting(settings.model)
// Read the model env var that matches the active provider to prevent
// cross-provider leaks (e.g. ANTHROPIC_MODEL sent to the OpenAI API).
//
// All OpenAI-shim providers (openai, codex, github, nvidia-nim, minimax)
// set CLAUDE_CODE_USE_OPENAI=1 + OPENAI_MODEL via
// applyProviderProfileToProcessEnv. Earlier this check only included
// openai/github — codex/nvidia-nim/minimax fell through to the stale
// settings.model, so switching from (say) Moonshot to Codex kept firing
// `kimi-k2.6` at the Codex endpoint and getting 400s.
const provider = getAPIProvider()
const isOpenAIShimProvider =
provider === 'openai' ||
provider === 'codex' ||
provider === 'github' ||
provider === 'nvidia-nim' ||
provider === 'minimax'
specifiedModel =
(provider === 'gemini' ? process.env.GEMINI_MODEL : undefined) ||
(provider === 'mistral' ? process.env.MISTRAL_MODEL : undefined) ||
(provider === 'openai' || provider === 'gemini' || provider === 'mistral' || provider === 'github' ? process.env.OPENAI_MODEL : undefined) ||
(isOpenAIShimProvider ? process.env.OPENAI_MODEL : undefined) ||
(provider === 'firstParty' ? process.env.ANTHROPIC_MODEL : undefined) ||
setting ||
undefined