fix: custom OPENAI_BASE_URL always wins over Codex model alias detection (#222)
* feat: add --provider CLI flag for multi-provider support

  Adds a --provider flag that maps friendly provider names to the environment variables the codebase uses for provider detection. No more manual env-var configuration; users can now simply run:

    openclaude --provider openai --model gpt-4o
    openclaude --provider gemini --model gemini-2.0-flash
    openclaude --provider ollama --model llama3.2
    openclaude --provider bedrock
    openclaude --provider vertex

  Implementation details:
  - providerFlag.ts: core logic that maps provider names to env vars, using ??= so explicit env vars always win over the flag defaults
  - providerFlag.test.ts: 18 tests covering all 7 providers, error messages, model passthrough, and env-var precedence
  - cli.tsx: early fast-path (mirrors the --bare pattern) that sets env vars before Commander option-building and module constants run
  - main.tsx: adds --provider to the Commander option chain for --help

  Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>

* fix: custom OPENAI_BASE_URL always wins over Codex model alias detection

  When OPENAI_MODEL=gpt-5.4 (or gpt-5.4-mini) and a custom OPENAI_BASE_URL is set (Azure, OpenRouter, etc.), the transport was incorrectly forced to codex_responses because gpt-5.4 is in CODEX_ALIAS_MODELS. This caused requests to be sent with Codex auth instead of the user's API key, resulting in 401 Unauthorized errors.

  Fix: only use codex_responses when the base URL is explicitly the Codex endpoint, or when no custom base URL is set and the model is a Codex alias. An explicit OPENAI_BASE_URL always takes priority over model-name-based Codex detection.

  Verified locally: gpt-5.4 via OpenRouter now correctly shows Provider=OpenRouter, Endpoint=https://openrouter.ai/api/v1 instead of routing to chatgpt.com/backend-api/codex.

  Fixes #200, #203

---------

Co-authored-by: Claude Sonnet 4.6 <noreply@anthropic.com>
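The transport-selection change itself is not part of the diff below, which only adds the flag's test file. As a minimal sketch of the precedence rule the message describes: resolveTransport, CODEX_BASE_URL, and the 'openai_chat' fallback name here are illustrative assumptions, not the repository's actual identifiers; only CODEX_ALIAS_MODELS and codex_responses are named in the commit message.

// Sketch only; identifiers other than CODEX_ALIAS_MODELS and
// 'codex_responses' are assumed for illustration.
const CODEX_BASE_URL = 'https://chatgpt.com/backend-api/codex'
const CODEX_ALIAS_MODELS = new Set(['gpt-5.4', 'gpt-5.4-mini'])

type Transport = 'codex_responses' | 'openai_chat'

function resolveTransport(model: string, baseURL?: string): Transport {
  // An explicit custom base URL (Azure, OpenRouter, ...) always wins:
  // never force Codex auth just because the model name is a Codex alias.
  if (baseURL && baseURL !== CODEX_BASE_URL) return 'openai_chat'
  // Codex transport only when the URL is explicitly the Codex endpoint...
  if (baseURL === CODEX_BASE_URL) return 'codex_responses'
  // ...or when no custom URL is set and the model is a known Codex alias.
  return CODEX_ALIAS_MODELS.has(model) ? 'codex_responses' : 'openai_chat'
}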
src/utils/providerFlag.test.ts (new file, 139 lines)
@@ -0,0 +1,139 @@
import { describe, expect, test, afterEach } from 'bun:test'
import { parseProviderFlag, applyProviderFlag, VALID_PROVIDERS } from './providerFlag.js'

const originalEnv = { ...process.env }

afterEach(() => {
  for (const key of [
    'CLAUDE_CODE_USE_OPENAI',
    'CLAUDE_CODE_USE_GEMINI',
    'CLAUDE_CODE_USE_GITHUB',
    'CLAUDE_CODE_USE_BEDROCK',
    'CLAUDE_CODE_USE_VERTEX',
    'OPENAI_BASE_URL',
    'OPENAI_API_KEY',
    'OPENAI_MODEL',
    'GEMINI_MODEL',
  ]) {
    if (originalEnv[key] === undefined) delete process.env[key]
    else process.env[key] = originalEnv[key]
  }
})

// --- parseProviderFlag ---

describe('parseProviderFlag', () => {
  test('returns provider name when --provider flag present', () => {
    expect(parseProviderFlag(['--provider', 'openai'])).toBe('openai')
  })

  test('returns provider name with --model alongside', () => {
    expect(parseProviderFlag(['--provider', 'gemini', '--model', 'gemini-2.0-flash'])).toBe('gemini')
  })

  test('returns null when --provider flag absent', () => {
    expect(parseProviderFlag(['--model', 'gpt-4o'])).toBeNull()
  })

  test('returns null for empty args', () => {
    expect(parseProviderFlag([])).toBeNull()
  })

  test('returns null when --provider has no value', () => {
    expect(parseProviderFlag(['--provider'])).toBeNull()
  })

  test('returns null when --provider value starts with --', () => {
    expect(parseProviderFlag(['--provider', '--model'])).toBeNull()
  })
})

// --- applyProviderFlag ---

describe('applyProviderFlag - anthropic', () => {
  test('sets no env vars for anthropic (default)', () => {
    const result = applyProviderFlag('anthropic', [])
    expect(result.error).toBeUndefined()
    expect(process.env.CLAUDE_CODE_USE_OPENAI).toBeUndefined()
    expect(process.env.CLAUDE_CODE_USE_GEMINI).toBeUndefined()
  })
})

describe('applyProviderFlag - openai', () => {
  test('sets CLAUDE_CODE_USE_OPENAI=1', () => {
    const result = applyProviderFlag('openai', [])
    expect(result.error).toBeUndefined()
    expect(process.env.CLAUDE_CODE_USE_OPENAI).toBe('1')
  })

  test('sets OPENAI_MODEL when --model is provided', () => {
    applyProviderFlag('openai', ['--model', 'gpt-4o'])
    expect(process.env.OPENAI_MODEL).toBe('gpt-4o')
  })
})

describe('applyProviderFlag - gemini', () => {
  test('sets CLAUDE_CODE_USE_GEMINI=1', () => {
    const result = applyProviderFlag('gemini', [])
    expect(result.error).toBeUndefined()
    expect(process.env.CLAUDE_CODE_USE_GEMINI).toBe('1')
  })

  test('sets GEMINI_MODEL when --model is provided', () => {
    applyProviderFlag('gemini', ['--model', 'gemini-2.0-flash'])
    expect(process.env.GEMINI_MODEL).toBe('gemini-2.0-flash')
  })
})

describe('applyProviderFlag - github', () => {
  test('sets CLAUDE_CODE_USE_GITHUB=1', () => {
    const result = applyProviderFlag('github', [])
    expect(result.error).toBeUndefined()
    expect(process.env.CLAUDE_CODE_USE_GITHUB).toBe('1')
  })
})

describe('applyProviderFlag - bedrock', () => {
  test('sets CLAUDE_CODE_USE_BEDROCK=1', () => {
    const result = applyProviderFlag('bedrock', [])
    expect(result.error).toBeUndefined()
    expect(process.env.CLAUDE_CODE_USE_BEDROCK).toBe('1')
  })
})

describe('applyProviderFlag - vertex', () => {
  test('sets CLAUDE_CODE_USE_VERTEX=1', () => {
    const result = applyProviderFlag('vertex', [])
    expect(result.error).toBeUndefined()
    expect(process.env.CLAUDE_CODE_USE_VERTEX).toBe('1')
  })
})

describe('applyProviderFlag - ollama', () => {
  test('sets CLAUDE_CODE_USE_OPENAI=1 with Ollama base URL', () => {
    const result = applyProviderFlag('ollama', [])
    expect(result.error).toBeUndefined()
    expect(process.env.CLAUDE_CODE_USE_OPENAI).toBe('1')
    expect(process.env.OPENAI_BASE_URL).toBe('http://localhost:11434/v1')
    expect(process.env.OPENAI_API_KEY).toBe('ollama')
  })

  test('sets OPENAI_MODEL when --model is provided', () => {
    applyProviderFlag('ollama', ['--model', 'llama3.2'])
    expect(process.env.OPENAI_MODEL).toBe('llama3.2')
  })

  test('does not override existing OPENAI_BASE_URL when user set a custom one', () => {
    process.env.OPENAI_BASE_URL = 'http://my-ollama:11434/v1'
    applyProviderFlag('ollama', [])
    expect(process.env.OPENAI_BASE_URL).toBe('http://my-ollama:11434/v1')
  })
})

describe('applyProviderFlag - invalid provider', () => {
  test('returns error for unknown provider', () => {
    const result = applyProviderFlag('unknown-provider', [])
    expect(result.error).toContain('unknown-provider')
    expect(result.error).toContain(VALID_PROVIDERS.join(', '))
  })
})
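The providerFlag.ts module these tests import is not shown in this commit view. Below is a minimal sketch of what the tests imply, assuming the { error?: string } result shape they assert; parseModelFlag is a local helper invented for the sketch, and the shipped module may be structured differently.

// Sketch reconstructed from the tests above, not the actual providerFlag.ts.
export const VALID_PROVIDERS = [
  'anthropic', 'openai', 'gemini', 'github', 'bedrock', 'vertex', 'ollama',
] as const

export function parseProviderFlag(argv: string[]): string | null {
  const i = argv.indexOf('--provider')
  if (i === -1) return null
  const value = argv[i + 1]
  // Treat a missing value, or a following flag, as "no provider given".
  return value && !value.startsWith('--') ? value : null
}

export function applyProviderFlag(provider: string, argv: string[]): { error?: string } {
  if (!(VALID_PROVIDERS as readonly string[]).includes(provider)) {
    return { error: `Unknown provider "${provider}". Valid providers: ${VALID_PROVIDERS.join(', ')}` }
  }
  const model = parseModelFlag(argv)
  switch (provider) {
    case 'openai':
      process.env.CLAUDE_CODE_USE_OPENAI = '1'
      if (model) process.env.OPENAI_MODEL = model
      break
    case 'ollama':
      process.env.CLAUDE_CODE_USE_OPENAI = '1'
      // ??= keeps any base URL or API key the user already exported.
      process.env.OPENAI_BASE_URL ??= 'http://localhost:11434/v1'
      process.env.OPENAI_API_KEY ??= 'ollama'
      if (model) process.env.OPENAI_MODEL = model
      break
    case 'gemini':
      process.env.CLAUDE_CODE_USE_GEMINI = '1'
      if (model) process.env.GEMINI_MODEL = model
      break
    case 'github':
      process.env.CLAUDE_CODE_USE_GITHUB = '1'
      break
    case 'bedrock':
      process.env.CLAUDE_CODE_USE_BEDROCK = '1'
      break
    case 'vertex':
      process.env.CLAUDE_CODE_USE_VERTEX = '1'
      break
    // 'anthropic' is the default provider: no env vars to set.
  }
  return {}
}

// Helper for the sketch: returns the value following --model, if any.
function parseModelFlag(argv: string[]): string | null {
  const i = argv.indexOf('--model')
  const value = i === -1 ? undefined : argv[i + 1]
  return value && !value.startsWith('--') ? value : null
}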