fix: route OpenAI Codex shortcuts to correct endpoint (#566)

* feat: enhance codex provider resolution with shortcut aliases and improved base URL handling

* fix: enhance codex alias resolution to include shell model

* feat: enhance Codex provider resolution to support new aliases and base URL handling

* fix: update base URL resolution logic for Codex models in GitHub mode

* fix: update provider transport logic to enforce Codex responses and adjust base URL handling

* fix: update provider request resolution to respect custom base URLs and adjust transport logic

* fix: restore OPENAI_MODEL environment variable handling in tests and provider config
Author: Meetpatel006
Date: 2026-04-13 16:01:15 +05:30 (committed by GitHub)
Parent: 64298a663f
Commit: 7c8bdcc3e2
4 changed files with 102 additions and 29 deletions
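In short: the codexplan and codexspark shortcuts now resolve to the Codex backend endpoint unless the user has pointed the provider somewhere else. A minimal sketch of the intended behaviour, mirroring the new tests below (the import path is illustrative, and GitHub/Mistral modes are assumed off):

// Shortcut with no custom base URL routes to the Codex backend.
import { resolveProviderRequest } from './services/api/providerConfig.js'

delete process.env.OPENAI_BASE_URL
const viaShortcut = resolveProviderRequest({ model: 'codexplan' })
// viaShortcut.transport     === 'codex_responses'
// viaShortcut.resolvedModel === 'gpt-5.4'
// viaShortcut.baseUrl       === 'https://chatgpt.com/backend-api/codex'

// A user-supplied base URL (e.g. a local provider) still wins.
process.env.OPENAI_MODEL = 'codexplan'
process.env.OPENAI_BASE_URL = 'http://localhost:11434/v1'
const viaCustomUrl = resolveProviderRequest()
// viaCustomUrl.transport === 'chat_completions'
// viaCustomUrl.baseUrl   === 'http://localhost:11434/v1'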


@@ -264,7 +264,7 @@ test('buildCurrentProviderSummary labels generic local openai-compatible provide
   expect(summary.endpointLabel).toBe('http://127.0.0.1:8080/v1')
 })

-test('buildCurrentProviderSummary does not relabel local gpt-5.4 providers as Codex', () => {
+test('buildCurrentProviderSummary does not relabel local gpt-5.4 providers as Codex when custom base URL is set', () => {
   const summary = buildCurrentProviderSummary({
     processEnv: {
       CLAUDE_CODE_USE_OPENAI: '1',


@@ -5,7 +5,7 @@
  * Addresses: https://github.com/Gitlawb/openclaude/issues/55
  */

-import { isLocalProviderUrl } from '../services/api/providerConfig.js'
+import { isLocalProviderUrl, resolveProviderRequest } from '../services/api/providerConfig.js'
 import { getLocalOpenAICompatibleProviderLabel } from '../utils/providerDiscovery.js'
 import { getSettings_DEPRECATED } from '../utils/settings/settings.js'
 import { parseUserSpecifiedModel } from '../utils/model/model.js'
@@ -110,10 +110,17 @@ function detectProvider(): { name: string; model: string; baseUrl: string; isLoc
   if (useOpenAI) {
     const rawModel = process.env.OPENAI_MODEL || 'gpt-4o'
-    const baseUrl = process.env.OPENAI_BASE_URL || 'https://api.openai.com/v1'
+    const resolvedRequest = resolveProviderRequest({
+      model: rawModel,
+      baseUrl: process.env.OPENAI_BASE_URL,
+    })
+    const baseUrl = resolvedRequest.baseUrl
     const isLocal = isLocalProviderUrl(baseUrl)
     let name = 'OpenAI'
-    if (/deepseek/i.test(baseUrl) || /deepseek/i.test(rawModel)) name = 'DeepSeek'
+    // Override to Codex when resolved endpoint is Codex
+    if (resolvedRequest.transport === 'codex_responses' || baseUrl.includes('chatgpt.com/backend-api/codex')) {
+      name = 'Codex'
+    } else if (/deepseek/i.test(baseUrl) || /deepseek/i.test(rawModel)) name = 'DeepSeek'
     else if (/openrouter/i.test(baseUrl)) name = 'OpenRouter'
     else if (/together/i.test(baseUrl)) name = 'Together AI'
     else if (/groq/i.test(baseUrl)) name = 'Groq'
@@ -123,26 +130,9 @@ function detectProvider(): { name: string; model: string; baseUrl: string; isLoc
     else if (isLocal) name = getLocalOpenAICompatibleProviderLabel(baseUrl)

     // Resolve model alias to actual model name + reasoning effort
-    let displayModel = rawModel
-    const codexAliases: Record<string, { model: string; reasoningEffort?: string }> = {
-      codexplan: { model: 'gpt-5.4', reasoningEffort: 'high' },
-      'gpt-5.4': { model: 'gpt-5.4', reasoningEffort: 'high' },
-      'gpt-5.3-codex': { model: 'gpt-5.3-codex', reasoningEffort: 'high' },
-      'gpt-5.3-codex-spark': { model: 'gpt-5.3-codex-spark' },
-      codexspark: { model: 'gpt-5.3-codex-spark' },
-      'gpt-5.2-codex': { model: 'gpt-5.2-codex', reasoningEffort: 'high' },
-      'gpt-5.1-codex-max': { model: 'gpt-5.1-codex-max', reasoningEffort: 'high' },
-      'gpt-5.1-codex-mini': { model: 'gpt-5.1-codex-mini' },
-      'gpt-5.4-mini': { model: 'gpt-5.4-mini', reasoningEffort: 'medium' },
-      'gpt-5.2': { model: 'gpt-5.2', reasoningEffort: 'medium' },
-    }
-    const alias = rawModel.toLowerCase()
-    if (alias in codexAliases) {
-      const resolved = codexAliases[alias]
-      displayModel = resolved.model
-      if (resolved.reasoningEffort) {
-        displayModel = `${displayModel} (${resolved.reasoningEffort})`
-      }
+    let displayModel = resolvedRequest.resolvedModel
+    if (resolvedRequest.reasoning?.effort) {
+      displayModel = `${displayModel} (${resolvedRequest.reasoning.effort})`
     }

     return { name, model: displayModel, baseUrl, isLocal }
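detectProvider no longer keeps its own alias table; its summary follows whatever resolveProviderRequest returns. A hedged example of the resulting value for the codexplan shortcut, derived from the diff above rather than captured output:

// OPENAI_MODEL=codexplan, OPENAI_BASE_URL unset:
// detectProvider() -> {
//   name: 'Codex',                                    // transport resolved to 'codex_responses'
//   model: 'gpt-5.4 (high)',                          // resolvedModel plus reasoning effort
//   baseUrl: 'https://chatgpt.com/backend-api/codex',
//   isLocal: false,
// }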


@@ -18,6 +18,7 @@ const originalEnv = {
   OPENAI_BASE_URL: process.env.OPENAI_BASE_URL,
   OPENAI_API_BASE: process.env.OPENAI_API_BASE,
   CLAUDE_CODE_USE_GITHUB: process.env.CLAUDE_CODE_USE_GITHUB,
+  OPENAI_MODEL: process.env.OPENAI_MODEL,
 }

 afterEach(() => {
@@ -30,6 +31,9 @@ afterEach(() => {
   if (originalEnv.CLAUDE_CODE_USE_GITHUB === undefined) delete process.env.CLAUDE_CODE_USE_GITHUB
   else process.env.CLAUDE_CODE_USE_GITHUB = originalEnv.CLAUDE_CODE_USE_GITHUB
+  if (originalEnv.OPENAI_MODEL === undefined) delete process.env.OPENAI_MODEL
+  else process.env.OPENAI_MODEL = originalEnv.OPENAI_MODEL

   while (tempDirs.length > 0) {
     const dir = tempDirs.pop()
     if (dir) rmSync(dir, { recursive: true, force: true })
@@ -84,6 +88,18 @@ describe('Codex provider config', () => {
     expect(resolved.transport).toBe('codex_responses')
     expect(resolved.resolvedModel).toBe('gpt-5.4')
     expect(resolved.reasoning).toEqual({ effort: 'high' })
+    expect(resolved.baseUrl).toBe('https://chatgpt.com/backend-api/codex')
+  })
+
+  test('resolves codexspark alias to Codex transport with Codex base URL', () => {
+    delete process.env.OPENAI_BASE_URL
+    delete process.env.OPENAI_API_BASE
+    delete process.env.CLAUDE_CODE_USE_GITHUB
+    const resolved = resolveProviderRequest({ model: 'codexspark' })
+    expect(resolved.transport).toBe('codex_responses')
+    expect(resolved.resolvedModel).toBe('gpt-5.3-codex-spark')
+    expect(resolved.baseUrl).toBe('https://chatgpt.com/backend-api/codex')
   })

   test('does not force Codex transport when a local non-Codex base URL is explicit', () => {
@@ -118,6 +134,37 @@ describe('Codex provider config', () => {
     expect(resolved.baseUrl).toBe('https://chatgpt.com/backend-api/codex')
   })

+  test('default gpt-4o uses OpenAI base URL (no regression)', () => {
+    delete process.env.OPENAI_BASE_URL
+    delete process.env.CLAUDE_CODE_USE_GITHUB
+    const resolved = resolveProviderRequest({ model: 'gpt-4o' })
+    expect(resolved.transport).toBe('chat_completions')
+    expect(resolved.baseUrl).toBe('https://api.openai.com/v1')
+    expect(resolved.resolvedModel).toBe('gpt-4o')
+  })
+
+  test('resolves codexplan from env var OPENAI_MODEL to Codex endpoint', () => {
+    process.env.OPENAI_MODEL = 'codexplan'
+    delete process.env.OPENAI_BASE_URL
+    delete process.env.CLAUDE_CODE_USE_GITHUB
+    const resolved = resolveProviderRequest()
+    expect(resolved.transport).toBe('codex_responses')
+    expect(resolved.baseUrl).toBe('https://chatgpt.com/backend-api/codex')
+    expect(resolved.resolvedModel).toBe('gpt-5.4')
+  })
+
+  test('does not override custom base URL for codexplan (e.g., local provider)', () => {
+    process.env.OPENAI_MODEL = 'codexplan'
+    process.env.OPENAI_BASE_URL = 'http://localhost:11434/v1'
+    delete process.env.CLAUDE_CODE_USE_GITHUB
+    const resolved = resolveProviderRequest()
+    expect(resolved.transport).toBe('chat_completions')
+    expect(resolved.baseUrl).toBe('http://localhost:11434/v1')
+  })
+
   test('loads Codex credentials from auth.json fallback', () => {
     const authPath = createTempAuthJson({
       tokens: {


@@ -60,6 +60,8 @@ const CODEX_ALIAS_MODELS: Record<
 type CodexAlias = keyof typeof CODEX_ALIAS_MODELS
 type ReasoningEffort = 'low' | 'medium' | 'high' | 'xhigh'

+const OPENAI_CODEX_SHORTCUT_ALIASES = new Set(['codexplan', 'codexspark'])
+
 export type ProviderTransport = 'chat_completions' | 'codex_responses'

 export type ResolvedProviderRequest = {
@@ -220,6 +222,12 @@ export function isCodexAlias(model: string): boolean {
   return base in CODEX_ALIAS_MODELS
 }

+function isOpenAICodexShortcutAlias(model: string): boolean {
+  const normalized = model.trim().toLowerCase()
+  const base = normalized.split('?', 1)[0] ?? normalized
+  return OPENAI_CODEX_SHORTCUT_ALIASES.has(base)
+}
+
 export function shouldUseCodexTransport(
   model: string,
   baseUrl: string | undefined,
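The helper only matches the two shell shortcuts after trimming, lowercasing, and dropping any '?...' suffix; full Codex model names keep going through the existing isCodexAlias check. Illustrative inputs (not taken from the tests):

// isOpenAICodexShortcutAlias('codexplan')      -> true
// isOpenAICodexShortcutAlias('  CodexSpark ')  -> true   (trimmed and lowercased)
// isOpenAICodexShortcutAlias('codexplan?x=1')  -> true   (anything after '?' is ignored)
// isOpenAICodexShortcutAlias('gpt-5.4')        -> false  (not a shortcut; isCodexAlias still applies)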
@@ -367,13 +375,41 @@ export function resolveProviderRequest(options?: {
     options?.fallbackModel?.trim() ||
     (isGithubMode ? 'github:copilot' : 'gpt-4o')
   const descriptor = parseModelDescriptor(requestedModel)
-  const rawBaseUrl =
-    asEnvUrl(options?.baseUrl) ??
-    asEnvUrl(
-      isMistralMode ? (process.env.MISTRAL_BASE_URL ?? DEFAULT_MISTRAL_BASE_URL) : process.env.OPENAI_BASE_URL,
-    ) ??
-    asEnvUrl(process.env.OPENAI_API_BASE)
+  const explicitBaseUrl = asEnvUrl(options?.baseUrl)
+  const envBaseUrlRaw =
+    explicitBaseUrl ??
+    asEnvUrl(
+      isMistralMode
+        ? (process.env.MISTRAL_BASE_URL ?? DEFAULT_MISTRAL_BASE_URL)
+        : process.env.OPENAI_BASE_URL
+    ) ??
+    asEnvUrl(process.env.OPENAI_API_BASE)
+  const isCodexModelForGithub = isGithubMode && isCodexAlias(requestedModel)
+  const envBaseUrl =
+    isCodexModelForGithub && envBaseUrlRaw && getGithubEndpointType(envBaseUrlRaw) === 'custom'
+      ? undefined
+      : envBaseUrlRaw
+  const rawBaseUrl = explicitBaseUrl ?? envBaseUrl
+  const shellModel = process.env.OPENAI_MODEL?.trim() ?? ''
+  const envIsCodexShortcut = isOpenAICodexShortcutAlias(shellModel)
+  const envResolvedCodexModel = envIsCodexShortcut
+    ? parseModelDescriptor(shellModel).baseModel
+    : null
+  const requestedMatchesEnvCodexShortcut =
+    Boolean(options?.model) &&
+    Boolean(envResolvedCodexModel) &&
+    descriptor.baseModel === envResolvedCodexModel
+  const isCodexAliasModel =
+    isOpenAICodexShortcutAlias(requestedModel) || requestedMatchesEnvCodexShortcut
+  const hasUserSetBaseUrl = rawBaseUrl && rawBaseUrl !== DEFAULT_OPENAI_BASE_URL
+  const finalBaseUrl =
+    !isGithubMode && isCodexAliasModel && !hasUserSetBaseUrl
+      ? DEFAULT_CODEX_BASE_URL
+      : rawBaseUrl

   const githubEndpointType = isGithubMode
     ? getGithubEndpointType(rawBaseUrl)
     : 'custom'
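Resolution order after this change: an explicit baseUrl option wins, then the environment base URL (suppressed in GitHub mode when a Codex model meets a custom GitHub endpoint), and only when nothing non-default is configured does a Codex shortcut fall back to DEFAULT_CODEX_BASE_URL. A rough trace, assuming DEFAULT_OPENAI_BASE_URL is https://api.openai.com/v1 and DEFAULT_CODEX_BASE_URL is https://chatgpt.com/backend-api/codex (both consistent with the tests above):

// requestedModel = 'codexplan', OPENAI_BASE_URL unset, not GitHub mode:
//   rawBaseUrl        -> undefined
//   hasUserSetBaseUrl -> falsy
//   finalBaseUrl      -> DEFAULT_CODEX_BASE_URL        // drives the codex_responses transport below
//
// requestedModel = 'codexplan', OPENAI_BASE_URL = 'http://localhost:11434/v1':
//   rawBaseUrl        -> 'http://localhost:11434/v1'
//   hasUserSetBaseUrl -> true
//   finalBaseUrl      -> 'http://localhost:11434/v1'   // user choice respected, stays chat_completions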
@@ -386,7 +422,7 @@ export function resolveProviderRequest(options?: {
     : requestedModel

   const transport: ProviderTransport =
-    shouldUseCodexTransport(requestedModel, rawBaseUrl) ||
+    shouldUseCodexTransport(requestedModel, finalBaseUrl) ||
     (isGithubCopilot && shouldUseGithubResponsesApi(githubResolvedModel))
       ? 'codex_responses'
       : 'chat_completions'
@@ -410,7 +446,7 @@ export function resolveProviderRequest(options?: {
     requestedModel,
     resolvedModel,
     baseUrl:
-      (rawBaseUrl ??
+      (finalBaseUrl ??
       (isGithubCopilot && transport === 'codex_responses'
         ? GITHUB_COPILOT_BASE_URL
         : (isGithubMode