feat(model): add GPT-5.5 support for Codex provider (#880)
- Bump Codex provider defaults from gpt-5.4 to gpt-5.5 across all ModelConfigs
- Update codexplan alias to resolve to gpt-5.5
- Add gpt-5.5 and gpt-5.5-mini to model picker with reasoning effort mappings
- Add context window and max output token specs for gpt-5.5 family
- Add gpt-5.5 entries to COPILOT_MODELS registry
- Keep official OpenAI API preset at gpt-5.4 (API availability pending)
- Update codexShim tests to expect gpt-5.5 from codexplan alias

Co-authored-by: OpenClaude <openclaude@gitlawb.com>
This commit is contained in:
@@ -88,7 +88,7 @@ describe('Codex provider config', () => {
|
||||
|
||||
const resolved = resolveProviderRequest({ model: 'codexplan' })
|
||||
expect(resolved.transport).toBe('codex_responses')
|
||||
expect(resolved.resolvedModel).toBe('gpt-5.4')
|
||||
expect(resolved.resolvedModel).toBe('gpt-5.5')
|
||||
expect(resolved.reasoning).toEqual({ effort: 'high' })
|
||||
expect(resolved.baseUrl).toBe('https://chatgpt.com/backend-api/codex')
|
||||
})
|
||||
@@ -114,7 +114,7 @@ describe('Codex provider config', () => {
|
||||
|
||||
expect(resolved.transport).toBe('chat_completions')
|
||||
expect(resolved.baseUrl).toBe('http://127.0.0.1:8080/v1')
|
||||
expect(resolved.resolvedModel).toBe('gpt-5.4')
|
||||
expect(resolved.resolvedModel).toBe('gpt-5.5')
|
||||
})
|
||||
|
||||
test('resolves codexplan to Codex transport even when OPENAI_BASE_URL is the string "undefined"', async () => {
|
||||
@@ -161,7 +161,7 @@ describe('Codex provider config', () => {
|
||||
const resolved = resolveProviderRequest()
|
||||
expect(resolved.transport).toBe('codex_responses')
|
||||
expect(resolved.baseUrl).toBe('https://chatgpt.com/backend-api/codex')
|
||||
expect(resolved.resolvedModel).toBe('gpt-5.4')
|
||||
expect(resolved.resolvedModel).toBe('gpt-5.5')
|
||||
})
|
||||
|
||||
test('does not override custom base URL for codexplan (e.g., local provider)', async () => {
|
||||
|
||||
@@ -31,7 +31,11 @@ const CODEX_ALIAS_MODELS: Record<
|
||||
}
|
||||
> = {
|
||||
codexplan: {
|
||||
model: 'gpt-5.4',
|
||||
model: 'gpt-5.5',
|
||||
reasoningEffort: 'high',
|
||||
},
|
||||
'gpt-5.5': {
|
||||
model: 'gpt-5.5',
|
||||
reasoningEffort: 'high',
|
||||
},
|
||||
'gpt-5.4': {
|
||||
@@ -59,6 +63,10 @@ const CODEX_ALIAS_MODELS: Record<
|
||||
'gpt-5.1-codex-mini': {
|
||||
model: 'gpt-5.1-codex-mini',
|
||||
},
|
||||
'gpt-5.5-mini': {
|
||||
model: 'gpt-5.5-mini',
|
||||
reasoningEffort: 'medium',
|
||||
},
|
||||
'gpt-5.4-mini': {
|
||||
model: 'gpt-5.4-mini',
|
||||
reasoningEffort: 'medium',
|
||||
|
||||
@@ -36,7 +36,7 @@ export const CLAUDE_3_7_SONNET_CONFIG = {
|
||||
openai: 'gpt-4o-mini',
|
||||
gemini: 'gemini-2.0-flash',
|
||||
github: 'github:copilot',
|
||||
codex: 'gpt-5.4',
|
||||
codex: 'gpt-5.5',
|
||||
'nvidia-nim': 'nvidia/llama-3.1-nemotron-70b-instruct',
|
||||
minimax: 'MiniMax-M2.5',
|
||||
} as const satisfies ModelConfig
|
||||
@@ -49,7 +49,7 @@ export const CLAUDE_3_5_V2_SONNET_CONFIG = {
|
||||
openai: 'gpt-4o-mini',
|
||||
gemini: 'gemini-2.0-flash',
|
||||
github: 'github:copilot',
|
||||
codex: 'gpt-5.4',
|
||||
codex: 'gpt-5.5',
|
||||
'nvidia-nim': 'nvidia/llama-3.1-nemotron-70b-instruct',
|
||||
minimax: 'MiniMax-M2.5',
|
||||
} as const satisfies ModelConfig
|
||||
@@ -62,7 +62,7 @@ export const CLAUDE_3_5_HAIKU_CONFIG = {
|
||||
openai: 'gpt-4o-mini',
|
||||
gemini: 'gemini-2.0-flash-lite',
|
||||
github: 'github:copilot',
|
||||
codex: 'gpt-5.4',
|
||||
codex: 'gpt-5.5',
|
||||
'nvidia-nim': 'nvidia/llama-3.1-nemotron-70b-instruct',
|
||||
minimax: 'MiniMax-M2.5',
|
||||
} as const satisfies ModelConfig
|
||||
@@ -75,7 +75,7 @@ export const CLAUDE_HAIKU_4_5_CONFIG = {
|
||||
openai: 'gpt-4o-mini',
|
||||
gemini: 'gemini-2.0-flash-lite',
|
||||
github: 'github:copilot',
|
||||
codex: 'gpt-5.4',
|
||||
codex: 'gpt-5.5',
|
||||
'nvidia-nim': 'nvidia/llama-3.1-nemotron-70b-instruct',
|
||||
minimax: 'MiniMax-M2.5',
|
||||
} as const satisfies ModelConfig
|
||||
@@ -88,7 +88,7 @@ export const CLAUDE_SONNET_4_CONFIG = {
|
||||
openai: 'gpt-4o-mini',
|
||||
gemini: 'gemini-2.0-flash',
|
||||
github: 'github:copilot',
|
||||
codex: 'gpt-5.4',
|
||||
codex: 'gpt-5.5',
|
||||
'nvidia-nim': 'nvidia/llama-3.1-nemotron-70b-instruct',
|
||||
minimax: 'MiniMax-M2.5',
|
||||
} as const satisfies ModelConfig
|
||||
@@ -101,7 +101,7 @@ export const CLAUDE_SONNET_4_5_CONFIG = {
|
||||
openai: 'gpt-4o',
|
||||
gemini: 'gemini-2.0-flash',
|
||||
github: 'github:copilot',
|
||||
codex: 'gpt-5.4',
|
||||
codex: 'gpt-5.5',
|
||||
'nvidia-nim': 'nvidia/llama-3.1-nemotron-70b-instruct',
|
||||
minimax: 'MiniMax-M2.5',
|
||||
} as const satisfies ModelConfig
|
||||
@@ -114,7 +114,7 @@ export const CLAUDE_OPUS_4_CONFIG = {
|
||||
openai: 'gpt-4o',
|
||||
gemini: 'gemini-2.5-pro',
|
||||
github: 'github:copilot',
|
||||
codex: 'gpt-5.4',
|
||||
codex: 'gpt-5.5',
|
||||
'nvidia-nim': 'nvidia/llama-3.1-nemotron-70b-instruct',
|
||||
minimax: 'MiniMax-M2.5',
|
||||
} as const satisfies ModelConfig
|
||||
@@ -127,7 +127,7 @@ export const CLAUDE_OPUS_4_1_CONFIG = {
|
||||
openai: 'gpt-4o',
|
||||
gemini: 'gemini-2.5-pro',
|
||||
github: 'github:copilot',
|
||||
codex: 'gpt-5.4',
|
||||
codex: 'gpt-5.5',
|
||||
'nvidia-nim': 'nvidia/llama-3.1-nemotron-70b-instruct',
|
||||
minimax: 'MiniMax-M2.5',
|
||||
} as const satisfies ModelConfig
|
||||
@@ -140,7 +140,7 @@ export const CLAUDE_OPUS_4_5_CONFIG = {
|
||||
openai: 'gpt-4o',
|
||||
gemini: 'gemini-2.5-pro',
|
||||
github: 'github:copilot',
|
||||
codex: 'gpt-5.4',
|
||||
codex: 'gpt-5.5',
|
||||
'nvidia-nim': 'nvidia/llama-3.1-nemotron-70b-instruct',
|
||||
minimax: 'MiniMax-M2.5',
|
||||
} as const satisfies ModelConfig
|
||||
@@ -153,7 +153,7 @@ export const CLAUDE_OPUS_4_6_CONFIG = {
|
||||
openai: 'gpt-4o',
|
||||
gemini: 'gemini-2.5-pro',
|
||||
github: 'github:copilot',
|
||||
codex: 'gpt-5.4',
|
||||
codex: 'gpt-5.5',
|
||||
'nvidia-nim': 'nvidia/llama-3.1-nemotron-70b-instruct',
|
||||
minimax: 'MiniMax-M2.5',
|
||||
} as const satisfies ModelConfig
|
||||
@@ -166,7 +166,7 @@ export const CLAUDE_SONNET_4_6_CONFIG = {
|
||||
openai: 'gpt-4o',
|
||||
gemini: 'gemini-2.0-flash',
|
||||
github: 'github:copilot',
|
||||
codex: 'gpt-5.4',
|
||||
codex: 'gpt-5.5',
|
||||
'nvidia-nim': 'nvidia/llama-3.1-nemotron-70b-instruct',
|
||||
minimax: 'MiniMax-M2.5',
|
||||
} as const satisfies ModelConfig
|
||||
|
||||
@@ -32,6 +32,38 @@ export type CopilotModel = {
|
||||
}
|
||||
|
||||
export const COPILOT_MODELS: Record<string, CopilotModel> = {
|
||||
'gpt-5.5': {
|
||||
id: 'gpt-5.5',
|
||||
name: 'GPT-5.5',
|
||||
family: 'gpt',
|
||||
attachment: false,
|
||||
reasoning: true,
|
||||
tool_call: true,
|
||||
temperature: true,
|
||||
knowledge: '2025-05',
|
||||
release_date: '2025-05-01',
|
||||
last_updated: '2025-05-01',
|
||||
modalities: { input: ['text'], output: ['text'] },
|
||||
open_weights: false,
|
||||
cost: { input: 0, output: 0 },
|
||||
limit: { context: 400000, output: 32768 },
|
||||
},
|
||||
'gpt-5.5-mini': {
|
||||
id: 'gpt-5.5-mini',
|
||||
name: 'GPT-5.5 mini',
|
||||
family: 'gpt-mini',
|
||||
attachment: false,
|
||||
reasoning: true,
|
||||
tool_call: true,
|
||||
temperature: true,
|
||||
knowledge: '2025-05',
|
||||
release_date: '2025-05-01',
|
||||
last_updated: '2025-05-01',
|
||||
modalities: { input: ['text'], output: ['text'] },
|
||||
open_weights: false,
|
||||
cost: { input: 0, output: 0 },
|
||||
limit: { context: 400000, output: 32768 },
|
||||
},
|
||||
'gpt-5.4': {
|
||||
id: 'gpt-5.4',
|
||||
name: 'GPT-5.4',
|
||||
|
||||
@@ -178,9 +178,9 @@ export function getDefaultOpusModel(): ModelName {
|
||||
if (getAPIProvider() === 'openai') {
|
||||
return process.env.OPENAI_MODEL || 'gpt-4o'
|
||||
}
|
||||
// Codex provider: use user-specified model or default to gpt-5.4
|
||||
// Codex provider: use user-specified model or default to gpt-5.5
|
||||
if (getAPIProvider() === 'codex') {
|
||||
return process.env.OPENAI_MODEL || 'gpt-5.4'
|
||||
return process.env.OPENAI_MODEL || 'gpt-5.5'
|
||||
}
|
||||
// GitHub Copilot provider
|
||||
if (getAPIProvider() === 'github') {
|
||||
@@ -222,7 +222,7 @@ export function getDefaultSonnetModel(): ModelName {
|
||||
}
|
||||
// Codex provider
|
||||
if (getAPIProvider() === 'codex') {
|
||||
return process.env.OPENAI_MODEL || 'gpt-5.4'
|
||||
return process.env.OPENAI_MODEL || 'gpt-5.5'
|
||||
}
|
||||
// GitHub Copilot provider
|
||||
if (getAPIProvider() === 'github') {
|
||||
@@ -258,7 +258,7 @@ export function getDefaultHaikuModel(): ModelName {
|
||||
}
|
||||
// Codex provider
|
||||
if (getAPIProvider() === 'codex') {
|
||||
return process.env.OPENAI_MODEL || 'gpt-5.4'
|
||||
return process.env.OPENAI_MODEL || 'gpt-5.5'
|
||||
}
|
||||
// GitHub Copilot provider
|
||||
if (getAPIProvider() === 'github') {
|
||||
@@ -340,9 +340,9 @@ export function getDefaultMainLoopModelSetting(): ModelName | ModelAlias {
|
||||
if (getAPIProvider() === 'openai') {
|
||||
return process.env.OPENAI_MODEL || 'gpt-4o'
|
||||
}
|
||||
// Codex provider: always use the configured Codex model (default gpt-5.4)
|
||||
// Codex provider: always use the configured Codex model (default gpt-5.5)
|
||||
if (getAPIProvider() === 'codex') {
|
||||
return process.env.OPENAI_MODEL || 'gpt-5.4'
|
||||
return process.env.OPENAI_MODEL || 'gpt-5.5'
|
||||
}
|
||||
|
||||
// Ants default to defaultModel from flag config, or Opus 1M if not configured
|
||||
@@ -506,7 +506,7 @@ export function renderModelSetting(setting: ModelName | ModelAlias): string {
|
||||
}
|
||||
// Handle Codex models - show actual model name + resolved model
|
||||
if (setting === 'codexplan') {
|
||||
return 'codexplan (gpt-5.4)'
|
||||
return 'codexplan (gpt-5.5)'
|
||||
}
|
||||
if (setting === 'codexspark') {
|
||||
return 'codexspark (gpt-5.3-codex-spark)'
|
||||
@@ -527,6 +527,8 @@ export function getPublicModelDisplayName(model: ModelName): string | null {
|
||||
if (getAPIProvider() === 'openai' || getAPIProvider() === 'gemini' || getAPIProvider() === 'codex' || getAPIProvider() === 'github') {
|
||||
// Return display names for known GitHub Copilot models
|
||||
const copilotModelNames: Record<string, string> = {
|
||||
'gpt-5.5': 'GPT-5.5',
|
||||
'gpt-5.5-mini': 'GPT-5.5 mini',
|
||||
'gpt-5.4': 'GPT-5.4',
|
||||
'gpt-5.4-mini': 'GPT-5.4 mini',
|
||||
'gpt-5.3-codex': 'GPT-5.3 Codex',
|
||||
@@ -553,6 +555,8 @@ export function getPublicModelDisplayName(model: ModelName): string | null {
|
||||
return null
|
||||
}
|
||||
switch (model) {
|
||||
case 'gpt-5.5':
|
||||
return 'GPT-5.5'
|
||||
case 'gpt-5.4':
|
||||
return 'GPT-5.4'
|
||||
case 'gpt-5.3-codex-spark':
|
||||
@@ -687,7 +691,7 @@ export function parseUserSpecifiedModel(
|
||||
|
||||
// Handle Codex aliases - map to actual model names
|
||||
if (modelString === 'codexplan') {
|
||||
return 'gpt-5.4'
|
||||
return 'gpt-5.5'
|
||||
}
|
||||
if (modelString === 'codexspark') {
|
||||
return 'gpt-5.3-codex-spark'
|
||||
|
||||
@@ -297,9 +297,9 @@ function getOpusPlanOption(): ModelOption {
|
||||
|
||||
function getCodexPlanOption(): ModelOption {
|
||||
return {
|
||||
value: 'gpt-5.4',
|
||||
label: 'gpt-5.4',
|
||||
description: 'GPT-5.4 on the Codex backend with high reasoning',
|
||||
value: 'gpt-5.5',
|
||||
label: 'gpt-5.5',
|
||||
description: 'GPT-5.5 on the Codex backend with high reasoning',
|
||||
}
|
||||
}
|
||||
|
||||
@@ -313,6 +313,11 @@ function getCodexSparkOption(): ModelOption {
|
||||
|
||||
function getCodexModelOptions(): ModelOption[] {
|
||||
return [
|
||||
{
|
||||
value: 'gpt-5.5',
|
||||
label: 'gpt-5.5',
|
||||
description: 'GPT-5.5 with high reasoning',
|
||||
},
|
||||
{
|
||||
value: 'gpt-5.4',
|
||||
label: 'gpt-5.4',
|
||||
@@ -348,6 +353,11 @@ function getCodexModelOptions(): ModelOption[] {
|
||||
label: 'gpt-5.1-codex-mini',
|
||||
description: 'GPT-5.1 Codex Mini - faster, cheaper',
|
||||
},
|
||||
{
|
||||
value: 'gpt-5.5-mini',
|
||||
label: 'gpt-5.5-mini',
|
||||
description: 'GPT-5.5 Mini - faster, cheaper',
|
||||
},
|
||||
{
|
||||
value: 'gpt-5.4-mini',
|
||||
label: 'gpt-5.4-mini',
|
||||
@@ -679,7 +689,7 @@ export function getModelOptions(fastMode = false): ModelOption[] {
|
||||
return filterModelOptionsByAllowlist(options)
|
||||
} else if (customModel === 'opusplan') {
|
||||
return filterModelOptionsByAllowlist([...options, getOpusPlanOption()])
|
||||
} else if (customModel === 'gpt-5.4') {
|
||||
} else if (customModel === 'gpt-5.5') {
|
||||
return filterModelOptionsByAllowlist([...options, getCodexPlanOption()])
|
||||
} else if (customModel === 'gpt-5.3-codex-spark') {
|
||||
return filterModelOptionsByAllowlist([...options, getCodexSparkOption()])
|
||||
|
||||
@@ -38,6 +38,8 @@ const OPENAI_CONTEXT_WINDOWS: Record<string, number> = {
|
||||
'github:copilot:gpt-5.2': 400_000,
|
||||
'github:copilot:gpt-5.2-codex': 400_000,
|
||||
'github:copilot:gpt-5.3-codex': 400_000,
|
||||
'github:copilot:gpt-5.5': 400_000,
|
||||
'github:copilot:gpt-5.5-mini': 400_000,
|
||||
'github:copilot:gpt-5.4': 400_000,
|
||||
'github:copilot:gpt-5.4-mini': 400_000,
|
||||
// Gemini
|
||||
@@ -60,6 +62,8 @@ const OPENAI_CONTEXT_WINDOWS: Record<string, number> = {
|
||||
'github_copilot/gpt-4.1': 128_000,
|
||||
'github_copilot/gpt-4o': 128_000,
|
||||
'github_copilot/gpt-5-mini': 264_000,
|
||||
'github_copilot/gpt-5.5': 400_000,
|
||||
'github_copilot/gpt-5.5-mini': 400_000,
|
||||
'github_copilot/gpt-5.4': 400_000,
|
||||
'github_copilot/gpt-5.4-mini': 400_000,
|
||||
'github_copilot/gemini-2.5-pro': 128_000,
|
||||
@@ -71,6 +75,9 @@ const OPENAI_CONTEXT_WINDOWS: Record<string, number> = {
|
||||
// limits for the same model name, so we cannot safely hardcode values here.
|
||||
|
||||
// OpenAI
|
||||
'gpt-5.5': 1_050_000,
|
||||
'gpt-5.5-mini': 400_000,
|
||||
'gpt-5.5-nano': 400_000,
|
||||
'gpt-5.4': 1_050_000,
|
||||
'gpt-5.4-mini': 400_000,
|
||||
'gpt-5.4-nano': 400_000,
|
||||
@@ -288,6 +295,9 @@ const OPENAI_MAX_OUTPUT_TOKENS: Record<string, number> = {
|
||||
// NOTE: bare Claude model names omitted — see context windows comment above.
|
||||
|
||||
// OpenAI
|
||||
'gpt-5.5': 128_000,
|
||||
'gpt-5.5-mini': 128_000,
|
||||
'gpt-5.5-nano': 128_000,
|
||||
'gpt-5.4': 128_000,
|
||||
'gpt-5.4-mini': 128_000,
|
||||
'gpt-5.4-nano': 128_000,
|
||||
|
||||
@@ -153,7 +153,7 @@ export function getProviderPresetDefaults(
|
||||
provider: 'openai',
|
||||
name: 'OpenAI',
|
||||
baseUrl: 'https://api.openai.com/v1',
|
||||
model: 'gpt-5.3-codex',
|
||||
model: 'gpt-5.4',
|
||||
apiKey: '',
|
||||
requiresApiKey: true,
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user