feat(model): add GPT-5.5 support for Codex provider (#880)

- Bump Codex provider defaults from gpt-5.4 to gpt-5.5 across all ModelConfigs
- Update codexplan alias to resolve to gpt-5.5
- Add gpt-5.5 and gpt-5.5-mini to model picker with reasoning effort mappings
- Add context window and max output token specs for gpt-5.5 family
- Add gpt-5.5 entries to COPILOT_MODELS registry
- Keep official OpenAI API preset at gpt-5.4 (API availability pending)
- Update codexShim tests to expect gpt-5.5 from codexplan alias

Co-authored-by: OpenClaude <openclaude@gitlawb.com>
This commit is contained in:
Kevin Codex
2026-04-24 19:06:36 +08:00
committed by GitHub
parent b694ccfff1
commit 038f715b7a
8 changed files with 92 additions and 28 deletions

View File

@@ -36,7 +36,7 @@ export const CLAUDE_3_7_SONNET_CONFIG = {
openai: 'gpt-4o-mini',
gemini: 'gemini-2.0-flash',
github: 'github:copilot',
- codex: 'gpt-5.4',
+ codex: 'gpt-5.5',
'nvidia-nim': 'nvidia/llama-3.1-nemotron-70b-instruct',
minimax: 'MiniMax-M2.5',
} as const satisfies ModelConfig
@@ -49,7 +49,7 @@ export const CLAUDE_3_5_V2_SONNET_CONFIG = {
openai: 'gpt-4o-mini',
gemini: 'gemini-2.0-flash',
github: 'github:copilot',
- codex: 'gpt-5.4',
+ codex: 'gpt-5.5',
'nvidia-nim': 'nvidia/llama-3.1-nemotron-70b-instruct',
minimax: 'MiniMax-M2.5',
} as const satisfies ModelConfig
@@ -62,7 +62,7 @@ export const CLAUDE_3_5_HAIKU_CONFIG = {
openai: 'gpt-4o-mini',
gemini: 'gemini-2.0-flash-lite',
github: 'github:copilot',
- codex: 'gpt-5.4',
+ codex: 'gpt-5.5',
'nvidia-nim': 'nvidia/llama-3.1-nemotron-70b-instruct',
minimax: 'MiniMax-M2.5',
} as const satisfies ModelConfig
@@ -75,7 +75,7 @@ export const CLAUDE_HAIKU_4_5_CONFIG = {
openai: 'gpt-4o-mini',
gemini: 'gemini-2.0-flash-lite',
github: 'github:copilot',
- codex: 'gpt-5.4',
+ codex: 'gpt-5.5',
'nvidia-nim': 'nvidia/llama-3.1-nemotron-70b-instruct',
minimax: 'MiniMax-M2.5',
} as const satisfies ModelConfig
@@ -88,7 +88,7 @@ export const CLAUDE_SONNET_4_CONFIG = {
openai: 'gpt-4o-mini',
gemini: 'gemini-2.0-flash',
github: 'github:copilot',
- codex: 'gpt-5.4',
+ codex: 'gpt-5.5',
'nvidia-nim': 'nvidia/llama-3.1-nemotron-70b-instruct',
minimax: 'MiniMax-M2.5',
} as const satisfies ModelConfig
@@ -101,7 +101,7 @@ export const CLAUDE_SONNET_4_5_CONFIG = {
openai: 'gpt-4o',
gemini: 'gemini-2.0-flash',
github: 'github:copilot',
- codex: 'gpt-5.4',
+ codex: 'gpt-5.5',
'nvidia-nim': 'nvidia/llama-3.1-nemotron-70b-instruct',
minimax: 'MiniMax-M2.5',
} as const satisfies ModelConfig
@@ -114,7 +114,7 @@ export const CLAUDE_OPUS_4_CONFIG = {
openai: 'gpt-4o',
gemini: 'gemini-2.5-pro',
github: 'github:copilot',
- codex: 'gpt-5.4',
+ codex: 'gpt-5.5',
'nvidia-nim': 'nvidia/llama-3.1-nemotron-70b-instruct',
minimax: 'MiniMax-M2.5',
} as const satisfies ModelConfig
@@ -127,7 +127,7 @@ export const CLAUDE_OPUS_4_1_CONFIG = {
openai: 'gpt-4o',
gemini: 'gemini-2.5-pro',
github: 'github:copilot',
- codex: 'gpt-5.4',
+ codex: 'gpt-5.5',
'nvidia-nim': 'nvidia/llama-3.1-nemotron-70b-instruct',
minimax: 'MiniMax-M2.5',
} as const satisfies ModelConfig
@@ -140,7 +140,7 @@ export const CLAUDE_OPUS_4_5_CONFIG = {
openai: 'gpt-4o',
gemini: 'gemini-2.5-pro',
github: 'github:copilot',
- codex: 'gpt-5.4',
+ codex: 'gpt-5.5',
'nvidia-nim': 'nvidia/llama-3.1-nemotron-70b-instruct',
minimax: 'MiniMax-M2.5',
} as const satisfies ModelConfig
@@ -153,7 +153,7 @@ export const CLAUDE_OPUS_4_6_CONFIG = {
openai: 'gpt-4o',
gemini: 'gemini-2.5-pro',
github: 'github:copilot',
- codex: 'gpt-5.4',
+ codex: 'gpt-5.5',
'nvidia-nim': 'nvidia/llama-3.1-nemotron-70b-instruct',
minimax: 'MiniMax-M2.5',
} as const satisfies ModelConfig
@@ -166,7 +166,7 @@ export const CLAUDE_SONNET_4_6_CONFIG = {
openai: 'gpt-4o',
gemini: 'gemini-2.0-flash',
github: 'github:copilot',
- codex: 'gpt-5.4',
+ codex: 'gpt-5.5',
'nvidia-nim': 'nvidia/llama-3.1-nemotron-70b-instruct',
minimax: 'MiniMax-M2.5',
} as const satisfies ModelConfig