Merge pull request #59 from Vasanthdev2004/gpt4o-max-tokens-test

test: cover OpenAI max token caps for gpt-4o and gpt-5.4
This commit is contained in:
Kevin Codex
2026-04-02 08:24:25 +08:00
committed by GitHub
2 changed files with 57 additions and 0 deletions

View File

@@ -35,3 +35,54 @@ test('deepseek-chat clamps oversized max output overrides to the provider limit'
expect(getMaxOutputTokensForModel('deepseek-chat')).toBe(8_192)
})
test('gpt-4o uses provider-specific context and output caps', () => {
  // Route model lookups through the OpenAI provider and clear any override
  // so the provider defaults are what gets asserted.
  process.env.CLAUDE_CODE_USE_OPENAI = '1'
  delete process.env.CLAUDE_CODE_MAX_OUTPUT_TOKENS

  const outputCap = 16_384
  expect(getContextWindowForModel('gpt-4o')).toBe(128_000)
  expect(getModelMaxOutputTokens('gpt-4o')).toEqual({
    default: outputCap,
    upperLimit: outputCap,
  })
  expect(getMaxOutputTokensForModel('gpt-4o')).toBe(outputCap)
})
test('gpt-4o clamps oversized max output overrides to the provider limit', () => {
  // An env override larger than the provider cap must be clamped, not honored.
  process.env.CLAUDE_CODE_USE_OPENAI = '1'
  process.env.CLAUDE_CODE_MAX_OUTPUT_TOKENS = '32000'

  const resolved = getMaxOutputTokensForModel('gpt-4o')
  expect(resolved).toBe(16_384)
})
test('gpt-5.4 family uses provider-specific context and output caps', () => {
  // Route through the OpenAI provider with no output-token override in place.
  process.env.CLAUDE_CODE_USE_OPENAI = '1'
  delete process.env.CLAUDE_CODE_MAX_OUTPUT_TOKENS

  // Every gpt-5.4 variant shares the same output cap; only the context
  // window differs between the full model and the mini/nano variants.
  const cases: ReadonlyArray<readonly [string, number]> = [
    ['gpt-5.4', 1_050_000],
    ['gpt-5.4-mini', 400_000],
    ['gpt-5.4-nano', 400_000],
  ]
  for (const [model, contextWindow] of cases) {
    expect(getContextWindowForModel(model)).toBe(contextWindow)
    expect(getModelMaxOutputTokens(model)).toEqual({
      default: 128_000,
      upperLimit: 128_000,
    })
  }
})
test('gpt-5.4 family keeps large max output overrides within provider limits', () => {
  // A 200k override exceeds every gpt-5.4 variant's 128k cap and must be clamped.
  process.env.CLAUDE_CODE_USE_OPENAI = '1'
  process.env.CLAUDE_CODE_MAX_OUTPUT_TOKENS = '200000'

  const family = ['gpt-5.4', 'gpt-5.4-mini', 'gpt-5.4-nano']
  for (const model of family) {
    expect(getMaxOutputTokensForModel(model)).toBe(128_000)
  }
})

View File

@@ -13,6 +13,9 @@
const OPENAI_CONTEXT_WINDOWS: Record<string, number> = {
// OpenAI
'gpt-5.4': 1_050_000,
'gpt-5.4-mini': 400_000,
'gpt-5.4-nano': 400_000,
'gpt-4o': 128_000,
'gpt-4o-mini': 128_000,
'gpt-4.1': 1_047_576,
@@ -62,6 +65,9 @@ const OPENAI_CONTEXT_WINDOWS: Record<string, number> = {
*/
const OPENAI_MAX_OUTPUT_TOKENS: Record<string, number> = {
// OpenAI
'gpt-5.4': 128_000,
'gpt-5.4-mini': 128_000,
'gpt-5.4-nano': 128_000,
'gpt-4o': 16_384,
'gpt-4o-mini': 16_384,
'gpt-4.1': 32_768,