test: add GPT-5.4 token coverage
This commit is contained in:
@@ -54,3 +54,35 @@ test('gpt-4o clamps oversized max output overrides to the provider limit', () =>
|
||||
|
||||
expect(getMaxOutputTokensForModel('gpt-4o')).toBe(16_384)
|
||||
})
|
||||
|
||||
test('gpt-5.4 family uses provider-specific context and output caps', () => {
  // Force the OpenAI provider path and clear any user override so the
  // provider defaults are what gets asserted below.
  process.env.CLAUDE_CODE_USE_OPENAI = '1'
  delete process.env.CLAUDE_CODE_MAX_OUTPUT_TOKENS

  // [model, expected context window] — every family member shares the
  // same 128k default/upper output-token cap.
  const contextWindows = [
    ['gpt-5.4', 1_050_000],
    ['gpt-5.4-mini', 400_000],
    ['gpt-5.4-nano', 400_000],
  ]

  for (const [model, contextWindow] of contextWindows) {
    expect(getContextWindowForModel(model)).toBe(contextWindow)
    expect(getModelMaxOutputTokens(model)).toEqual({
      default: 128_000,
      upperLimit: 128_000,
    })
  }
})
|
||||
|
||||
test('gpt-5.4 family keeps large max output overrides within provider limits', () => {
  // With the OpenAI provider active, a user override above the provider
  // cap (200k > 128k) must be clamped to the cap for every family member.
  process.env.CLAUDE_CODE_USE_OPENAI = '1'
  process.env.CLAUDE_CODE_MAX_OUTPUT_TOKENS = '200000'

  for (const model of ['gpt-5.4', 'gpt-5.4-mini', 'gpt-5.4-nano']) {
    expect(getMaxOutputTokensForModel(model)).toBe(128_000)
  }
})
|
||||
|
||||
Reference in New Issue
Block a user