test: cover gpt-4o max token limits

This commit is contained in:
Vasanthdev2004
2026-04-01 21:59:21 +05:30
parent 8750f84464
commit 29493bde1a

View File

@@ -35,3 +35,22 @@ test('deepseek-chat clamps oversized max output overrides to the provider limit'
  expect(getMaxOutputTokensForModel('deepseek-chat')).toBe(8_192)
})
test('gpt-4o uses provider-specific context and output caps', () => {
  // Route model lookups through the OpenAI provider and clear any
  // user-supplied output-token override so provider defaults apply.
  process.env.CLAUDE_CODE_USE_OPENAI = '1'
  delete process.env.CLAUDE_CODE_MAX_OUTPUT_TOKENS

  // gpt-4o: 128k context window, 16,384-token output cap (default == upper limit).
  expect(getContextWindowForModel('gpt-4o')).toBe(128_000)

  const expectedOutputCaps = { default: 16_384, upperLimit: 16_384 }
  expect(getModelMaxOutputTokens('gpt-4o')).toEqual(expectedOutputCaps)
  expect(getMaxOutputTokensForModel('gpt-4o')).toBe(16_384)
})
test('gpt-4o clamps oversized max output overrides to the provider limit', () => {
  process.env.CLAUDE_CODE_USE_OPENAI = '1'

  // An override above gpt-4o's 16,384-token ceiling must be clamped to it.
  // NOTE(review): the override env var is intentionally left set here; the
  // sibling test deletes it before reading defaults — confirm test order
  // independence if more cases are added.
  const oversizedOverride = '32000'
  process.env.CLAUDE_CODE_MAX_OUTPUT_TOKENS = oversizedOverride

  expect(getMaxOutputTokensForModel('gpt-4o')).toBe(16_384)
})