diff --git a/src/utils/context.test.ts b/src/utils/context.test.ts
index 59647054..a95b209e 100644
--- a/src/utils/context.test.ts
+++ b/src/utils/context.test.ts
@@ -35,3 +35,54 @@ test('deepseek-chat clamps oversized max output overrides to the provider limit'
 
   expect(getMaxOutputTokensForModel('deepseek-chat')).toBe(8_192)
 })
+
+test('gpt-4o uses provider-specific context and output caps', () => {
+  process.env.CLAUDE_CODE_USE_OPENAI = '1'
+  delete process.env.CLAUDE_CODE_MAX_OUTPUT_TOKENS
+
+  expect(getContextWindowForModel('gpt-4o')).toBe(128_000)
+  expect(getModelMaxOutputTokens('gpt-4o')).toEqual({
+    default: 16_384,
+    upperLimit: 16_384,
+  })
+  expect(getMaxOutputTokensForModel('gpt-4o')).toBe(16_384)
+})
+
+test('gpt-4o clamps oversized max output overrides to the provider limit', () => {
+  process.env.CLAUDE_CODE_USE_OPENAI = '1'
+  process.env.CLAUDE_CODE_MAX_OUTPUT_TOKENS = '32000'
+
+  expect(getMaxOutputTokensForModel('gpt-4o')).toBe(16_384)
+})
+
+test('gpt-5.4 family uses provider-specific context and output caps', () => {
+  process.env.CLAUDE_CODE_USE_OPENAI = '1'
+  delete process.env.CLAUDE_CODE_MAX_OUTPUT_TOKENS
+
+  expect(getContextWindowForModel('gpt-5.4')).toBe(1_050_000)
+  expect(getModelMaxOutputTokens('gpt-5.4')).toEqual({
+    default: 128_000,
+    upperLimit: 128_000,
+  })
+
+  expect(getContextWindowForModel('gpt-5.4-mini')).toBe(400_000)
+  expect(getModelMaxOutputTokens('gpt-5.4-mini')).toEqual({
+    default: 128_000,
+    upperLimit: 128_000,
+  })
+
+  expect(getContextWindowForModel('gpt-5.4-nano')).toBe(400_000)
+  expect(getModelMaxOutputTokens('gpt-5.4-nano')).toEqual({
+    default: 128_000,
+    upperLimit: 128_000,
+  })
+})
+
+test('gpt-5.4 family keeps large max output overrides within provider limits', () => {
+  process.env.CLAUDE_CODE_USE_OPENAI = '1'
+  process.env.CLAUDE_CODE_MAX_OUTPUT_TOKENS = '200000'
+
+  expect(getMaxOutputTokensForModel('gpt-5.4')).toBe(128_000)
+  expect(getMaxOutputTokensForModel('gpt-5.4-mini')).toBe(128_000)
+  expect(getMaxOutputTokensForModel('gpt-5.4-nano')).toBe(128_000)
+})
diff --git a/src/utils/model/openaiContextWindows.ts b/src/utils/model/openaiContextWindows.ts
index 63ac5f2c..4a31a8e5 100644
--- a/src/utils/model/openaiContextWindows.ts
+++ b/src/utils/model/openaiContextWindows.ts
@@ -13,6 +13,9 @@
 
 const OPENAI_CONTEXT_WINDOWS: Record<string, number> = {
   // OpenAI
+  'gpt-5.4': 1_050_000,
+  'gpt-5.4-mini': 400_000,
+  'gpt-5.4-nano': 400_000,
   'gpt-4o': 128_000,
   'gpt-4o-mini': 128_000,
   'gpt-4.1': 1_047_576,
@@ -62,6 +65,9 @@
  */
 const OPENAI_MAX_OUTPUT_TOKENS: Record<string, number> = {
   // OpenAI
+  'gpt-5.4': 128_000,
+  'gpt-5.4-mini': 128_000,
+  'gpt-5.4-nano': 128_000,
   'gpt-4o': 16_384,
   'gpt-4o-mini': 16_384,
   'gpt-4.1': 32_768,