diff --git a/src/utils/context.ts b/src/utils/context.ts
index 7dba02b7..4eae1782 100644
--- a/src/utils/context.ts
+++ b/src/utils/context.ts
@@ -74,12 +74,9 @@ export function getContextWindowForModel(
 
   // OpenAI-compatible provider — use known context windows for the model
   if (
-    process.env.CLAUDE_CODE_USE_OPENAI === '1' ||
-    process.env.CLAUDE_CODE_USE_OPENAI === 'true' ||
-    process.env.CLAUDE_CODE_USE_GEMINI === '1' ||
-    process.env.CLAUDE_CODE_USE_GEMINI === 'true' ||
-    process.env.CLAUDE_CODE_USE_GITHUB === '1' ||
-    process.env.CLAUDE_CODE_USE_GITHUB === 'true'
+    isEnvTruthy(process.env.CLAUDE_CODE_USE_OPENAI) ||
+    isEnvTruthy(process.env.CLAUDE_CODE_USE_GEMINI) ||
+    isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB)
   ) {
     const openaiWindow = getOpenAIContextWindow(model)
     if (openaiWindow !== undefined) {
@@ -180,12 +177,9 @@ export function getModelMaxOutputTokens(model: string): {
 
   // OpenAI-compatible provider — use known output limits to avoid 400 errors
   if (
-    process.env.CLAUDE_CODE_USE_OPENAI === '1' ||
-    process.env.CLAUDE_CODE_USE_OPENAI === 'true' ||
-    process.env.CLAUDE_CODE_USE_GEMINI === '1' ||
-    process.env.CLAUDE_CODE_USE_GEMINI === 'true' ||
-    process.env.CLAUDE_CODE_USE_GITHUB === '1' ||
-    process.env.CLAUDE_CODE_USE_GITHUB === 'true'
+    isEnvTruthy(process.env.CLAUDE_CODE_USE_OPENAI) ||
+    isEnvTruthy(process.env.CLAUDE_CODE_USE_GEMINI) ||
+    isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB)
   ) {
     const openaiMax = getOpenAIMaxOutputTokens(model)
     if (openaiMax !== undefined) {
diff --git a/src/utils/model/openaiContextWindows.ts b/src/utils/model/openaiContextWindows.ts
index 4a31a8e5..6cb12c37 100644
--- a/src/utils/model/openaiContextWindows.ts
+++ b/src/utils/model/openaiContextWindows.ts
@@ -44,6 +44,11 @@ const OPENAI_CONTEXT_WINDOWS: Record<string, number> = {
   'google/gemini-2.0-flash': 1_048_576,
   'google/gemini-2.5-pro': 1_048_576,
 
+  // Google (native via CLAUDE_CODE_USE_GEMINI)
+  'gemini-2.0-flash': 1_048_576,
+  'gemini-2.5-pro': 1_048_576,
+  'gemini-2.5-flash': 1_048_576,
+
   // Ollama local models
   'llama3.3:70b': 8_192,
   'llama3.1:8b': 8_192,
@@ -94,7 +99,12 @@ const OPENAI_MAX_OUTPUT_TOKENS: Record<string, number> = {
 
   // Google (via OpenRouter)
   'google/gemini-2.0-flash': 8_192,
-  'google/gemini-2.5-pro': 32_768,
+  'google/gemini-2.5-pro': 65_536,
+
+  // Google (native via CLAUDE_CODE_USE_GEMINI)
+  'gemini-2.0-flash': 8_192,
+  'gemini-2.5-pro': 65_536,
+  'gemini-2.5-flash': 65_536,
 
   // Ollama local models (conservative safe defaults)
   'llama3.3:70b': 4_096,