fix: use isEnvTruthy() for provider detection in context window lookup

Replace raw === '1' || === 'true' comparisons with isEnvTruthy() in
context.ts for consistency with getAPIProvider() in providers.ts.
This also covers the newly added CLAUDE_CODE_USE_GITHUB provider.

Add native Gemini model entries (without google/ prefix) to both the
context window and max output token tables. Corrects gemini-2.5-pro
and gemini-2.5-flash max output tokens to 65,536 (was 32,768/8,192
respectively).
This commit is contained in:
Juan Camilo
2026-04-02 14:43:03 +02:00
parent 3d72d9e5e2
commit f385740bd6
2 changed files with 17 additions and 13 deletions

View File

@@ -44,6 +44,11 @@ const OPENAI_CONTEXT_WINDOWS: Record<string, number> = {
'google/gemini-2.0-flash':1_048_576,
'google/gemini-2.5-pro': 1_048_576,
// Google (native via CLAUDE_CODE_USE_GEMINI)
'gemini-2.0-flash': 1_048_576,
'gemini-2.5-pro': 1_048_576,
'gemini-2.5-flash': 1_048_576,
// Ollama local models
'llama3.3:70b': 8_192,
'llama3.1:8b': 8_192,
@@ -94,7 +99,12 @@ const OPENAI_MAX_OUTPUT_TOKENS: Record<string, number> = {
// Google (via OpenRouter)
'google/gemini-2.0-flash': 8_192,
'google/gemini-2.5-pro': 32_768,
'google/gemini-2.5-pro': 65_536,
// Google (native via CLAUDE_CODE_USE_GEMINI)
'gemini-2.0-flash': 8_192,
'gemini-2.5-pro': 65_536,
'gemini-2.5-flash': 65_536,
// Ollama local models (conservative safe defaults)
'llama3.3:70b': 4_096,