fix: use isEnvTruthy() for provider detection in context window lookup
Replace raw === '1' / === 'true' comparisons with isEnvTruthy() in context.ts, matching how getAPIProvider() detects providers in providers.ts. This also covers the newly added CLAUDE_CODE_USE_GITHUB provider.

Add native Gemini model entries (without the google/ prefix) to both the context window and max output token tables, and correct the gemini-2.5-pro and gemini-2.5-flash max output tokens to 65,536 (previously 32,768 and 8,192 respectively).
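For context, a minimal sketch of the kind of helper isEnvTruthy() is (a hypothetical implementation, not the repo's actual helper, which may accept additional spellings). It mirrors the raw comparisons this commit removes:

// Hypothetical sketch: treats '1' and 'true' (case-insensitive) as truthy,
// matching the hand-rolled checks replaced below.
function isEnvTruthy(value: string | undefined): boolean {
  if (value === undefined) return false
  const normalized = value.trim().toLowerCase()
  return normalized === '1' || normalized === 'true'
}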
@@ -74,12 +74,9 @@ export function getContextWindowForModel(
 
   // OpenAI-compatible provider — use known context windows for the model
   if (
-    process.env.CLAUDE_CODE_USE_OPENAI === '1' ||
-    process.env.CLAUDE_CODE_USE_OPENAI === 'true' ||
-    process.env.CLAUDE_CODE_USE_GEMINI === '1' ||
-    process.env.CLAUDE_CODE_USE_GEMINI === 'true' ||
-    process.env.CLAUDE_CODE_USE_GITHUB === '1' ||
-    process.env.CLAUDE_CODE_USE_GITHUB === 'true'
+    isEnvTruthy(process.env.CLAUDE_CODE_USE_OPENAI) ||
+    isEnvTruthy(process.env.CLAUDE_CODE_USE_GEMINI) ||
+    isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB)
   ) {
     const openaiWindow = getOpenAIContextWindow(model)
     if (openaiWindow !== undefined) {
@@ -180,12 +177,9 @@ export function getModelMaxOutputTokens(model: string): {
 
   // OpenAI-compatible provider — use known output limits to avoid 400 errors
   if (
-    process.env.CLAUDE_CODE_USE_OPENAI === '1' ||
-    process.env.CLAUDE_CODE_USE_OPENAI === 'true' ||
-    process.env.CLAUDE_CODE_USE_GEMINI === '1' ||
-    process.env.CLAUDE_CODE_USE_GEMINI === 'true' ||
-    process.env.CLAUDE_CODE_USE_GITHUB === '1' ||
-    process.env.CLAUDE_CODE_USE_GITHUB === 'true'
+    isEnvTruthy(process.env.CLAUDE_CODE_USE_OPENAI) ||
+    isEnvTruthy(process.env.CLAUDE_CODE_USE_GEMINI) ||
+    isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB)
   ) {
     const openaiMax = getOpenAIMaxOutputTokens(model)
     if (openaiMax !== undefined) {
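The two hunks above bring context.ts in line with getAPIProvider() in providers.ts. That function is not part of this diff; the following is only a rough sketch of the shape of env-based provider detection being matched (the name detectProvider, the return values, and the precedence order are assumptions, not the providers.ts code):

// Hypothetical sketch of env-based provider selection using the same helper.
type APIProvider = 'anthropic' | 'openai' | 'gemini' | 'github'

function detectProvider(env: NodeJS.ProcessEnv = process.env): APIProvider {
  if (isEnvTruthy(env.CLAUDE_CODE_USE_OPENAI)) return 'openai'
  if (isEnvTruthy(env.CLAUDE_CODE_USE_GEMINI)) return 'gemini'
  if (isEnvTruthy(env.CLAUDE_CODE_USE_GITHUB)) return 'github'
  return 'anthropic'
}

Keeping both files on isEnvTruthy() means any spelling the helper accepts is treated the same way in provider selection and in the context window and output limit lookups.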
@@ -44,6 +44,11 @@ const OPENAI_CONTEXT_WINDOWS: Record<string, number> = {
   'google/gemini-2.0-flash': 1_048_576,
   'google/gemini-2.5-pro': 1_048_576,
 
+  // Google (native via CLAUDE_CODE_USE_GEMINI)
+  'gemini-2.0-flash': 1_048_576,
+  'gemini-2.5-pro': 1_048_576,
+  'gemini-2.5-flash': 1_048_576,
+
   // Ollama local models
   'llama3.3:70b': 8_192,
   'llama3.1:8b': 8_192,
@@ -94,7 +99,12 @@ const OPENAI_MAX_OUTPUT_TOKENS: Record<string, number> = {
 
   // Google (via OpenRouter)
   'google/gemini-2.0-flash': 8_192,
-  'google/gemini-2.5-pro': 32_768,
+  'google/gemini-2.5-pro': 65_536,
+
+  // Google (native via CLAUDE_CODE_USE_GEMINI)
+  'gemini-2.0-flash': 8_192,
+  'gemini-2.5-pro': 65_536,
+  'gemini-2.5-flash': 65_536,
 
   // Ollama local models (conservative safe defaults)
   'llama3.3:70b': 4_096,
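Why the unprefixed entries matter: with CLAUDE_CODE_USE_GEMINI the model id presumably arrives as a bare name such as gemini-2.5-pro, so a table keyed only by OpenRouter-style google/ ids would miss and the lookup would return undefined. A simplified illustration follows (the record excerpt and lookup helper are not the actual getOpenAIMaxOutputTokens, just the shape of the lookup):

// Simplified illustration; values mirror the entries added in this commit.
const MAX_OUTPUT_TOKENS: Record<string, number> = {
  'google/gemini-2.5-pro': 65_536, // OpenRouter-style id
  'gemini-2.5-pro': 65_536,        // native id used with CLAUDE_CODE_USE_GEMINI
}

function lookupMaxOutput(model: string): number | undefined {
  return MAX_OUTPUT_TOKENS[model]
}

lookupMaxOutput('gemini-2.5-pro') // 65_536, hits the new native entry
lookupMaxOutput('unknown-model')  // undefined, so the caller can fall back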