fix: replace discontinued gemini-2.5-pro-preview-03-25 with stable gemini-2.5-pro (#802)

Updates both the model config mappings (configs.ts) and the runtime
fallback in getDefaultOpusModel() (model.ts) so Gemini mode no longer
falls back to the discontinued preview model when GEMINI_MODEL is unset.

Fixes #398
This commit is contained in:
Juan Camilo Auriti
2026-04-21 11:01:33 +02:00
committed by GitHub
parent 85eab2751e
commit 64582c119d
2 changed files with 6 additions and 6 deletions

View File

@@ -20,7 +20,7 @@ export const OPENAI_MODEL_DEFAULTS = {
 // Override with GEMINI_MODEL env var.
 // ---------------------------------------------------------------------------
 export const GEMINI_MODEL_DEFAULTS = {
-  opus: 'gemini-2.5-pro-preview-03-25', // most capable
+  opus: 'gemini-2.5-pro', // most capable
   sonnet: 'gemini-2.0-flash', // balanced
   haiku: 'gemini-2.0-flash-lite', // fast & cheap
 } as const
@@ -112,7 +112,7 @@ export const CLAUDE_OPUS_4_CONFIG = {
   vertex: 'claude-opus-4@20250514',
   foundry: 'claude-opus-4',
   openai: 'gpt-4o',
-  gemini: 'gemini-2.5-pro-preview-03-25',
+  gemini: 'gemini-2.5-pro',
   github: 'github:copilot',
   codex: 'gpt-5.4',
   'nvidia-nim': 'nvidia/llama-3.1-nemotron-70b-instruct',
@@ -125,7 +125,7 @@ export const CLAUDE_OPUS_4_1_CONFIG = {
   vertex: 'claude-opus-4-1@20250805',
   foundry: 'claude-opus-4-1',
   openai: 'gpt-4o',
-  gemini: 'gemini-2.5-pro-preview-03-25',
+  gemini: 'gemini-2.5-pro',
   github: 'github:copilot',
   codex: 'gpt-5.4',
   'nvidia-nim': 'nvidia/llama-3.1-nemotron-70b-instruct',
@@ -138,7 +138,7 @@ export const CLAUDE_OPUS_4_5_CONFIG = {
   vertex: 'claude-opus-4-5@20251101',
   foundry: 'claude-opus-4-5',
   openai: 'gpt-4o',
-  gemini: 'gemini-2.5-pro-preview-03-25',
+  gemini: 'gemini-2.5-pro',
   github: 'github:copilot',
   codex: 'gpt-5.4',
   'nvidia-nim': 'nvidia/llama-3.1-nemotron-70b-instruct',
@@ -151,7 +151,7 @@ export const CLAUDE_OPUS_4_6_CONFIG = {
   vertex: 'claude-opus-4-6',
   foundry: 'claude-opus-4-6',
   openai: 'gpt-4o',
-  gemini: 'gemini-2.5-pro-preview-03-25',
+  gemini: 'gemini-2.5-pro',
   github: 'github:copilot',
   codex: 'gpt-5.4',
   'nvidia-nim': 'nvidia/llama-3.1-nemotron-70b-instruct',

View File

@@ -140,7 +140,7 @@ export function getDefaultOpusModel(): ModelName {
   }
   // Gemini provider
   if (getAPIProvider() === 'gemini') {
-    return process.env.GEMINI_MODEL || 'gemini-2.5-pro-preview-03-25'
+    return process.env.GEMINI_MODEL || 'gemini-2.5-pro'
   }
   // Mistral provider
   if (getAPIProvider() === 'mistral') {