Provider loading fix (#623)
* add mistral and gemini provider type for profile provider field
* load latest locally selected
* env variables take precedence over json save
* add gemini context windows and fix gemini defaulting for env
* load on startup fix
* fix failing tests
* clarify test message
* fix variable mismatches
* fix failing test
* delete keys and set profile.apiKey for mistral and gemini
* switch model as well when switching provider
* set model when adding a new model
This commit is contained in:
@@ -181,9 +181,11 @@ const OPENAI_CONTEXT_WINDOWS: Record<string, number> = {
   'google/gemini-2.5-pro': 1_048_576,

   // Google (native via CLAUDE_CODE_USE_GEMINI)
   'gemini-2.0-flash': 1_048_576,
   'gemini-2.5-pro': 1_048_576,
   'gemini-2.5-flash': 1_048_576,
+  'gemini-3.1-pro': 1_048_576,
+  'gemini-3.1-flash-lite-preview': 1_048_576,

   // Ollama local models
   // Llama 3.1+ models support 128k context natively (Meta official specs).
@@ -331,9 +333,11 @@ const OPENAI_MAX_OUTPUT_TOKENS: Record<string, number> = {
   'google/gemini-2.5-pro': 65_536,

   // Google (native via CLAUDE_CODE_USE_GEMINI)
   'gemini-2.0-flash': 8_192,
   'gemini-2.5-pro': 65_536,
   'gemini-2.5-flash': 65_536,
+  'gemini-3.1-pro': 65_536,
+  'gemini-3.1-flash-lite-preview': 65_536,

   // Ollama local models (conservative safe defaults)
   'llama3.3:70b': 4_096,
Reference in New Issue
Block a user