fix(api): consolidate 3P provider compatibility fixes
- Strip `store` field from request body for local providers (Ollama, vLLM) that reject unknown JSON fields with 400 errors
- Add Gemini 3.x model context windows and output token limits (gemini-3-flash-preview, gemini-3.1-pro-preview, google/ OpenRouter variants)
- Preserve `reasoning_content` on assistant tool-call message replays for providers that require it (Kimi k2.5, DeepSeek reasoner)
- Use conservative `max_output_tokens` fallback (4096/16384) for unknown 3P models to prevent vLLM/Ollama 400 errors from exceeding `max_model_len`

Consolidates fixes from: #258, #268, #237, #643, #666, #677

Co-authored-by: auriti <auriti@users.noreply.github.com>
Co-authored-by: Gustavo-Falci <Gustavo-Falci@users.noreply.github.com>
Co-authored-by: lttlin <lttlin@users.noreply.github.com>
Co-authored-by: Durannd <Durannd@users.noreply.github.com>
This commit is contained in:
@@ -21,11 +21,11 @@ describe('Gemini store field fix', () => {
   test('isGeminiMode is imported and used in openaiShim', async () => {
     const content = await file('services/api/openaiShim.ts').text()

-    // Verify the fix: store deletion should check for Gemini mode
+    // Verify the fix: store deletion should check for Gemini mode and local providers
     expect(content).toContain('isGeminiMode()')
-    expect(content).toContain("mistral and gemini don't recognize body.store")
-    // Ensure the delete body.store is guarded for both Mistral and Gemini
-    expect(content).toMatch(/isMistral\s*\|\|\s*isGeminiMode\(\)/)
+    expect(content).toContain("Strip store for providers that don't recognize it")
+    // Ensure the delete body.store is guarded for Mistral, Gemini, and local providers
+    expect(content).toMatch(/isMistral\s*\|\|\s*isGeminiMode\(\)\s*\|\|\s*isLocal/)
   })

   test('store: false is still set by default (OpenAI needs it)', async () => {
Reference in New Issue
Block a user