* feat(api): classify openai-compatible provider failures * Update src/services/api/providerConfig.ts Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> * Update src/services/api/errors.ts Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> * feat(api): harden openai-compatible diagnostics and env fallback * Update src/services/api/openaiShim.ts Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> * Update src/services/api/openaiShim.ts Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> * Update src/services/api/errors.ts Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> * Update src/services/api/errors.ts Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> * Apply suggestion from @Copilot Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> * fix openaiShim duplicate requests and diagnostics * remove unused url from http failure classifier * dedupe env diagnostic warnings * Remove hardcoded URLs from OpenAI error tests Removed hardcoded URLs from network failure classification tests. * Update providerConfig.envDiagnostics.test.ts * fix(openai-shim): return successful responses and restore localhost classifier tests * Update src/services/api/openaiShim.ts Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> * Update src/services/api/openaiShim.ts Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> * Update src/services/api/openaiShim.ts Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> * feat(provider): add truthful local generation readiness checks Implement Phase 2 provider readiness behavior by adding structured Ollama generation probes, wiring setup flows to readiness states, extending system-check with generation readiness output, and updating focused tests. 
* feat(api): add local self-healing fallback retries Implement Phase 3 self-healing behavior for local OpenAI-compatible providers: retry base URL fallbacks for localhost resolution and endpoint mismatches, plus capability-gated toolless retry for tool-incompatible local models; include diagnostics and focused tests. * fix(api): address review blockers for local provider reliability * Update src/utils/providerDiscovery.ts Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> * Update src/services/api/openaiShim.ts Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> * fix: harden readiness probes and cross-platform test stability * fix: refresh toolless retry payload and stabilize osc clipboard test * fix: harden Ollama readiness parsing and redact provider URLs --------- Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
127 lines
4.2 KiB
TypeScript
import { afterEach, expect, test } from 'bun:test'
|
|
|
|
import {
|
|
getAdditionalModelOptionsCacheScope,
|
|
getLocalProviderRetryBaseUrls,
|
|
isLocalProviderUrl,
|
|
resolveProviderRequest,
|
|
shouldAttemptLocalToollessRetry,
|
|
} from './providerConfig.js'
|
|
|
|
const originalEnv = {
|
|
CLAUDE_CODE_USE_OPENAI: process.env.CLAUDE_CODE_USE_OPENAI,
|
|
OPENAI_BASE_URL: process.env.OPENAI_BASE_URL,
|
|
OPENAI_MODEL: process.env.OPENAI_MODEL,
|
|
}
|
|
|
|
afterEach(() => {
|
|
process.env.CLAUDE_CODE_USE_OPENAI = originalEnv.CLAUDE_CODE_USE_OPENAI
|
|
process.env.OPENAI_BASE_URL = originalEnv.OPENAI_BASE_URL
|
|
process.env.OPENAI_MODEL = originalEnv.OPENAI_MODEL
|
|
})
|
|
|
|
test('treats localhost endpoints as local', () => {
|
|
expect(isLocalProviderUrl('http://localhost:11434/v1')).toBe(true)
|
|
expect(isLocalProviderUrl('http://127.0.0.1:11434/v1')).toBe(true)
|
|
expect(isLocalProviderUrl('http://0.0.0.0:11434/v1')).toBe(true)
|
|
// Full 127.0.0.0/8 loopback range should be treated as local
|
|
expect(isLocalProviderUrl('http://127.0.0.2:11434/v1')).toBe(true)
|
|
expect(isLocalProviderUrl('http://127.1.2.3:11434/v1')).toBe(true)
|
|
expect(isLocalProviderUrl('http://127.255.255.255:11434/v1')).toBe(true)
|
|
})
|
|
|
|
test('treats private IPv4 endpoints as local', () => {
|
|
expect(isLocalProviderUrl('http://10.0.0.1:11434/v1')).toBe(true)
|
|
expect(isLocalProviderUrl('http://172.16.0.1:11434/v1')).toBe(true)
|
|
expect(isLocalProviderUrl('http://192.168.0.1:11434/v1')).toBe(true)
|
|
})
|
|
|
|
test('treats .local hostnames as local', () => {
|
|
expect(isLocalProviderUrl('http://ollama.local:11434/v1')).toBe(true)
|
|
})
|
|
|
|
test('treats private IPv6 endpoints as local', () => {
|
|
expect(isLocalProviderUrl('http://[fd00::1]:11434/v1')).toBe(true)
|
|
expect(isLocalProviderUrl('http://[fe80::1]:11434/v1')).toBe(true)
|
|
expect(isLocalProviderUrl('http://[::1]:11434/v1')).toBe(true)
|
|
})
|
|
|
|
test('treats public hosts as remote', () => {
|
|
expect(isLocalProviderUrl('http://203.0.113.1:11434/v1')).toBe(false)
|
|
expect(isLocalProviderUrl('https://example.com/v1')).toBe(false)
|
|
expect(isLocalProviderUrl('http://[2001:4860:4860::8888]:11434/v1')).toBe(false)
|
|
})
|
|
|
|
test('creates a cache scope for local openai-compatible providers', () => {
|
|
process.env.CLAUDE_CODE_USE_OPENAI = '1'
|
|
process.env.OPENAI_BASE_URL = 'http://localhost:1234/v1'
|
|
process.env.OPENAI_MODEL = 'llama-3.2-3b-instruct'
|
|
|
|
expect(getAdditionalModelOptionsCacheScope()).toBe(
|
|
'openai:http://localhost:1234/v1',
|
|
)
|
|
})
|
|
|
|
test('keeps codex alias models on chat completions for local openai-compatible providers', () => {
|
|
process.env.CLAUDE_CODE_USE_OPENAI = '1'
|
|
process.env.OPENAI_BASE_URL = 'http://127.0.0.1:8080/v1'
|
|
process.env.OPENAI_MODEL = 'gpt-5.4'
|
|
|
|
expect(resolveProviderRequest()).toMatchObject({
|
|
transport: 'chat_completions',
|
|
requestedModel: 'gpt-5.4',
|
|
resolvedModel: 'gpt-5.4',
|
|
baseUrl: 'http://127.0.0.1:8080/v1',
|
|
})
|
|
expect(getAdditionalModelOptionsCacheScope()).toBe(
|
|
'openai:http://127.0.0.1:8080/v1',
|
|
)
|
|
})
|
|
|
|
test('skips local model cache scope for remote openai-compatible providers', () => {
|
|
process.env.CLAUDE_CODE_USE_OPENAI = '1'
|
|
process.env.OPENAI_BASE_URL = 'https://api.openai.com/v1'
|
|
process.env.OPENAI_MODEL = 'gpt-4o'
|
|
|
|
expect(getAdditionalModelOptionsCacheScope()).toBeNull()
|
|
})
|
|
|
|
test('derives local retry base URLs with /v1 and loopback fallback candidates', () => {
|
|
expect(getLocalProviderRetryBaseUrls('http://localhost:11434')).toEqual([
|
|
'http://localhost:11434/v1',
|
|
'http://127.0.0.1:11434',
|
|
'http://127.0.0.1:11434/v1',
|
|
])
|
|
})
|
|
|
|
test('does not derive local retry base URLs for remote providers', () => {
|
|
expect(getLocalProviderRetryBaseUrls('https://api.openai.com/v1')).toEqual([])
|
|
})
|
|
|
|
test('enables local toolless retry for likely Ollama endpoints with tools', () => {
|
|
expect(
|
|
shouldAttemptLocalToollessRetry({
|
|
baseUrl: 'http://localhost:11434/v1',
|
|
hasTools: true,
|
|
}),
|
|
).toBe(true)
|
|
})
|
|
|
|
test('disables local toolless retry when no tools are present', () => {
|
|
expect(
|
|
shouldAttemptLocalToollessRetry({
|
|
baseUrl: 'http://localhost:11434/v1',
|
|
hasTools: false,
|
|
}),
|
|
).toBe(false)
|
|
})
|
|
|
|
test('disables local toolless retry for non-Ollama local endpoints', () => {
|
|
expect(
|
|
shouldAttemptLocalToollessRetry({
|
|
baseUrl: 'http://localhost:1234/v1',
|
|
hasTools: true,
|
|
}),
|
|
).toBe(false)
|
|
})
|