feat(api): improve local provider reliability with readiness and self-healing (#738)
* feat(api): classify openai-compatible provider failures

* Update src/services/api/providerConfig.ts

  Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

* Update src/services/api/errors.ts

  Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

* feat(api): harden openai-compatible diagnostics and env fallback

* Update src/services/api/openaiShim.ts

  Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

* Update src/services/api/openaiShim.ts

  Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

* Update src/services/api/errors.ts

  Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

* Update src/services/api/errors.ts

  Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

* Apply suggestion from @Copilot

  Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

* fix openaiShim duplicate requests and diagnostics

* remove unused url from http failure classifier

* dedupe env diagnostic warnings

* Remove hardcoded URLs from OpenAI error tests

  Removed hardcoded URLs from network failure classification tests.

* Update providerConfig.envDiagnostics.test.ts

* fix(openai-shim): return successful responses and restore localhost classifier tests

* Update src/services/api/openaiShim.ts

  Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

* Update src/services/api/openaiShim.ts

  Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

* Update src/services/api/openaiShim.ts

  Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

* feat(provider): add truthful local generation readiness checks

  Implement Phase 2 provider readiness behavior by adding structured Ollama
  generation probes, wiring setup flows to readiness states, extending
  system-check with generation readiness output, and updating focused tests.

* feat(api): add local self-healing fallback retries

  Implement Phase 3 self-healing behavior for local OpenAI-compatible
  providers: retry base URL fallbacks for localhost resolution and endpoint
  mismatches, plus capability-gated toolless retry for tool-incompatible
  local models; include diagnostics and focused tests.

* fix(api): address review blockers for local provider reliability

* Update src/utils/providerDiscovery.ts

  Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

* Update src/services/api/openaiShim.ts

  Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

* fix: harden readiness probes and cross-platform test stability

* fix: refresh toolless retry payload and stabilize osc clipboard test

* fix: harden Ollama readiness parsing and redact provider URLs

---------

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
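Two of the entries above describe mechanisms the hunks below consume but do not define. The Phase 2 readiness checks call probeOllamaGenerationReadiness() from src/utils/providerDiscovery.ts, whose implementation is outside this excerpt. Here is a minimal sketch of the probe's contract, assuming Ollama's documented /api/tags and /api/generate endpoints and simplifying the model list to plain name strings (the real rankOllamaModels and recommendOllamaModel helpers may take richer model records):

// Sketch only: the real implementation lives in src/utils/providerDiscovery.ts.
// /api/tags and /api/generate are Ollama's documented endpoints; the shape of
// the readiness union below is inferred from how the diff consumes it.
type OllamaGenerationReadiness =
  | { state: 'unreachable' }
  | { state: 'no_models' }
  | { state: 'generation_failed'; probeModel?: string; detail?: string }
  | { state: 'ready'; models: string[] }

async function probeOllamaGenerationReadiness(
  baseUrl = 'http://localhost:11434',
): Promise<OllamaGenerationReadiness> {
  // Step 1: reachability and installed models.
  let models: string[]
  try {
    const res = await fetch(`${baseUrl}/api/tags`)
    if (!res.ok) return { state: 'unreachable' }
    const body = (await res.json()) as { models?: Array<{ name?: string }> }
    models = (body.models ?? [])
      .map((m) => m.name)
      .filter((name): name is string => typeof name === 'string')
  } catch {
    return { state: 'unreachable' }
  }
  if (models.length === 0) return { state: 'no_models' }

  // Step 2: truthful readiness. A model can be listed yet still fail to
  // load (out of memory, missing blobs), so run a tiny real generation.
  const probeModel = models[0]
  try {
    const res = await fetch(`${baseUrl}/api/generate`, {
      method: 'POST',
      headers: { 'content-type': 'application/json' },
      body: JSON.stringify({
        model: probeModel,
        prompt: 'ping',
        stream: false,
        options: { num_predict: 1 }, // keep the probe cheap
      }),
    })
    if (!res.ok) {
      return { state: 'generation_failed', probeModel, detail: `HTTP ${res.status}` }
    }
  } catch (err) {
    return { state: 'generation_failed', probeModel, detail: String(err) }
  }
  return { state: 'ready', models }
}

Listing models alone would report false positives, which is why the generation step, not the model listing, is what the commit calls "truthful" readiness.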
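Similarly, the Phase 3 "local self-healing fallback retries" land in src/services/api/openaiShim.ts, which is also outside this excerpt. The following sketch shows the retry order that entry describes, under stated assumptions: candidateBaseUrls and chatWithSelfHealing are hypothetical names, and the real shim's failure classifiers and diagnostics are richer than the bare status-code checks used here:

// Hypothetical helpers illustrating the retry order described in the commit
// message; not the shim's actual API.
type ChatPayload = { model: string; messages: unknown[]; tools?: unknown[] }

function candidateBaseUrls(rawBaseUrl: string): string[] {
  const base = rawBaseUrl.replace(/\/+$/, '')
  const candidates = [base]
  // localhost can resolve to ::1 while the server listens on IPv4 only;
  // retry against the explicit IPv4 loopback address.
  if (base.includes('localhost')) {
    candidates.push(base.replace('localhost', '127.0.0.1'))
  }
  // Common endpoint mismatch: the configured base URL is missing /v1.
  for (const c of [...candidates]) {
    if (!c.endsWith('/v1')) candidates.push(`${c}/v1`)
  }
  return candidates
}

async function chatWithSelfHealing(
  baseUrl: string,
  payload: ChatPayload,
  opts: { allowToollessRetry: boolean },
): Promise<Response> {
  const post = (url: string, body: unknown) =>
    fetch(`${url}/chat/completions`, {
      method: 'POST',
      headers: { 'content-type': 'application/json' },
      body: JSON.stringify(body),
    })

  let lastError: unknown = new Error('no base URL candidates')
  for (const url of candidateBaseUrls(baseUrl)) {
    try {
      const res = await post(url, payload)
      if (res.ok) return res
      // Capability-gated toolless retry: some local models reject the
      // OpenAI `tools` field outright; retry once with tools stripped
      // (JSON.stringify drops the undefined field).
      if (opts.allowToollessRetry && payload.tools && res.status === 400) {
        const retry = await post(url, { ...payload, tools: undefined })
        if (retry.ok) return retry
      }
      lastError = new Error(`HTTP ${res.status} from ${url}`)
    } catch (err) {
      lastError = err
    }
  }
  throw lastError
}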
@@ -66,10 +66,44 @@ import {
 import {
   getOllamaChatBaseUrl,
   getLocalOpenAICompatibleProviderLabel,
   hasLocalOllama,
   listOllamaModels,
+  probeOllamaGenerationReadiness,
+  type OllamaGenerationReadiness,
 } from '../../utils/providerDiscovery.js'
 
+function describeOllamaReadinessIssue(
+  readiness: OllamaGenerationReadiness,
+  options?: {
+    baseUrl?: string
+    allowManualFallback?: boolean
+  },
+): string {
+  const endpoint = options?.baseUrl ?? 'http://localhost:11434'
+
+  if (readiness.state === 'unreachable') {
+    return `Could not reach Ollama at ${endpoint}. Start Ollama first, then run /provider again.`
+  }
+
+  if (readiness.state === 'no_models') {
+    const manualSuffix = options?.allowManualFallback
+      ? ', or enter details manually'
+      : ''
+    return `Ollama is running, but no installed models were found. Pull a chat model such as qwen2.5-coder:7b or llama3.1:8b first${manualSuffix}.`
+  }
+
+  if (readiness.state === 'generation_failed') {
+    const modelHint = readiness.probeModel ?? 'the selected model'
+    const detailSuffix = readiness.detail
+      ? ` Details: ${readiness.detail}.`
+      : ''
+    const manualSuffix = options?.allowManualFallback
+      ? ' You can also enter details manually.'
+      : ''
+    return `Ollama is reachable and models are installed, but a generation probe failed for ${modelHint}.${detailSuffix} Run "ollama run ${modelHint}" once and retry.${manualSuffix}`
+  }
+
+  return ''
+}
+
 type ProviderChoice = 'auto' | ProviderProfile | 'codex-oauth' | 'clear'
 
 type Step =
@@ -715,6 +749,7 @@ function AutoRecommendationStep({
   | {
       state: 'openai'
       defaultModel: string
+      reason: string
     }
   | {
       state: 'error'
@@ -728,19 +763,27 @@ function AutoRecommendationStep({
     void (async () => {
       const defaultModel = getGoalDefaultOpenAIModel(goal)
       try {
-        const ollamaAvailable = await hasLocalOllama()
-        if (!ollamaAvailable) {
+        const readiness = await probeOllamaGenerationReadiness()
+        if (readiness.state !== 'ready') {
           if (!cancelled) {
-            setStatus({ state: 'openai', defaultModel })
+            setStatus({
+              state: 'openai',
+              defaultModel,
+              reason: describeOllamaReadinessIssue(readiness),
+            })
           }
           return
         }
 
-        const models = await listOllamaModels()
-        const recommended = recommendOllamaModel(models, goal)
+        const recommended = recommendOllamaModel(readiness.models, goal)
         if (!recommended) {
           if (!cancelled) {
-            setStatus({ state: 'openai', defaultModel })
+            setStatus({
+              state: 'openai',
+              defaultModel,
+              reason:
+                'Ollama responded to a generation probe, but no recommended chat model matched this goal.',
+            })
           }
           return
         }
@@ -796,10 +839,10 @@ function AutoRecommendationStep({
|
||||
<Dialog title="Auto setup fallback" onCancel={onCancel}>
|
||||
<Box flexDirection="column" gap={1}>
|
||||
<Text>
|
||||
No viable local Ollama chat model was detected. Auto setup can
|
||||
continue into OpenAI-compatible setup with a default model of{' '}
|
||||
Auto setup can continue into OpenAI-compatible setup with a default model of{' '}
|
||||
{status.defaultModel}.
|
||||
</Text>
|
||||
<Text dimColor>{status.reason}</Text>
|
||||
<Select
|
||||
options={[
|
||||
{ label: 'Continue to OpenAI-compatible setup', value: 'continue' },
|
||||
@@ -883,32 +926,19 @@ function OllamaModelStep({
     let cancelled = false
 
     void (async () => {
-      const available = await hasLocalOllama()
-      if (!available) {
+      const readiness = await probeOllamaGenerationReadiness()
+      if (readiness.state !== 'ready') {
         if (!cancelled) {
           setStatus({
             state: 'unavailable',
-            message:
-              'Could not reach Ollama at http://localhost:11434. Start Ollama first, then run /provider again.',
+            message: describeOllamaReadinessIssue(readiness),
           })
         }
         return
       }
 
-      const models = await listOllamaModels()
-      if (models.length === 0) {
-        if (!cancelled) {
-          setStatus({
-            state: 'unavailable',
-            message:
-              'Ollama is running, but no installed models were found. Pull a chat model such as qwen2.5-coder:7b or llama3.1:8b first.',
-          })
-        }
-        return
-      }
-
-      const ranked = rankOllamaModels(models, 'balanced')
-      const recommended = recommendOllamaModel(models, 'balanced')
+      const ranked = rankOllamaModels(readiness.models, 'balanced')
+      const recommended = recommendOllamaModel(readiness.models, 'balanced')
       if (!cancelled) {
         setStatus({
           state: 'ready',