From f4818dc213de9a409447cb6afad8558c111384d7 Mon Sep 17 00:00:00 2001
From: Juan Camilo
Date: Thu, 2 Apr 2026 14:41:40 +0200
Subject: [PATCH] fix: shim reliability and protocol compliance overhaul
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Addresses the most critical remaining issues in the provider shim layer,
building on top of #124 (recursive schema normalization + try/finally).

openaiShim.ts:
- Throw APIError via SDK factory instead of plain Error — enables retry
  on 429/503 (was completely broken: zero retries for all 3P providers)
- Guard stop_reason !== null before emitting usage-only message_delta
  (Azure/Groq send usage before finish_reason)
- Fix assistant content: join text parts instead of invalid as-string
  cast (Mistral rejects array content on assistant role)
- Expose real HTTP Response in withResponse() for header inspection
- Skip stream_options for local providers (Ollama < 0.5 compatibility)

codexShim.ts:
- Throw APIError at all 4 throw sites (HTTP + 3 streaming errors)
- Add tool_choice 'none' mapping (was silently ignored)
- Forward is_error flag with Error: prefix (matching openaiShim)
---
 src/services/api/codexShim.ts  | 36 +++++++++++++++++++++++-------------
 src/services/api/openaiShim.ts | 37 +++++++++++++++++++++++++++--------
 2 files changed, 52 insertions(+), 21 deletions(-)

diff --git a/src/services/api/codexShim.ts b/src/services/api/codexShim.ts
index c65abdf0..1a2c375c 100644
--- a/src/services/api/codexShim.ts
+++ b/src/services/api/codexShim.ts
@@ -1,3 +1,4 @@
+import { APIError } from '@anthropic-ai/sdk'
 import type {
   ResolvedCodexCredentials,
   ResolvedProviderRequest,
@@ -234,7 +235,10 @@ export function convertAnthropicMessagesToResponsesInput(
       items.push({
         type: 'function_call_output',
         call_id: callId,
-        output: convertToolResultToText(toolResult.content),
+        output: (() => {
+          const out = convertToolResultToText(toolResult.content)
+          return toolResult.is_error ? `Error: ${out}` : out
+        })(),
       })
     }
 
@@ -453,6 +457,7 @@ function convertToolChoice(toolChoice: unknown): unknown {
   if (!choice?.type) return undefined
   if (choice.type === 'auto') return 'auto'
   if (choice.type === 'any') return 'required'
+  if (choice.type === 'none') return 'none'
   if (choice.type === 'tool' && choice.name) {
     return {
       type: 'function',
@@ -553,7 +558,13 @@ export async function performCodexRequest(options: {
 
   if (!response.ok) {
     const errorBody = await response.text().catch(() => 'unknown error')
-    throw new Error(`Codex API error ${response.status}: ${errorBody}`)
+    let errorResponse: object | undefined
+    try { errorResponse = JSON.parse(errorBody) } catch { /* raw text */ }
+    throw APIError.generate(
+      response.status, errorResponse,
+      `Codex API error ${response.status}: ${errorBody}`,
+      response.headers as unknown as Record<string, string>,
+    )
   }
 
   return response
@@ -633,11 +644,9 @@ export async function collectCodexCompletedResponse(
 
   for await (const event of readSseEvents(response)) {
     if (event.event === 'response.failed') {
-      throw new Error(
-        event.data?.response?.error?.message ??
-          event.data?.error?.message ??
-          'Codex response failed',
-      )
+      const msg = event.data?.response?.error?.message ??
+        event.data?.error?.message ?? 'Codex response failed'
+      throw APIError.generate(500, undefined, msg, {} as Record<string, string>)
     }
 
     if (
@@ -650,7 +659,10 @@
   }
 
   if (!completedResponse) {
-    throw new Error('Codex response ended without a completed payload')
+    throw APIError.generate(
+      500, undefined, 'Codex response ended without a completed payload',
+      {} as Record<string, string>,
+    )
   }
 
   return completedResponse
@@ -806,11 +818,9 @@ export async function* codexStreamToAnthropic(
     }
 
     if (event.event === 'response.failed') {
-      throw new Error(
-        payload?.response?.error?.message ??
-          payload?.error?.message ??
-          'Codex response failed',
-      )
+      const msg = payload?.response?.error?.message ??
+        payload?.error?.message ?? 'Codex response failed'
+      throw APIError.generate(500, undefined, msg, {} as Record<string, string>)
     }
   }
 
diff --git a/src/services/api/openaiShim.ts b/src/services/api/openaiShim.ts
index 1f99b7c4..645e602b 100644
--- a/src/services/api/openaiShim.ts
+++ b/src/services/api/openaiShim.ts
@@ -21,6 +21,7 @@
  * OPENAI_MODEL — optional; use github:copilot or openai/gpt-4.1 style IDs
  */
 
+import { APIError } from '@anthropic-ai/sdk'
 import { isEnvTruthy } from '../../utils/envUtils.js'
 import { hydrateGithubModelsTokenFromSecureStorage } from '../../utils/githubModelsCredentials.js'
 import {
@@ -33,6 +34,7 @@ import {
   type ShimCreateParams,
 } from './codexShim.js'
 import {
+  isLocalProviderUrl,
   resolveCodexApiCredentials,
   resolveProviderRequest,
 } from './providerConfig.js'
@@ -213,7 +215,10 @@ function convertMessages(
 
       const assistantMsg: OpenAIMessage = {
         role: 'assistant',
-        content: convertContentBlocks(textContent) as string,
+        content: (() => {
+          const c = convertContentBlocks(textContent)
+          return typeof c === 'string' ? c : Array.isArray(c) ? c.map((p: { text?: string }) => p.text ?? '').join('') : ''
+        })(),
       }
 
       if (toolUses.length > 0) {
@@ -242,7 +247,10 @@
       } else {
         result.push({
           role: 'assistant',
-          content: convertContentBlocks(content) as string,
+          content: (() => {
+            const c = convertContentBlocks(content)
+            return typeof c === 'string' ? c : Array.isArray(c) ? c.map((p: { text?: string }) => p.text ?? '').join('') : ''
+          })(),
         })
       }
     }
@@ -617,7 +625,8 @@ async function* openaiStreamToAnthropic(
     if (
       !hasEmittedFinalUsage &&
       chunkUsage &&
-      (chunk.choices?.length ?? 0) === 0
+      (chunk.choices?.length ?? 0) === 0 &&
+      lastStopReason !== null
     ) {
       yield {
         type: 'message_delta',
@@ -666,9 +675,12 @@ class OpenAIShimMessages {
   ) {
     const self = this
 
+    let httpResponse: Response | undefined
+
    const promise = (async () => {
       const request = resolveProviderRequest({ model: params.model })
       const response = await self._doRequest(request, params, options)
+      httpResponse = response
 
       if (params.stream) {
         return new OpenAIShimStream(
@@ -695,8 +707,9 @@
     const data = await promise
     return {
       data,
-      response: new Response(),
-      request_id: makeMessageId(),
+      response: httpResponse ?? new Response(),
+      request_id:
+        httpResponse?.headers.get('x-request-id') ?? makeMessageId(),
     }
   }
 
@@ -774,7 +787,7 @@
       body.max_completion_tokens = maxCompletionTokensValue
     }
 
-    if (params.stream) {
+    if (params.stream && !isLocalProviderUrl(request.baseUrl)) {
       body.stream_options = { include_usage: true }
     }
 
@@ -890,12 +903,20 @@
       const errorBody = await response.text().catch(() => 'unknown error')
       const rateHint =
         isGithub && response.status === 429 ? formatRetryAfterHint(response) : ''
-      throw new Error(
+      let errorResponse: object | undefined
+      try { errorResponse = JSON.parse(errorBody) } catch { /* raw text */ }
+      throw APIError.generate(
+        response.status,
+        errorResponse,
         `OpenAI API error ${response.status}: ${errorBody}${rateHint}`,
+        response.headers as unknown as Record<string, string>,
       )
     }
 
-    throw new Error('OpenAI shim: request loop exited unexpectedly')
+    throw APIError.generate(
+      500, undefined, 'OpenAI shim: request loop exited unexpectedly',
+      {} as Record<string, string>,
+    )
   }
 
   private _convertNonStreamingResponse(