From 858f06d964a71fdc689da8f2edbb2aaab2f38f7c Mon Sep 17 00:00:00 2001 From: Juan Camilo Date: Fri, 3 Apr 2026 14:05:34 +0200 Subject: [PATCH] fix: strip Anthropic-specific params from 3P provider paths MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Three silent failure modes affecting all third-party provider users: 1. Thinking blocks serialized as text corrupt multi-turn context — strip them instead of converting to raw text tags. 2. Unknown models fall through to 200k context window default, so auto-compact never triggers — use conservative 8k for unknown 3P models with a warning log. 3. Session resume with thinking blocks causes 400 or context corruption on 3P providers — strip thinking/redacted_thinking content blocks from deserialized messages when resuming against a non-Anthropic provider. Addresses findings 2, 3, and 5 from #248. --- src/services/api/openaiShim.ts | 10 ++++++---- src/utils/context.ts | 13 ++++++++++--- src/utils/conversationRecovery.ts | 31 ++++++++++++++++++++++++++++++- 3 files changed, 46 insertions(+), 8 deletions(-) diff --git a/src/services/api/openaiShim.ts b/src/services/api/openaiShim.ts index 6e1c6b05..c1c3f3fd 100644 --- a/src/services/api/openaiShim.ts +++ b/src/services/api/openaiShim.ts @@ -195,10 +195,12 @@ function convertContentBlocks( // handled separately break case 'thinking': - // Append thinking as text with a marker for models that support reasoning - if (block.thinking) { - parts.push({ type: 'text', text: `${block.thinking}` }) - } + case 'redacted_thinking': + // Strip thinking blocks for OpenAI-compatible providers. + // These are Anthropic-specific content types that 3P providers + // don't understand. Serializing them as text corrupts + // multi-turn context: the model sees the tags as part of its + // previous reply and may mimic or misattribute them. 
break default: if (block.text) { diff --git a/src/utils/context.ts b/src/utils/context.ts index 28937dd7..24b5dd85 100644 --- a/src/utils/context.ts +++ b/src/utils/context.ts @@ -72,16 +72,23 @@ export function getContextWindowForModel( return 1_000_000 } - // OpenAI-compatible provider — use known context windows for the model - if ( + // OpenAI-compatible provider — use known context windows for the model. + // Unknown models get a conservative 8k default so auto-compact triggers + // before hitting a hard context_window_exceeded error (issue #248 finding 3). + const isOpenAIProvider = isEnvTruthy(process.env.CLAUDE_CODE_USE_OPENAI) || isEnvTruthy(process.env.CLAUDE_CODE_USE_GEMINI) || isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB) - ) { + if (isOpenAIProvider) { const openaiWindow = getOpenAIContextWindow(model) if (openaiWindow !== undefined) { return openaiWindow } + console.error( + `[context] Warning: model "${model}" not in context window table — using conservative 8k default. ` + + 'Add it to src/utils/model/openaiContextWindows.ts for accurate compaction.', + ) + return 8_000 } const cap = getModelCapability(model) diff --git a/src/utils/conversationRecovery.ts b/src/utils/conversationRecovery.ts index 773490f1..3d4ad44b 100644 --- a/src/utils/conversationRecovery.ts +++ b/src/utils/conversationRecovery.ts @@ -24,6 +24,7 @@ import { type FileHistorySnapshot, } from './fileHistory.js' import { logError } from './log.js' +import { getAPIProvider } from './model/providers.js' import { createAssistantMessage, createUserMessage, @@ -177,6 +178,25 @@ export type DeserializeResult = { turnInterruptionState: TurnInterruptionState } +/** + * Remove thinking/redacted_thinking content blocks from assistant messages. + * Messages that become empty after stripping are removed entirely. 
+ */
+function stripThinkingBlocks(messages: NormalizedMessage[]): NormalizedMessage[] {
+  return messages.reduce((acc, msg) => {
+    if (msg.type !== 'assistant' || !Array.isArray(msg.message?.content)) {
+      acc.push(msg)
+      return acc
+    }
+    const filtered = msg.message.content.filter(
+      (block: { type?: string }) => block.type !== 'thinking' && block.type !== 'redacted_thinking',
+    )
+    if (filtered.length === 0) return acc
+    acc.push({ ...msg, message: { ...msg.message, content: filtered } })
+    return acc
+  }, [] as NormalizedMessage[])
+}
+
 /**
  * Deserializes messages from a log file into the format expected by the REPL.
  * Filters unresolved tool uses, orphaned thinking messages, and appends a
@@ -227,10 +247,19 @@ export function deserializeMessagesWithInterruptDetection(
     filteredToolUses,
   ) as NormalizedMessage[]
 
+  // Strip thinking/redacted_thinking content blocks from assistant messages
+  // when resuming against a 3P provider. These Anthropic-specific blocks cause
+  // 400 errors or context corruption on OpenAI-compatible providers (issue #248 finding 5).
+  const provider = getAPIProvider()
+  const isThirdPartyProvider = provider !== 'firstParty' && provider !== 'bedrock' && provider !== 'vertex' && provider !== 'foundry'
+  const thinkingStripped = isThirdPartyProvider
+    ? stripThinkingBlocks(filteredThinking)
+    : filteredThinking
+
   // Filter out assistant messages with only whitespace text content.
   // This can happen when model outputs "\n\n" before thinking, user cancels mid-stream.
   const filteredMessages = filterWhitespaceOnlyAssistantMessages(
-    filteredThinking,
+    thinkingStripped,
   ) as NormalizedMessage[]
 
   const internalState = detectTurnInterruption(filteredMessages)