Fix/openclaude diagnostics settings (#483)
* fix: use openclaude paths in diagnostics and settings
* fix: strip leaked reasoning from assistant output
* fix: preserve legacy claude config compatibility
* fix: tighten path and reasoning compatibility
* fix: buffer streamed reasoning leak preambles
* test: cover openclaude migration and reasoning fixes
* test: isolate execFileNoThrow from cross-file mocks
@@ -26,6 +26,11 @@ import { isEnvTruthy } from '../../utils/envUtils.js'
 import { resolveGeminiCredential } from '../../utils/geminiAuth.js'
 import { hydrateGeminiAccessTokenFromSecureStorage } from '../../utils/geminiCredentials.js'
 import { hydrateGithubModelsTokenFromSecureStorage } from '../../utils/githubModelsCredentials.js'
+import {
+  looksLikeLeakedReasoningPrefix,
+  shouldBufferPotentialReasoningPrefix,
+  stripLeakedReasoningPreamble,
+} from './reasoningLeakSanitizer.js'
 import {
   codexStreamToAnthropic,
   collectCodexCompletedResponse,
@@ -588,6 +593,8 @@ async function* openaiStreamToAnthropic(
   let hasEmittedContentStart = false
   let hasEmittedThinkingStart = false
   let hasClosedThinking = false
+  let activeTextBuffer = ''
+  let textBufferMode: 'none' | 'pending' | 'strip' = 'none'
   let lastStopReason: 'tool_use' | 'max_tokens' | 'end_turn' | null = null
   let hasEmittedFinalUsage = false
   let hasProcessedFinishReason = false
@@ -618,6 +625,30 @@ async function* openaiStreamToAnthropic(
   const decoder = new TextDecoder()
   let buffer = ''

+  const closeActiveContentBlock = async function* () {
+    if (!hasEmittedContentStart) return
+
+    if (textBufferMode !== 'none') {
+      const sanitized = stripLeakedReasoningPreamble(activeTextBuffer)
+      if (sanitized) {
+        yield {
+          type: 'content_block_delta',
+          index: contentBlockIndex,
+          delta: { type: 'text_delta', text: sanitized },
+        }
+      }
+    }
+
+    yield {
+      type: 'content_block_stop',
+      index: contentBlockIndex,
+    }
+    contentBlockIndex++
+    hasEmittedContentStart = false
+    activeTextBuffer = ''
+    textBufferMode = 'none'
+  }
+
   try {
     while (true) {
       const { done, value } = await reader.read()
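The new closeActiveContentBlock helper centralizes closing a text block: if text is still buffered (mode 'pending' or 'strip'), it is passed through stripLeakedReasoningPreamble once before content_block_stop is emitted, so a stream that ends mid-buffer neither leaks reasoning nor drops legitimate text. The sanitizer itself lives in reasoningLeakSanitizer.ts and is not part of this diff; a minimal sketch of the shape such a helper could take, with a purely hypothetical marker list:

    // Hypothetical sketch only; the real stripLeakedReasoningPreamble may use
    // entirely different markers and rules.
    const LEAK_PREAMBLES = [/^<think>[\s\S]*?<\/think>\s*/, /^Thinking:[\s\S]*?\n\n/]

    function stripLeakedReasoningPreambleSketch(text: string): string {
      for (const pattern of LEAK_PREAMBLES) {
        const stripped = text.replace(pattern, '')
        if (stripped !== text) return stripped.trimStart()
      }
      return text
    }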
@@ -672,6 +703,7 @@ async function* openaiStreamToAnthropic(
             contentBlockIndex++
             hasClosedThinking = true
           }
+          activeTextBuffer += delta.content
           if (!hasEmittedContentStart) {
             yield {
               type: 'content_block_start',
@@ -680,6 +712,35 @@ async function* openaiStreamToAnthropic(
             }
             hasEmittedContentStart = true
           }
+
+          if (
+            textBufferMode === 'strip' ||
+            looksLikeLeakedReasoningPrefix(activeTextBuffer)
+          ) {
+            textBufferMode = 'strip'
+            continue
+          }
+
+          if (textBufferMode === 'pending') {
+            if (shouldBufferPotentialReasoningPrefix(activeTextBuffer)) {
+              continue
+            }
+            yield {
+              type: 'content_block_delta',
+              index: contentBlockIndex,
+              delta: {
+                type: 'text_delta',
+                text: activeTextBuffer,
+              },
+            }
+            textBufferMode = 'none'
+            continue
+          }
+
+          if (shouldBufferPotentialReasoningPrefix(activeTextBuffer)) {
+            textBufferMode = 'pending'
+            continue
+          }
           yield {
             type: 'content_block_delta',
             index: contentBlockIndex,
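Taken together, the delta handler now runs a three-state machine over the accumulated activeTextBuffer: 'none' passes deltas straight through, 'pending' withholds output while the buffered prefix could still turn out to be a reasoning marker, and 'strip' swallows everything once the prefix is confirmed as leaked reasoning (closeActiveContentBlock later flushes whatever survives sanitization). A rough sketch of predicates consistent with these call sites, again with hypothetical markers rather than the project's real ones:

    // Hypothetical sketch of the two predicates; the real implementations in
    // reasoningLeakSanitizer.ts are not shown in this diff.
    const LEAK_PREFIXES = ['<think>', 'Thinking:']

    function looksLikeLeakedReasoningPrefixSketch(buffer: string): boolean {
      // True once the buffer definitively starts with a known marker.
      return LEAK_PREFIXES.some((p) => buffer.startsWith(p))
    }

    function shouldBufferPotentialReasoningPrefixSketch(buffer: string): boolean {
      // True while the buffer is still a strict prefix of some marker,
      // i.e. too short to classify either way.
      return LEAK_PREFIXES.some((p) => p.startsWith(buffer) && buffer.length < p.length)
    }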
@@ -698,12 +759,7 @@ async function* openaiStreamToAnthropic(
             hasClosedThinking = true
           }
           if (hasEmittedContentStart) {
-            yield {
-              type: 'content_block_stop',
-              index: contentBlockIndex,
-            }
-            contentBlockIndex++
-            hasEmittedContentStart = false
+            yield* closeActiveContentBlock()
           }

           const toolBlockIndex = contentBlockIndex
@@ -786,10 +842,7 @@ async function* openaiStreamToAnthropic(
     }
     // Close any open content blocks
     if (hasEmittedContentStart) {
-      yield {
-        type: 'content_block_stop',
-        index: contentBlockIndex,
-      }
+      yield* closeActiveContentBlock()
     }
     // Close active tool calls
     for (const [, tc] of activeToolCalls) {
@@ -1383,9 +1436,9 @@ class OpenAIShimMessages {
     const choice = data.choices?.[0]
     const content: Array<Record<string, unknown>> = []

-    // Some reasoning models (e.g. GLM-5) put their reply in reasoning_content
-    // while content stays null — emit reasoning as a thinking block, then
-    // fall back to it for visible text if content is empty.
+    // Some reasoning models (e.g. GLM-5) put their chain-of-thought in
+    // reasoning_content while content stays null. Preserve it as a thinking
+    // block, but do not surface it as visible assistant text.
     const reasoningText = choice?.message?.reasoning_content
     if (typeof reasoningText === 'string' && reasoningText) {
       content.push({ type: 'thinking', thinking: reasoningText })
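Concretely, for a non-streaming response where message.content is null and reasoning_content carries the model's chain-of-thought, the shim now yields only a thinking block; the visible-text fallback removed in the next hunk is what previously surfaced the reasoning. An illustrative shape, not a literal fixture from the commit:

    // { content: null, reasoning_content: 'First, consider...' } now maps to:
    //   [{ type: 'thinking', thinking: 'First, consider...' }]
    // No text block is synthesized from reasoning_content anymore.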
@@ -1393,9 +1446,12 @@ class OpenAIShimMessages {
     const rawContent =
       choice?.message?.content !== '' && choice?.message?.content != null
         ? choice?.message?.content
-        : choice?.message?.reasoning_content
+        : null
     if (typeof rawContent === 'string' && rawContent) {
-      content.push({ type: 'text', text: rawContent })
+      content.push({
+        type: 'text',
+        text: stripLeakedReasoningPreamble(rawContent),
+      })
     } else if (Array.isArray(rawContent) && rawContent.length > 0) {
       const parts: string[] = []
       for (const part of rawContent) {
@@ -1410,7 +1466,10 @@ class OpenAIShimMessages {
           }
           const joined = parts.join('\n')
           if (joined) {
-            content.push({ type: 'text', text: joined })
+            content.push({
+              type: 'text',
+              text: stripLeakedReasoningPreamble(joined),
+            })
           }
         }
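Per the commit message, these behaviors are covered by tests. A minimal vitest-style sketch of one invariant the streaming path relies on, namely that ordinary assistant text passes through the sanitizer unchanged; the framework and test phrasing are assumptions, and the commit's actual test files are not shown here:

    import { describe, expect, it } from 'vitest'
    import { stripLeakedReasoningPreamble } from './reasoningLeakSanitizer.js'

    describe('stripLeakedReasoningPreamble', () => {
      it('leaves ordinary assistant text untouched', () => {
        // Assumed invariant: sanitizing non-leaked text is a no-op, so normal
        // replies are never mangled by the strip pass in closeActiveContentBlock.
        expect(stripLeakedReasoningPreamble('Hello there.')).toBe('Hello there.')
      })
    })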