From b4bd95b47715c9896240d708c106777507fd26ec Mon Sep 17 00:00:00 2001 From: KRATOS <84986124+gnanam1990@users.noreply.github.com> Date: Mon, 6 Apr 2026 19:38:45 +0530 Subject: [PATCH] fix: normalize malformed Bash tool arguments from OpenAI-compatible providers (#385) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * fix: normalize malformed Bash tool arguments from OpenAI-compatible providers * fix: keep invalid Bash tool args from becoming commands * fix: preserve malformed Bash JSON literals * test: stabilize rebased PR 385 checks * test: isolate provider profile env assertions * fix: extend tool argument normalization to all tools and harden edge cases - Extend STRING_ARGUMENT_TOOL_FIELDS to normalize Read, Write, Edit, Glob, and Grep plain-string arguments (fixes "Invalid tool parameters" errors reported by VennDev) - Normalize streaming Bash args regardless of finish_reason, not only when finish_reason is 'tool_calls' - Broaden isLikelyStructuredObjectLiteral to catch malformed object-shaped strings like {command:"pwd"} and {'command':'pwd'} (fixes CR2 from Vasanthdev2004) - Apply blank/object-literal guard to all tools, not just Bash - Extract duplicated JSON repair suffix combinations into shared constant - Add 32 isolated unit tests for toolArgumentNormalization Co-Authored-By: Claude Opus 4.6 (1M context) * fix: skip streaming normalization on finish_reason length Truncated tool calls (finish_reason: 'length') now preserve the raw buffer instead of normalizing into executable commands, preventing incomplete commands from becoming runnable. Co-Authored-By: Claude Opus 4.6 (1M context) * fix: comprehensive tool argument normalization hardening - Remove all { raw: ... 
} returns that caused InputValidationError with z.strictObject schemas — return {} instead for clean Zod errors - Extend normalizeAtStop buffering to all mapped tools (Read, Write, Edit, Glob, Grep) so streaming paths also get normalized - Make repairPossiblyTruncatedObjectJson generic — repair any valid JSON object, not just ones with a command field - Export hasToolFieldMapping for streaming normalizeAtStop decision - Skip normalization on finish_reason: length to preserve raw truncated buffer - Update all test expectations to match new behavior Co-Authored-By: Claude Opus 4.6 (1M context) --------- Co-authored-by: Claude Opus 4.6 (1M context) --- src/ink/termio/osc.test.ts | 19 +- src/services/api/openaiShim.test.ts | 1119 +++++++++++++++++ src/services/api/openaiShim.ts | 97 +- .../api/toolArgumentNormalization.test.ts | 180 +++ src/services/api/toolArgumentNormalization.ts | 69 + src/utils/model/modelOptions.github.test.ts | 40 +- src/utils/providerFlag.test.ts | 32 +- src/utils/providerProfiles.test.ts | 36 +- 8 files changed, 1539 insertions(+), 53 deletions(-) create mode 100644 src/services/api/toolArgumentNormalization.test.ts create mode 100644 src/services/api/toolArgumentNormalization.ts diff --git a/src/ink/termio/osc.test.ts b/src/ink/termio/osc.test.ts index 46e0375f..7f4f4917 100644 --- a/src/ink/termio/osc.test.ts +++ b/src/ink/termio/osc.test.ts @@ -27,6 +27,21 @@ async function flushClipboardCopy(): Promise { await new Promise(resolve => setTimeout(resolve, 0)) } +async function waitForExecCall( + command: string, + attempts = 20, +): Promise<(typeof execFileNoThrowMock.mock.calls)[number] | undefined> { + for (let attempt = 0; attempt < attempts; attempt++) { + const call = execFileNoThrowMock.mock.calls.find(([cmd]) => cmd === command) + if (call) { + return call + } + await flushClipboardCopy() + } + + return undefined +} + describe('Windows clipboard fallback', () => { beforeEach(() => { execFileNoThrowMock.mockClear() @@ -62,9 +77,7 @@ 
describe('Windows clipboard fallback', () => { await setClipboard('Привет мир') await flushClipboardCopy() - const windowsCall = execFileNoThrowMock.mock.calls.find( - ([cmd]) => cmd === 'powershell', - ) + const windowsCall = await waitForExecCall('powershell') expect(windowsCall?.[2]).toMatchObject({ stdin: 'ignore', diff --git a/src/services/api/openaiShim.test.ts b/src/services/api/openaiShim.test.ts index bcfdef79..ebf0a9f3 100644 --- a/src/services/api/openaiShim.test.ts +++ b/src/services/api/openaiShim.test.ts @@ -500,6 +500,1125 @@ test('preserves Gemini tool call extra_content from streaming chunks', async () }) }) +test('normalizes plain string Bash tool arguments from OpenAI-compatible responses', async () => { + globalThis.fetch = (async (_input, _init) => { + return new Response( + JSON.stringify({ + id: 'chatcmpl-1', + model: 'google/gemini-3.1-pro-preview', + choices: [ + { + message: { + role: 'assistant', + tool_calls: [ + { + id: 'function-call-1', + type: 'function', + function: { + name: 'Bash', + arguments: 'pwd', + }, + }, + ], + }, + finish_reason: 'tool_calls', + }, + ], + usage: { + prompt_tokens: 12, + completion_tokens: 4, + total_tokens: 16, + }, + }), + { + headers: { + 'Content-Type': 'application/json', + }, + }, + ) + }) as FetchType + + const client = createOpenAIShimClient({}) as OpenAIShimClient + + const message = await client.beta.messages.create({ + model: 'google/gemini-3.1-pro-preview', + system: 'test system', + messages: [{ role: 'user', content: 'Use Bash' }], + max_tokens: 64, + stream: false, + }) as { + stop_reason?: string + content?: Array> + } + + expect(message.stop_reason).toBe('tool_use') + expect(message.content).toEqual([ + { + type: 'tool_use', + id: 'function-call-1', + name: 'Bash', + input: { command: 'pwd' }, + }, + ]) +}) + +test('normalizes Bash tool arguments that are valid JSON strings', async () => { + globalThis.fetch = (async (_input, _init) => { + return new Response( + JSON.stringify({ + id: 
'chatcmpl-1', + model: 'google/gemini-3.1-pro-preview', + choices: [ + { + message: { + role: 'assistant', + tool_calls: [ + { + id: 'function-call-1', + type: 'function', + function: { + name: 'Bash', + arguments: '"pwd"', + }, + }, + ], + }, + finish_reason: 'tool_calls', + }, + ], + usage: { + prompt_tokens: 12, + completion_tokens: 4, + total_tokens: 16, + }, + }), + { + headers: { + 'Content-Type': 'application/json', + }, + }, + ) + }) as FetchType + + const client = createOpenAIShimClient({}) as OpenAIShimClient + + const message = await client.beta.messages.create({ + model: 'google/gemini-3.1-pro-preview', + system: 'test system', + messages: [{ role: 'user', content: 'Use Bash' }], + max_tokens: 64, + stream: false, + }) as { + content?: Array> + } + + expect(message.content).toEqual([ + { + type: 'tool_use', + id: 'function-call-1', + name: 'Bash', + input: { command: 'pwd' }, + }, + ]) +}) + +test.each([ + ['false', false], + ['null', null], + ['[]', []], +])( + 'preserves malformed Bash JSON literals as parsed values in non-streaming responses: %s', + async (argumentsValue, expectedInput) => { + globalThis.fetch = (async (_input, _init) => { + return new Response( + JSON.stringify({ + id: 'chatcmpl-1', + model: 'google/gemini-3.1-pro-preview', + choices: [ + { + message: { + role: 'assistant', + tool_calls: [ + { + id: 'function-call-1', + type: 'function', + function: { + name: 'Bash', + arguments: argumentsValue, + }, + }, + ], + }, + finish_reason: 'tool_calls', + }, + ], + usage: { + prompt_tokens: 12, + completion_tokens: 4, + total_tokens: 16, + }, + }), + { + headers: { + 'Content-Type': 'application/json', + }, + }, + ) + }) as FetchType + + const client = createOpenAIShimClient({}) as OpenAIShimClient + + const message = await client.beta.messages.create({ + model: 'google/gemini-3.1-pro-preview', + system: 'test system', + messages: [{ role: 'user', content: 'Use Bash' }], + max_tokens: 64, + stream: false, + }) as { + content?: Array> + } + 
+ expect(message.content).toEqual([ + { + type: 'tool_use', + id: 'function-call-1', + name: 'Bash', + input: expectedInput, + }, + ]) + }, +) + +test('keeps terminal empty Bash tool arguments invalid in non-streaming responses', async () => { + globalThis.fetch = (async (_input, _init) => { + return new Response( + JSON.stringify({ + id: 'chatcmpl-1', + model: 'google/gemini-3.1-pro-preview', + choices: [ + { + message: { + role: 'assistant', + tool_calls: [ + { + id: 'function-call-1', + type: 'function', + function: { + name: 'Bash', + arguments: '', + }, + }, + ], + }, + finish_reason: 'tool_calls', + }, + ], + usage: { + prompt_tokens: 12, + completion_tokens: 4, + total_tokens: 16, + }, + }), + { + headers: { + 'Content-Type': 'application/json', + }, + }, + ) + }) as FetchType + + const client = createOpenAIShimClient({}) as OpenAIShimClient + + const message = await client.beta.messages.create({ + model: 'google/gemini-3.1-pro-preview', + system: 'test system', + messages: [{ role: 'user', content: 'Use Bash' }], + max_tokens: 64, + stream: false, + }) as { + content?: Array> + } + + expect(message.content).toEqual([ + { + type: 'tool_use', + id: 'function-call-1', + name: 'Bash', + input: {}, + }, + ]) +}) + +test('normalizes plain string Bash tool arguments in streaming responses', async () => { + globalThis.fetch = (async (_input, _init) => { + const chunks = makeStreamChunks([ + { + id: 'chatcmpl-1', + object: 'chat.completion.chunk', + model: 'google/gemini-3.1-pro-preview', + choices: [ + { + index: 0, + delta: { + role: 'assistant', + tool_calls: [ + { + index: 0, + id: 'function-call-1', + type: 'function', + function: { + name: 'Bash', + arguments: 'pwd', + }, + }, + ], + }, + finish_reason: null, + }, + ], + }, + { + id: 'chatcmpl-1', + object: 'chat.completion.chunk', + model: 'google/gemini-3.1-pro-preview', + choices: [ + { + index: 0, + delta: {}, + finish_reason: 'tool_calls', + }, + ], + }, + ]) + + return makeSseResponse(chunks) + }) as 
FetchType + + const client = createOpenAIShimClient({}) as OpenAIShimClient + + const result = await client.beta.messages + .create({ + model: 'google/gemini-3.1-pro-preview', + system: 'test system', + messages: [{ role: 'user', content: 'Use Bash' }], + max_tokens: 64, + stream: true, + }) + .withResponse() + + const events: Array> = [] + for await (const event of result.data) { + events.push(event) + } + + const normalizedInput = events + .filter( + event => + event.type === 'content_block_delta' && + typeof event.delta === 'object' && + event.delta !== null && + (event.delta as Record).type === 'input_json_delta', + ) + .map(event => (event.delta as Record).partial_json) + .join('') + + expect(normalizedInput).toBe('{"command":"pwd"}') +}) + +test('normalizes plain string Bash tool arguments when streaming starts with an empty chunk', async () => { + globalThis.fetch = (async (_input, _init) => { + const chunks = makeStreamChunks([ + { + id: 'chatcmpl-1', + object: 'chat.completion.chunk', + model: 'google/gemini-3.1-pro-preview', + choices: [ + { + index: 0, + delta: { + role: 'assistant', + tool_calls: [ + { + index: 0, + id: 'function-call-1', + type: 'function', + function: { + name: 'Bash', + arguments: '', + }, + }, + ], + }, + finish_reason: null, + }, + ], + }, + { + id: 'chatcmpl-1', + object: 'chat.completion.chunk', + model: 'google/gemini-3.1-pro-preview', + choices: [ + { + index: 0, + delta: { + tool_calls: [ + { + index: 0, + type: 'function', + function: { + arguments: 'pwd', + }, + }, + ], + }, + finish_reason: null, + }, + ], + }, + { + id: 'chatcmpl-1', + object: 'chat.completion.chunk', + model: 'google/gemini-3.1-pro-preview', + choices: [ + { + index: 0, + delta: {}, + finish_reason: 'tool_calls', + }, + ], + }, + ]) + + return makeSseResponse(chunks) + }) as FetchType + + const client = createOpenAIShimClient({}) as OpenAIShimClient + + const result = await client.beta.messages + .create({ + model: 'google/gemini-3.1-pro-preview', + 
system: 'test system', + messages: [{ role: 'user', content: 'Use Bash' }], + max_tokens: 64, + stream: true, + }) + .withResponse() + + const events: Array> = [] + for await (const event of result.data) { + events.push(event) + } + + const normalizedInput = events + .filter( + event => + event.type === 'content_block_delta' && + typeof event.delta === 'object' && + event.delta !== null && + (event.delta as Record).type === 'input_json_delta', + ) + .map(event => (event.delta as Record).partial_json) + .join('') + + expect(normalizedInput).toBe('{"command":"pwd"}') +}) + +test('normalizes plain string Bash tool arguments when streaming starts with whitespace', async () => { + globalThis.fetch = (async (_input, _init) => { + const chunks = makeStreamChunks([ + { + id: 'chatcmpl-1', + object: 'chat.completion.chunk', + model: 'google/gemini-3.1-pro-preview', + choices: [ + { + index: 0, + delta: { + role: 'assistant', + tool_calls: [ + { + index: 0, + id: 'function-call-1', + type: 'function', + function: { + name: 'Bash', + arguments: ' ', + }, + }, + ], + }, + finish_reason: null, + }, + ], + }, + { + id: 'chatcmpl-1', + object: 'chat.completion.chunk', + model: 'google/gemini-3.1-pro-preview', + choices: [ + { + index: 0, + delta: { + tool_calls: [ + { + index: 0, + type: 'function', + function: { + arguments: 'pwd', + }, + }, + ], + }, + finish_reason: null, + }, + ], + }, + { + id: 'chatcmpl-1', + object: 'chat.completion.chunk', + model: 'google/gemini-3.1-pro-preview', + choices: [ + { + index: 0, + delta: {}, + finish_reason: 'tool_calls', + }, + ], + }, + ]) + + return makeSseResponse(chunks) + }) as FetchType + + const client = createOpenAIShimClient({}) as OpenAIShimClient + + const result = await client.beta.messages + .create({ + model: 'google/gemini-3.1-pro-preview', + system: 'test system', + messages: [{ role: 'user', content: 'Use Bash' }], + max_tokens: 64, + stream: true, + }) + .withResponse() + + const events: Array> = [] + for await (const 
event of result.data) { + events.push(event) + } + + const normalizedInput = events + .filter( + event => + event.type === 'content_block_delta' && + typeof event.delta === 'object' && + event.delta !== null && + (event.delta as Record).type === 'input_json_delta', + ) + .map(event => (event.delta as Record).partial_json) + .join('') + + expect(normalizedInput).toBe('{"command":" pwd"}') +}) + +test('keeps terminal whitespace-only Bash arguments invalid in streaming responses', async () => { + globalThis.fetch = (async (_input, _init) => { + const chunks = makeStreamChunks([ + { + id: 'chatcmpl-1', + object: 'chat.completion.chunk', + model: 'google/gemini-3.1-pro-preview', + choices: [ + { + index: 0, + delta: { + role: 'assistant', + tool_calls: [ + { + index: 0, + id: 'function-call-1', + type: 'function', + function: { + name: 'Bash', + arguments: ' ', + }, + }, + ], + }, + finish_reason: null, + }, + ], + }, + { + id: 'chatcmpl-1', + object: 'chat.completion.chunk', + model: 'google/gemini-3.1-pro-preview', + choices: [ + { + index: 0, + delta: {}, + finish_reason: 'tool_calls', + }, + ], + }, + ]) + + return makeSseResponse(chunks) + }) as FetchType + + const client = createOpenAIShimClient({}) as OpenAIShimClient + + const result = await client.beta.messages + .create({ + model: 'google/gemini-3.1-pro-preview', + system: 'test system', + messages: [{ role: 'user', content: 'Use Bash' }], + max_tokens: 64, + stream: true, + }) + .withResponse() + + const events: Array> = [] + for await (const event of result.data) { + events.push(event) + } + + const normalizedInput = events + .filter( + event => + event.type === 'content_block_delta' && + typeof event.delta === 'object' && + event.delta !== null && + (event.delta as Record).type === 'input_json_delta', + ) + .map(event => (event.delta as Record).partial_json) + .join('') + + expect(normalizedInput).toBe('{}') +}) + +test('normalizes streaming Bash arguments that begin with bracket syntax', async () => { + 
globalThis.fetch = (async (_input, _init) => { + const chunks = makeStreamChunks([ + { + id: 'chatcmpl-1', + object: 'chat.completion.chunk', + model: 'google/gemini-3.1-pro-preview', + choices: [ + { + index: 0, + delta: { + role: 'assistant', + tool_calls: [ + { + index: 0, + id: 'function-call-1', + type: 'function', + function: { + name: 'Bash', + arguments: '[ -f package.json ] && pwd', + }, + }, + ], + }, + finish_reason: null, + }, + ], + }, + { + id: 'chatcmpl-1', + object: 'chat.completion.chunk', + model: 'google/gemini-3.1-pro-preview', + choices: [ + { + index: 0, + delta: {}, + finish_reason: 'tool_calls', + }, + ], + }, + ]) + + return makeSseResponse(chunks) + }) as FetchType + + const client = createOpenAIShimClient({}) as OpenAIShimClient + + const result = await client.beta.messages + .create({ + model: 'google/gemini-3.1-pro-preview', + system: 'test system', + messages: [{ role: 'user', content: 'Use Bash' }], + max_tokens: 64, + stream: true, + }) + .withResponse() + + const events: Array> = [] + for await (const event of result.data) { + events.push(event) + } + + const normalizedInput = events + .filter( + event => + event.type === 'content_block_delta' && + typeof event.delta === 'object' && + event.delta !== null && + (event.delta as Record).type === 'input_json_delta', + ) + .map(event => (event.delta as Record).partial_json) + .join('') + + expect(normalizedInput).toBe('{"command":"[ -f package.json ] && pwd"}') +}) + +test('normalizes streaming Bash arguments when the first chunk is only an opening brace', async () => { + globalThis.fetch = (async (_input, _init) => { + const chunks = makeStreamChunks([ + { + id: 'chatcmpl-1', + object: 'chat.completion.chunk', + model: 'google/gemini-3.1-pro-preview', + choices: [ + { + index: 0, + delta: { + role: 'assistant', + tool_calls: [ + { + index: 0, + id: 'function-call-1', + type: 'function', + function: { + name: 'Bash', + arguments: '{', + }, + }, + ], + }, + finish_reason: null, + }, + ], 
+ }, + { + id: 'chatcmpl-1', + object: 'chat.completion.chunk', + model: 'google/gemini-3.1-pro-preview', + choices: [ + { + index: 0, + delta: { + tool_calls: [ + { + index: 0, + type: 'function', + function: { + arguments: ' pwd; }', + }, + }, + ], + }, + finish_reason: null, + }, + ], + }, + { + id: 'chatcmpl-1', + object: 'chat.completion.chunk', + model: 'google/gemini-3.1-pro-preview', + choices: [ + { + index: 0, + delta: {}, + finish_reason: 'tool_calls', + }, + ], + }, + ]) + + return makeSseResponse(chunks) + }) as FetchType + + const client = createOpenAIShimClient({}) as OpenAIShimClient + + const result = await client.beta.messages + .create({ + model: 'google/gemini-3.1-pro-preview', + system: 'test system', + messages: [{ role: 'user', content: 'Use Bash' }], + max_tokens: 64, + stream: true, + }) + .withResponse() + + const events: Array> = [] + for await (const event of result.data) { + events.push(event) + } + + const normalizedInput = events + .filter( + event => + event.type === 'content_block_delta' && + typeof event.delta === 'object' && + event.delta !== null && + (event.delta as Record).type === 'input_json_delta', + ) + .map(event => (event.delta as Record).partial_json) + .join('') + + expect(normalizedInput).toBe('{"command":"{ pwd; }"}') +}) + +test('repairs truncated structured Bash JSON in streaming responses', async () => { + globalThis.fetch = (async (_input, _init) => { + const chunks = makeStreamChunks([ + { + id: 'chatcmpl-1', + object: 'chat.completion.chunk', + model: 'google/gemini-3.1-pro-preview', + choices: [ + { + index: 0, + delta: { + role: 'assistant', + tool_calls: [ + { + index: 0, + id: 'function-call-1', + type: 'function', + function: { + name: 'Bash', + arguments: '{"command":"pwd"', + }, + }, + ], + }, + finish_reason: null, + }, + ], + }, + { + id: 'chatcmpl-1', + object: 'chat.completion.chunk', + model: 'google/gemini-3.1-pro-preview', + choices: [ + { + index: 0, + delta: {}, + finish_reason: 'tool_calls', + 
}, + ], + }, + ]) + + return makeSseResponse(chunks) + }) as FetchType + + const client = createOpenAIShimClient({}) as OpenAIShimClient + + const result = await client.beta.messages + .create({ + model: 'google/gemini-3.1-pro-preview', + system: 'test system', + messages: [{ role: 'user', content: 'Use Bash' }], + max_tokens: 64, + stream: true, + }) + .withResponse() + + const events: Array> = [] + for await (const event of result.data) { + events.push(event) + } + + const normalizedInput = events + .filter( + event => + event.type === 'content_block_delta' && + typeof event.delta === 'object' && + event.delta !== null && + (event.delta as Record).type === 'input_json_delta', + ) + .map(event => (event.delta as Record).partial_json) + .join('') + + expect(normalizedInput).toBe('{"command":"pwd"}') +}) + +test('does not normalize incomplete streamed Bash commands when finish_reason is length', async () => { + globalThis.fetch = (async (_input, _init) => { + const chunks = makeStreamChunks([ + { + id: 'chatcmpl-1', + object: 'chat.completion.chunk', + model: 'google/gemini-3.1-pro-preview', + choices: [ + { + index: 0, + delta: { + role: 'assistant', + tool_calls: [ + { + index: 0, + id: 'function-call-1', + type: 'function', + function: { + name: 'Bash', + arguments: 'rg --fi', + }, + }, + ], + }, + finish_reason: null, + }, + ], + }, + { + id: 'chatcmpl-1', + object: 'chat.completion.chunk', + model: 'google/gemini-3.1-pro-preview', + choices: [ + { + index: 0, + delta: {}, + finish_reason: 'length', + }, + ], + }, + ]) + + return makeSseResponse(chunks) + }) as FetchType + + const client = createOpenAIShimClient({}) as OpenAIShimClient + + const result = await client.beta.messages + .create({ + model: 'google/gemini-3.1-pro-preview', + system: 'test system', + messages: [{ role: 'user', content: 'Use Bash' }], + max_tokens: 64, + stream: true, + }) + .withResponse() + + const events: Array> = [] + for await (const event of result.data) { + events.push(event) + } 
+ + const streamedInput = events + .filter( + event => + event.type === 'content_block_delta' && + typeof event.delta === 'object' && + event.delta !== null && + (event.delta as Record).type === 'input_json_delta', + ) + .map(event => (event.delta as Record).partial_json) + .join('') + + expect(streamedInput).toBe('rg --fi') +}) + +test('repairs truncated JSON objects even without command field', async () => { + globalThis.fetch = (async (_input, _init) => { + const chunks = makeStreamChunks([ + { + id: 'chatcmpl-1', + object: 'chat.completion.chunk', + model: 'google/gemini-3.1-pro-preview', + choices: [ + { + index: 0, + delta: { + role: 'assistant', + tool_calls: [ + { + index: 0, + id: 'function-call-1', + type: 'function', + function: { + name: 'Bash', + arguments: '{"cwd":"/tmp"', + }, + }, + ], + }, + finish_reason: null, + }, + ], + }, + { + id: 'chatcmpl-1', + object: 'chat.completion.chunk', + model: 'google/gemini-3.1-pro-preview', + choices: [ + { + index: 0, + delta: {}, + finish_reason: 'tool_calls', + }, + ], + }, + ]) + + return makeSseResponse(chunks) + }) as FetchType + + const client = createOpenAIShimClient({}) as OpenAIShimClient + + const result = await client.beta.messages + .create({ + model: 'google/gemini-3.1-pro-preview', + system: 'test system', + messages: [{ role: 'user', content: 'Use Bash' }], + max_tokens: 64, + stream: true, + }) + .withResponse() + + const events: Array> = [] + for await (const event of result.data) { + events.push(event) + } + + const streamedInput = events + .filter( + event => + event.type === 'content_block_delta' && + typeof event.delta === 'object' && + event.delta !== null && + (event.delta as Record).type === 'input_json_delta', + ) + .map(event => (event.delta as Record).partial_json) + .join('') + + expect(streamedInput).toBe('{"cwd":"/tmp"}') +}) + +test('preserves raw input for unknown plain string tool arguments', async () => { + globalThis.fetch = (async (_input, _init) => { + return new Response( + 
JSON.stringify({ + id: 'chatcmpl-1', + model: 'google/gemini-3.1-pro-preview', + choices: [ + { + message: { + role: 'assistant', + tool_calls: [ + { + id: 'function-call-1', + type: 'function', + function: { + name: 'UnknownTool', + arguments: 'pwd', + }, + }, + ], + }, + finish_reason: 'tool_calls', + }, + ], + usage: { + prompt_tokens: 12, + completion_tokens: 4, + total_tokens: 16, + }, + }), + { + headers: { + 'Content-Type': 'application/json', + }, + }, + ) + }) as FetchType + + const client = createOpenAIShimClient({}) as OpenAIShimClient + + const message = await client.beta.messages.create({ + model: 'google/gemini-3.1-pro-preview', + system: 'test system', + messages: [{ role: 'user', content: 'Use tool' }], + max_tokens: 64, + stream: false, + }) as { + content?: Array> + } + + expect(message.content).toEqual([ + { + type: 'tool_use', + id: 'function-call-1', + name: 'UnknownTool', + input: {}, + }, + ]) +}) + +test('preserves parsed string input for unknown JSON string tool arguments', async () => { + globalThis.fetch = (async (_input, _init) => { + return new Response( + JSON.stringify({ + id: 'chatcmpl-1', + model: 'google/gemini-3.1-pro-preview', + choices: [ + { + message: { + role: 'assistant', + tool_calls: [ + { + id: 'function-call-1', + type: 'function', + function: { + name: 'UnknownTool', + arguments: '"pwd"', + }, + }, + ], + }, + finish_reason: 'tool_calls', + }, + ], + usage: { + prompt_tokens: 12, + completion_tokens: 4, + total_tokens: 16, + }, + }), + { + headers: { + 'Content-Type': 'application/json', + }, + }, + ) + }) as FetchType + + const client = createOpenAIShimClient({}) as OpenAIShimClient + + const message = await client.beta.messages.create({ + model: 'google/gemini-3.1-pro-preview', + system: 'test system', + messages: [{ role: 'user', content: 'Use tool' }], + max_tokens: 64, + stream: false, + }) as { + content?: Array> + } + + expect(message.content).toEqual([ + { + type: 'tool_use', + id: 'function-call-1', + name: 
'UnknownTool', + input: 'pwd', + }, + ]) +}) + test('sanitizes malformed MCP tool schemas before sending them to OpenAI', async () => { let requestBody: Record | undefined diff --git a/src/services/api/openaiShim.ts b/src/services/api/openaiShim.ts index 4450de96..348ad8af 100644 --- a/src/services/api/openaiShim.ts +++ b/src/services/api/openaiShim.ts @@ -42,6 +42,10 @@ import { } from './providerConfig.js' import { sanitizeSchemaForOpenAICompat } from '../../utils/schemaSanitizer.js' import { redactSecretValueForDisplay } from '../../utils/providerProfile.js' +import { + normalizeToolArguments, + hasToolFieldMapping, +} from './toolArgumentNormalization.js' type SecretValueSource = Partial<{ OPENAI_API_KEY: string @@ -514,6 +518,30 @@ function convertChunkUsage( } } +const JSON_REPAIR_SUFFIXES = [ + '}', '"}', ']}', '"]}', '}}', '"}}', ']}}', '"]}}', '"]}]}', '}]}' +] + +function repairPossiblyTruncatedObjectJson(raw: string): string | null { + try { + const parsed = JSON.parse(raw) + return parsed && typeof parsed === 'object' && !Array.isArray(parsed) + ? raw + : null + } catch { + for (const combo of JSON_REPAIR_SUFFIXES) { + try { + const repaired = raw + combo + const parsed = JSON.parse(repaired) + if (parsed && typeof parsed === 'object' && !Array.isArray(parsed)) { + return repaired + } + } catch {} + } + return null + } +} + /** * Async generator that transforms an OpenAI SSE stream into * Anthropic-format BetaRawMessageStreamEvent objects. 
@@ -524,7 +552,16 @@ async function* openaiStreamToAnthropic( ): AsyncGenerator { const messageId = makeMessageId() let contentBlockIndex = 0 - const activeToolCalls = new Map() + const activeToolCalls = new Map< + number, + { + id: string + name: string + index: number + jsonBuffer: string + normalizeAtStop: boolean + } + >() let hasEmittedContentStart = false let hasEmittedThinkingStart = false let hasClosedThinking = false @@ -647,11 +684,14 @@ async function* openaiStreamToAnthropic( } const toolBlockIndex = contentBlockIndex + const initialArguments = tc.function.arguments ?? '' + const normalizeAtStop = hasToolFieldMapping(tc.function.name) activeToolCalls.set(tc.index, { id: tc.id, name: tc.function.name, index: toolBlockIndex, - jsonBuffer: tc.function.arguments ?? '', + jsonBuffer: initialArguments, + normalizeAtStop, }) yield { @@ -675,7 +715,7 @@ async function* openaiStreamToAnthropic( contentBlockIndex++ // Emit any initial arguments - if (tc.function.arguments) { + if (tc.function.arguments && !normalizeAtStop) { yield { type: 'content_block_delta', index: toolBlockIndex, @@ -692,6 +732,11 @@ async function* openaiStreamToAnthropic( if (tc.function.arguments) { active.jsonBuffer += tc.function.arguments } + + if (active.normalizeAtStop) { + continue + } + yield { type: 'content_block_delta', index: active.index, @@ -725,16 +770,44 @@ async function* openaiStreamToAnthropic( } // Close active tool calls for (const [, tc] of activeToolCalls) { + if (tc.normalizeAtStop) { + let partialJson: string + if (choice.finish_reason === 'length') { + // Truncated by max tokens — preserve raw buffer to avoid + // turning an incomplete tool call into an executable command + partialJson = tc.jsonBuffer + } else { + const repairedStructuredJson = repairPossiblyTruncatedObjectJson( + tc.jsonBuffer, + ) + if (repairedStructuredJson) { + partialJson = repairedStructuredJson + } else { + partialJson = JSON.stringify( + normalizeToolArguments(tc.name, tc.jsonBuffer), + ) 
+ } + } + + yield { + type: 'content_block_delta', + index: tc.index, + delta: { + type: 'input_json_delta', + partial_json: partialJson, + }, + } + yield { type: 'content_block_stop', index: tc.index } + continue + } + let suffixToAdd = '' if (tc.jsonBuffer) { try { JSON.parse(tc.jsonBuffer) } catch { const str = tc.jsonBuffer.trimEnd() - const combinations = [ - '}', '"}', ']}', '"]}', '}}', '"}}', ']}}', '"]}}', '"]}]}', '}]}' - ] - for (const combo of combinations) { + for (const combo of JSON_REPAIR_SUFFIXES) { try { JSON.parse(str + combo) suffixToAdd = combo @@ -1181,12 +1254,10 @@ class OpenAIShimMessages { if (choice?.message?.tool_calls) { for (const tc of choice.message.tool_calls) { - let input: unknown - try { - input = JSON.parse(tc.function.arguments) - } catch { - input = { raw: tc.function.arguments } - } + const input = normalizeToolArguments( + tc.function.name, + tc.function.arguments, + ) content.push({ type: 'tool_use', id: tc.id, diff --git a/src/services/api/toolArgumentNormalization.test.ts b/src/services/api/toolArgumentNormalization.test.ts new file mode 100644 index 00000000..1d12fc9c --- /dev/null +++ b/src/services/api/toolArgumentNormalization.test.ts @@ -0,0 +1,180 @@ +import { describe, expect, test } from 'bun:test' +import { normalizeToolArguments } from './toolArgumentNormalization' + +describe('normalizeToolArguments', () => { + describe('Bash tool', () => { + test('wraps plain string into { command }', () => { + expect(normalizeToolArguments('Bash', 'pwd')).toEqual({ command: 'pwd' }) + }) + + test('wraps multi-word command', () => { + expect(normalizeToolArguments('Bash', 'ls -la /tmp')).toEqual({ + command: 'ls -la /tmp', + }) + }) + + test('passes through structured JSON object', () => { + expect( + normalizeToolArguments('Bash', '{"command":"echo hi"}'), + ).toEqual({ command: 'echo hi' }) + }) + + test('returns empty object for blank string', () => { + expect(normalizeToolArguments('Bash', '')).toEqual({}) + 
expect(normalizeToolArguments('Bash', ' ')).toEqual({}) + }) + + test('returns parsed blank for JSON-encoded blank string', () => { + expect(normalizeToolArguments('Bash', '""')).toEqual('') + expect(normalizeToolArguments('Bash', '" "')).toEqual(' ') + }) + + test('returns empty object for malformed structured object literal', () => { + expect(normalizeToolArguments('Bash', '{ "command": "pwd"')).toEqual({}) + }) + + test.each([ + ['{command:"pwd"}'], + ["{'command':'pwd'}"], + ['{command: pwd}'], + ])( + 'returns empty object for malformed object-shaped string %s (does not wrap into command)', + (input) => { + expect(normalizeToolArguments('Bash', input)).toEqual({}) + }, + ) + + test.each([ + ['false', false], + ['null', null], + ['[]', [] as unknown[]], + ['0', 0], + ['true', true], + ['123', 123], + ])( + 'preserves JSON literal %s as-is (does not wrap into command)', + (input, expected) => { + expect(normalizeToolArguments('Bash', input)).toEqual(expected) + }, + ) + + test('wraps JSON-encoded string into { command }', () => { + expect(normalizeToolArguments('Bash', '"pwd"')).toEqual({ + command: 'pwd', + }) + }) + }) + + describe('undefined arguments', () => { + test('returns empty object for undefined', () => { + expect(normalizeToolArguments('Bash', undefined)).toEqual({}) + expect(normalizeToolArguments('UnknownTool', undefined)).toEqual({}) + }) + }) + + describe('Read tool', () => { + test('wraps plain string into { file_path }', () => { + expect(normalizeToolArguments('Read', '/home/user/file.txt')).toEqual({ + file_path: '/home/user/file.txt', + }) + }) + + test('wraps JSON-encoded string into { file_path }', () => { + expect(normalizeToolArguments('Read', '"/home/user/file.txt"')).toEqual({ + file_path: '/home/user/file.txt', + }) + }) + + test('passes through structured JSON object', () => { + expect( + normalizeToolArguments('Read', '{"file_path":"/tmp/f.txt","limit":10}'), + ).toEqual({ file_path: '/tmp/f.txt', limit: 10 }) + }) + }) + + 
describe('Write tool', () => { + test('wraps plain string into { file_path }', () => { + expect(normalizeToolArguments('Write', '/tmp/out.txt')).toEqual({ + file_path: '/tmp/out.txt', + }) + }) + + test('passes through structured JSON object', () => { + expect( + normalizeToolArguments( + 'Write', + '{"file_path":"/tmp/out.txt","content":"hello"}', + ), + ).toEqual({ file_path: '/tmp/out.txt', content: 'hello' }) + }) + }) + + describe('Edit tool', () => { + test('wraps plain string into { file_path }', () => { + expect(normalizeToolArguments('Edit', '/tmp/edit.ts')).toEqual({ + file_path: '/tmp/edit.ts', + }) + }) + + test('passes through structured JSON object', () => { + expect( + normalizeToolArguments( + 'Edit', + '{"file_path":"/tmp/f.ts","old_string":"a","new_string":"b"}', + ), + ).toEqual({ file_path: '/tmp/f.ts', old_string: 'a', new_string: 'b' }) + }) + }) + + describe('Glob tool', () => { + test('wraps plain string into { pattern }', () => { + expect(normalizeToolArguments('Glob', '**/*.ts')).toEqual({ + pattern: '**/*.ts', + }) + }) + + test('passes through structured JSON object', () => { + expect( + normalizeToolArguments('Glob', '{"pattern":"*.js","path":"/src"}'), + ).toEqual({ pattern: '*.js', path: '/src' }) + }) + }) + + describe('Grep tool', () => { + test('wraps plain string into { pattern }', () => { + expect(normalizeToolArguments('Grep', 'TODO')).toEqual({ + pattern: 'TODO', + }) + }) + + test('passes through structured JSON object', () => { + expect( + normalizeToolArguments('Grep', '{"pattern":"fixme","path":"/src"}'), + ).toEqual({ pattern: 'fixme', path: '/src' }) + }) + }) + + describe('unknown tools', () => { + test('returns empty object for plain string (no known field mapping)', () => { + expect(normalizeToolArguments('UnknownTool', 'some value')).toEqual({}) + }) + + test('passes through structured JSON object', () => { + expect( + normalizeToolArguments('UnknownTool', '{"key":"val"}'), + ).toEqual({ key: 'val' }) + }) + + 
test('preserves JSON literals as-is', () => { + expect(normalizeToolArguments('UnknownTool', 'false')).toEqual(false) + expect(normalizeToolArguments('UnknownTool', 'null')).toEqual(null) + expect(normalizeToolArguments('UnknownTool', '[]')).toEqual([]) + }) + + test('returns parsed string for JSON-encoded string on unknown tools', () => { + expect(normalizeToolArguments('UnknownTool', '"hello"')).toEqual( + 'hello', + ) + }) + }) +}) diff --git a/src/services/api/toolArgumentNormalization.ts b/src/services/api/toolArgumentNormalization.ts new file mode 100644 index 00000000..6052f823 --- /dev/null +++ b/src/services/api/toolArgumentNormalization.ts @@ -0,0 +1,69 @@ +const STRING_ARGUMENT_TOOL_FIELDS: Record<string, string> = { + Bash: 'command', + Read: 'file_path', + Write: 'file_path', + Edit: 'file_path', + Glob: 'pattern', + Grep: 'pattern', +} + +function isBlankString(value: string): boolean { + return value.trim().length === 0 +} + +function isLikelyStructuredObjectLiteral(value: string): boolean { + // Match object-like patterns with key-value syntax: + // {"key":, {key:, {'key':, { "key" :, etc. + // But NOT bash compound commands like { pwd; } or { echo hi; } + return /^\s*\{\s*['"]?\w+['"]?\s*:/.test(value) +} + +function isRecord(value: unknown): value is Record<string, unknown> { + return typeof value === 'object' && value !== null && !Array.isArray(value) +} + +function getPlainStringToolArgumentField(toolName: string): string | null { + return STRING_ARGUMENT_TOOL_FIELDS[toolName] ??
null +} + +export function hasToolFieldMapping(toolName: string): boolean { + return toolName in STRING_ARGUMENT_TOOL_FIELDS +} + +function wrapPlainStringToolArguments( + toolName: string, + value: string, +): Record<string, unknown> | null { + const field = getPlainStringToolArgumentField(toolName) + if (!field) return null + return { [field]: value } +} + +export function normalizeToolArguments( + toolName: string, + rawArguments: string | undefined, +): unknown { + if (rawArguments === undefined) return {} + + try { + const parsed = JSON.parse(rawArguments) + if (isRecord(parsed)) { + return parsed + } + // Parsed as a non-object JSON value (string, number, boolean, null, array) + if (typeof parsed === 'string' && !isBlankString(parsed)) { + return wrapPlainStringToolArguments(toolName, parsed) ?? parsed + } + // For blank strings, numbers, booleans, null, arrays — pass through as-is + // and let Zod schema validation produce a meaningful error + return parsed + } catch { + // rawArguments is not valid JSON — treat as a plain string + if (isBlankString(rawArguments) || isLikelyStructuredObjectLiteral(rawArguments)) { + // Blank or looks like a malformed object literal — don't wrap into + // a tool field to avoid turning garbage into executable input + return {} + } + return wrapPlainStringToolArguments(toolName, rawArguments) ??
{} + } +} diff --git a/src/utils/model/modelOptions.github.test.ts b/src/utils/model/modelOptions.github.test.ts index 340e99c5..ef9ee48f 100644 --- a/src/utils/model/modelOptions.github.test.ts +++ b/src/utils/model/modelOptions.github.test.ts @@ -1,7 +1,17 @@ -import { afterEach, expect, mock, test } from 'bun:test' +import { afterEach, beforeEach, expect, mock, test } from 'bun:test' +import { resetModelStringsForTestingOnly } from '../../bootstrap/state.js' import { saveGlobalConfig } from '../config.js' +async function importFreshModelOptionsModule() { + mock.restore() + mock.module('./providers.js', () => ({ + getAPIProvider: () => 'github', + })) + const nonce = `${Date.now()}-${Math.random()}` + return import(`./modelOptions.js?ts=${nonce}`) +} + const originalEnv = { CLAUDE_CODE_USE_GITHUB: process.env.CLAUDE_CODE_USE_GITHUB, CLAUDE_CODE_USE_OPENAI: process.env.CLAUDE_CODE_USE_OPENAI, @@ -14,6 +24,20 @@ const originalEnv = { ANTHROPIC_CUSTOM_MODEL_OPTION: process.env.ANTHROPIC_CUSTOM_MODEL_OPTION, } +beforeEach(() => { + mock.restore() + delete process.env.CLAUDE_CODE_USE_GITHUB + delete process.env.CLAUDE_CODE_USE_OPENAI + delete process.env.CLAUDE_CODE_USE_GEMINI + delete process.env.CLAUDE_CODE_USE_BEDROCK + delete process.env.CLAUDE_CODE_USE_VERTEX + delete process.env.CLAUDE_CODE_USE_FOUNDRY + delete process.env.OPENAI_MODEL + delete process.env.OPENAI_BASE_URL + delete process.env.ANTHROPIC_CUSTOM_MODEL_OPTION + resetModelStringsForTestingOnly() +}) + afterEach(() => { process.env.CLAUDE_CODE_USE_GITHUB = originalEnv.CLAUDE_CODE_USE_GITHUB process.env.CLAUDE_CODE_USE_OPENAI = originalEnv.CLAUDE_CODE_USE_OPENAI @@ -34,17 +58,9 @@ afterEach(() => { providerProfiles: [], activeProviderProfileId: undefined, })) + resetModelStringsForTestingOnly() }) -async function importFreshModelOptionsModule() { - mock.restore() - mock.module('./providers.js', () => ({ - getAPIProvider: () => 'github', - })) - const nonce = `${Date.now()}-${Math.random()}` - return 
import(`./modelOptions.js?ts=${nonce}`) -} - test('GitHub provider exposes only default + GitHub model in /model options', async () => { process.env.CLAUDE_CODE_USE_GITHUB = '1' delete process.env.CLAUDE_CODE_USE_OPENAI @@ -58,7 +74,9 @@ test('GitHub provider exposes only default + GitHub model in /model options', as const { getModelOptions } = await importFreshModelOptionsModule() const options = getModelOptions(false) - const nonDefault = options.filter(option => option.value !== null) + const nonDefault = options.filter( + (option: { value: unknown }) => option.value !== null, + ) expect(nonDefault.length).toBe(1) expect(nonDefault[0]?.value).toBe('github:copilot') diff --git a/src/utils/providerFlag.test.ts b/src/utils/providerFlag.test.ts index ce58fff6..db38fbc2 100644 --- a/src/utils/providerFlag.test.ts +++ b/src/utils/providerFlag.test.ts @@ -1,4 +1,4 @@ -import { describe, expect, test, afterEach } from 'bun:test' +import { afterEach, beforeEach, describe, expect, test } from 'bun:test' import { parseProviderFlag, applyProviderFlag, @@ -8,18 +8,26 @@ import { const originalEnv = { ...process.env } +const RESET_KEYS = [ + 'CLAUDE_CODE_USE_OPENAI', + 'CLAUDE_CODE_USE_GEMINI', + 'CLAUDE_CODE_USE_GITHUB', + 'CLAUDE_CODE_USE_BEDROCK', + 'CLAUDE_CODE_USE_VERTEX', + 'OPENAI_BASE_URL', + 'OPENAI_API_KEY', + 'OPENAI_MODEL', + 'GEMINI_MODEL', +] as const + +beforeEach(() => { + for (const key of RESET_KEYS) { + delete process.env[key] + } +}) + afterEach(() => { - for (const key of [ - 'CLAUDE_CODE_USE_OPENAI', - 'CLAUDE_CODE_USE_GEMINI', - 'CLAUDE_CODE_USE_GITHUB', - 'CLAUDE_CODE_USE_BEDROCK', - 'CLAUDE_CODE_USE_VERTEX', - 'OPENAI_BASE_URL', - 'OPENAI_API_KEY', - 'OPENAI_MODEL', - 'GEMINI_MODEL', - ]) { + for (const key of RESET_KEYS) { if (originalEnv[key] === undefined) delete process.env[key] else process.env[key] = originalEnv[key] } diff --git a/src/utils/providerProfiles.test.ts b/src/utils/providerProfiles.test.ts index 40521960..917f8dd6 100644 --- 
a/src/utils/providerProfiles.test.ts +++ b/src/utils/providerProfiles.test.ts @@ -2,6 +2,10 @@ import { afterEach, describe, expect, mock, test } from 'bun:test' import type { ProviderProfile } from './config.js' +async function importFreshProvidersModule() { + return import(`./model/providers.ts?ts=${Date.now()}-${Math.random()}`) +} + const originalEnv = { ...process.env } const RESTORED_KEYS = [ @@ -96,24 +100,26 @@ function buildProfile(overrides: Partial = {}): ProviderProfile describe('applyProviderProfileToProcessEnv', () => { test('openai profile clears competing gemini/github flags', async () => { - const { applyProviderProfileToProcessEnv, getAPIProvider } = + const { applyProviderProfileToProcessEnv } = await importFreshProviderProfileModules() process.env.CLAUDE_CODE_USE_GEMINI = '1' process.env.CLAUDE_CODE_USE_GITHUB = '1' applyProviderProfileToProcessEnv(buildProfile()) + const { getAPIProvider: getFreshAPIProvider } = + await importFreshProvidersModule() expect(process.env.CLAUDE_CODE_USE_GEMINI).toBeUndefined() expect(process.env.CLAUDE_CODE_USE_GITHUB).toBeUndefined() - expect(process.env.CLAUDE_CODE_USE_OPENAI).toBe('1') + expect(String(process.env.CLAUDE_CODE_USE_OPENAI)).toBe('1') expect(process.env.CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED_ID).toBe( 'provider_test', ) - expect(getAPIProvider()).toBe('openai') + expect(getFreshAPIProvider()).toBe('openai') }) test('anthropic profile clears competing gemini/github flags', async () => { - const { applyProviderProfileToProcessEnv, getAPIProvider } = + const { applyProviderProfileToProcessEnv } = await importFreshProviderProfileModules() process.env.CLAUDE_CODE_USE_GEMINI = '1' process.env.CLAUDE_CODE_USE_GITHUB = '1' @@ -125,11 +131,13 @@ describe('applyProviderProfileToProcessEnv', () => { model: 'claude-sonnet-4-6', }), ) + const { getAPIProvider: getFreshAPIProvider } = + await importFreshProvidersModule() expect(process.env.CLAUDE_CODE_USE_GEMINI).toBeUndefined() 
expect(process.env.CLAUDE_CODE_USE_GITHUB).toBeUndefined() expect(process.env.CLAUDE_CODE_USE_OPENAI).toBeUndefined() - expect(getAPIProvider()).toBe('firstParty') + expect(getFreshAPIProvider()).toBe('firstParty') }) }) @@ -177,7 +185,7 @@ describe('applyActiveProviderProfileFromConfig', () => { } as any) expect(applied).toBeUndefined() - expect(process.env.CLAUDE_CODE_USE_OPENAI).toBe('1') + expect(String(process.env.CLAUDE_CODE_USE_OPENAI)).toBe('1') expect(process.env.OPENAI_BASE_URL).toBe('http://localhost:11434/v1') expect(process.env.OPENAI_MODEL).toBe('qwen2.5:3b') }) @@ -267,7 +275,7 @@ describe('applyActiveProviderProfileFromConfig', () => { } as any) expect(applied?.id).toBe('saved_openai') - expect(process.env.CLAUDE_CODE_USE_OPENAI).toBe('1') + expect(String(process.env.CLAUDE_CODE_USE_OPENAI)).toBe('1') expect(process.env.OPENAI_BASE_URL).toBe('https://api.openai.com/v1') expect(process.env.OPENAI_MODEL).toBe('gpt-4o') }) @@ -286,10 +294,10 @@ describe('persistActiveProviderProfileModel', () => { model: 'kimi-k2.5:cloud', }) - saveMockGlobalConfig(current => ({ - ...current, - providerProfiles: [activeProfile], - activeProviderProfileId: activeProfile.id, + saveMockGlobalConfig(current => ({ + ...current, + providerProfiles: [activeProfile], + activeProviderProfileId: activeProfile.id, })) applyProviderProfileToProcessEnv(activeProfile) @@ -303,7 +311,7 @@ describe('persistActiveProviderProfileModel', () => { ) const saved = getProviderProfiles().find( - profile => profile.id === activeProfile.id, + (profile: ProviderProfile) => profile.id === activeProfile.id, ) expect(saved?.model).toBe('minimax-m2.5:cloud') }) @@ -333,7 +341,7 @@ describe('persistActiveProviderProfileModel', () => { expect(process.env.OPENAI_MODEL).toBe('cli-model') const saved = getProviderProfiles().find( - profile => profile.id === activeProfile.id, + (profile: ProviderProfile) => profile.id === activeProfile.id, ) expect(saved?.model).toBe('minimax-m2.5:cloud') }) @@ -414,7 
+422,7 @@ describe('deleteProviderProfile', () => { expect(result.activeProfileId).toBeUndefined() expect(process.env.CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED).toBeUndefined() - expect(process.env.CLAUDE_CODE_USE_OPENAI).toBe('1') + expect(String(process.env.CLAUDE_CODE_USE_OPENAI)).toBe('1') expect(process.env.OPENAI_BASE_URL).toBe('http://localhost:11434/v1') expect(process.env.OPENAI_MODEL).toBe('qwen2.5:3b') })