Files
orcs-code/src/services/api/openaiShim.test.ts
ibaaaaal 07621a6f8d fix: scrub canonical Anthropic headers from 3P shim requests (#499)
* Stop canonical Anthropic headers from leaking into 3P shim requests

The remaining blocker from PR #268 was that canonical Anthropic headers such as
`anthropic-version` and `anthropic-beta` could still ride through supported 3P
paths even after the earlier x-anthropic/x-claude scrubber work. This tightens
header filtering inside the shim itself so direct defaultHeaders, env-driven
client setup, providerOverride routing, and per-request header injection all
share the same scrubber.

Constraint: Preserve non-Anthropic custom headers and provider auth while stripping only Anthropic/OpenClaude-internal headers from 3P requests
Rejected: Rely on client.ts filtering alone | direct shim construction and per-request headers would still leave gaps
Confidence: high
Scope-risk: narrow
Reversibility: clean
Directive: Keep header scrubbing centralized in the shim so new call paths do not reopen 3P leakage bugs
Tested: bun test src/services/api/openaiShim.test.ts src/services/api/client.test.ts src/utils/context.test.ts
Tested: bun run test:provider
Tested: bun run build && node dist/cli.mjs --version
Not-tested: bun run typecheck (repository baseline currently fails in many unrelated files)

* Keep OpenAI client tests from restoring undefined env as strings

The new header-leak regression tests in client.test.ts restored environment
variables via direct assignment, which can leave literal "undefined" strings in
process.env when the original value was unset. This switches the teardown over
to the same restore helper pattern already used in openaiShim.test.ts.

Constraint: Keep the fix limited to test hygiene without altering runtime behavior
Rejected: Restore only the two env vars Copilot called out | using one helper for all test env restores is simpler and less error-prone
Confidence: high
Scope-risk: narrow
Reversibility: clean
Directive: Use restore helpers for env teardown in tests so unset values stay deleted instead of becoming the string "undefined"
Tested: bun test src/services/api/client.test.ts src/services/api/openaiShim.test.ts src/utils/context.test.ts
Not-tested: Full provider suite (unchanged runtime path)

* Prevent GitHub Codex requests from forwarding unsanitized Anthropic headers

A base-sync with upstream exposed a separate GitHub+Codex transport branch
that still merged per-request headers raw before adding Copilot headers.
This keeps the filter aligned across Codex-family paths and adds explicit
regression tests for GitHub Codex routing, including providerOverride.

Constraint: Must not push or modify GitHub state while validating the reviewer concern
Rejected: Leave the GitHub Codex path unchanged | runtime repro showed anthropic-* headers still leaked after the upstream sync
Confidence: high
Scope-risk: narrow
Directive: Keep header scrubbing consistent across every Codex-family transport branch when provider routing changes
Tested: bun test src/services/api/openaiShim.test.ts
Tested: bun test src/services/api/client.test.ts src/services/api/codexShim.test.ts src/services/api/providerConfig.github.test.ts
Tested: bun run build
Not-tested: Full repository test suite
2026-04-10 21:56:40 +08:00

2579 lines
68 KiB
TypeScript

import { afterEach, beforeEach, expect, test } from 'bun:test'
import { createOpenAIShimClient } from './openaiShim.ts'
// Alias for the global fetch signature so mock implementations can be cast
// back to the exact type the runtime expects.
type FetchType = typeof globalThis.fetch
// Snapshot of every environment variable this suite mutates, captured at
// module load. `undefined` means the variable was unset before the suite ran;
// afterEach uses this to restore the pre-suite state.
const originalEnv = {
  OPENAI_BASE_URL: process.env.OPENAI_BASE_URL,
  OPENAI_API_KEY: process.env.OPENAI_API_KEY,
  OPENAI_MODEL: process.env.OPENAI_MODEL,
  CLAUDE_CODE_USE_GITHUB: process.env.CLAUDE_CODE_USE_GITHUB,
  GITHUB_TOKEN: process.env.GITHUB_TOKEN,
  GH_TOKEN: process.env.GH_TOKEN,
  CLAUDE_CODE_USE_OPENAI: process.env.CLAUDE_CODE_USE_OPENAI,
  CLAUDE_CODE_USE_GEMINI: process.env.CLAUDE_CODE_USE_GEMINI,
  GEMINI_API_KEY: process.env.GEMINI_API_KEY,
  GOOGLE_API_KEY: process.env.GOOGLE_API_KEY,
  GEMINI_ACCESS_TOKEN: process.env.GEMINI_ACCESS_TOKEN,
  GEMINI_AUTH_MODE: process.env.GEMINI_AUTH_MODE,
  GEMINI_BASE_URL: process.env.GEMINI_BASE_URL,
  GEMINI_MODEL: process.env.GEMINI_MODEL,
  GOOGLE_CLOUD_PROJECT: process.env.GOOGLE_CLOUD_PROJECT,
  ANTHROPIC_CUSTOM_HEADERS: process.env.ANTHROPIC_CUSTOM_HEADERS,
}
// Real fetch implementation, restored after each test replaces it with a mock.
const originalFetch = globalThis.fetch
/**
 * Restore a single environment variable to its pre-test value.
 *
 * A captured value of `undefined` means the variable was originally unset, so
 * it is deleted rather than assigned — assigning `undefined` to `process.env`
 * would store the literal string "undefined".
 */
function restoreEnv(key: string, value: string | undefined): void {
  if (value !== undefined) {
    process.env[key] = value
    return
  }
  delete process.env[key]
}
// Minimal structural type for the shim client as these tests use it: only the
// `beta.messages.create` entry point is exercised. `create` returns a promise
// that also exposes `withResponse()` for the streaming tests, which consume
// the translated event stream via `result.data`.
type OpenAIShimClient = {
  beta: {
    messages: {
      create: (
        params: Record<string, unknown>,
        options?: Record<string, unknown>,
      ) => Promise<unknown> & {
        withResponse: () => Promise<{ data: AsyncIterable<Record<string, unknown>> }>
      }
    }
  }
}
/**
 * Build a mock server-sent-events Response whose body emits the given raw
 * lines in order. Each entry is enqueued as its own UTF-8 chunk so streaming
 * consumers observe realistic chunk boundaries.
 */
function makeSseResponse(lines: string[]): Response {
  const body = new ReadableStream({
    start(controller) {
      const encoder = new TextEncoder()
      lines.forEach(line => controller.enqueue(encoder.encode(line)))
      controller.close()
    },
  })
  return new Response(body, {
    headers: { 'Content-Type': 'text/event-stream' },
  })
}
/**
 * Wrap each chunk payload in an SSE `data:` frame and append the `[DONE]`
 * sentinel, mirroring how OpenAI-compatible endpoints terminate a stream.
 */
function makeStreamChunks(chunks: unknown[]): string[] {
  const frames = chunks.map(chunk => `data: ${JSON.stringify(chunk)}\n\n`)
  frames.push('data: [DONE]\n\n')
  return frames
}
beforeEach(() => {
process.env.OPENAI_BASE_URL = 'http://example.test/v1'
process.env.OPENAI_API_KEY = 'test-key'
delete process.env.OPENAI_MODEL
delete process.env.CLAUDE_CODE_USE_GITHUB
delete process.env.GITHUB_TOKEN
delete process.env.GH_TOKEN
delete process.env.CLAUDE_CODE_USE_OPENAI
delete process.env.CLAUDE_CODE_USE_GEMINI
delete process.env.GEMINI_API_KEY
delete process.env.GOOGLE_API_KEY
delete process.env.GEMINI_ACCESS_TOKEN
delete process.env.GEMINI_AUTH_MODE
delete process.env.GEMINI_BASE_URL
delete process.env.GEMINI_MODEL
delete process.env.GOOGLE_CLOUD_PROJECT
delete process.env.ANTHROPIC_CUSTOM_HEADERS
})
// Restore every tracked environment variable to its pre-suite value and undo
// the fetch mock. Iterating over `originalEnv` (instead of one hand-written
// restoreEnv call per variable) keeps teardown in sync with the snapshot, so
// adding a variable to the snapshot automatically restores it here.
afterEach(() => {
  for (const [key, value] of Object.entries(originalEnv)) {
    restoreEnv(key, value)
  }
  globalThis.fetch = originalFetch
})
// Regression: canonical Anthropic headers supplied via `defaultHeaders` must
// be scrubbed before the shim talks to a 3P OpenAI-compatible endpoint, while
// unrelated custom headers pass through untouched.
test('strips canonical Anthropic headers from direct shim defaultHeaders', async () => {
  let capturedHeaders: Headers | undefined
  // Mock fetch: record the outgoing headers and return a minimal successful
  // chat completion so the shim's response handling completes normally.
  globalThis.fetch = (async (_input, init) => {
    capturedHeaders = new Headers(init?.headers)
    return new Response(
      JSON.stringify({
        id: 'chatcmpl-1',
        model: 'gpt-4o',
        choices: [
          {
            message: {
              role: 'assistant',
              content: 'ok',
            },
            finish_reason: 'stop',
          },
        ],
        usage: {
          prompt_tokens: 8,
          completion_tokens: 3,
          total_tokens: 11,
        },
      }),
      {
        headers: {
          'Content-Type': 'application/json',
        },
      },
    )
  }) as FetchType
  // Mix of Anthropic/OpenClaude-internal headers (must be stripped) and one
  // arbitrary custom header (must survive).
  const client = createOpenAIShimClient({
    defaultHeaders: {
      'anthropic-version': '2023-06-01',
      'anthropic-beta': 'prompt-caching-2024-07-31',
      'x-anthropic-additional-protection': 'true',
      'x-claude-remote-session-id': 'remote-123',
      'x-app': 'cli',
      'x-client-app': 'sdk',
      'x-safe-header': 'keep-me',
    },
  }) as OpenAIShimClient
  await client.beta.messages.create({
    model: 'gpt-4o',
    system: 'test system',
    messages: [{ role: 'user', content: 'hello' }],
    max_tokens: 64,
    stream: false,
  })
  expect(capturedHeaders?.get('anthropic-version')).toBeNull()
  expect(capturedHeaders?.get('anthropic-beta')).toBeNull()
  expect(capturedHeaders?.get('x-anthropic-additional-protection')).toBeNull()
  expect(capturedHeaders?.get('x-claude-remote-session-id')).toBeNull()
  expect(capturedHeaders?.get('x-app')).toBeNull()
  expect(capturedHeaders?.get('x-client-app')).toBeNull()
  // Non-Anthropic custom headers are preserved.
  expect(capturedHeaders?.get('x-safe-header')).toBe('keep-me')
})
// Same scrub contract as the defaultHeaders test, but for headers injected on
// a single request via the per-call options bag — a separate code path that
// previously bypassed the filter.
test('strips canonical Anthropic headers from per-request shim headers too', async () => {
  let capturedHeaders: Headers | undefined
  // Mock fetch: capture headers, return a minimal non-streaming completion.
  globalThis.fetch = (async (_input, init) => {
    capturedHeaders = new Headers(init?.headers)
    return new Response(
      JSON.stringify({
        id: 'chatcmpl-1',
        model: 'gpt-4o',
        choices: [
          {
            message: {
              role: 'assistant',
              content: 'ok',
            },
            finish_reason: 'stop',
          },
        ],
        usage: {
          prompt_tokens: 8,
          completion_tokens: 3,
          total_tokens: 11,
        },
      }),
      {
        headers: {
          'Content-Type': 'application/json',
        },
      },
    )
  }) as FetchType
  const client = createOpenAIShimClient({}) as OpenAIShimClient
  await client.beta.messages.create(
    {
      model: 'gpt-4o',
      system: 'test system',
      messages: [{ role: 'user', content: 'hello' }],
      max_tokens: 64,
      stream: false,
    },
    {
      // Per-request header injection: Anthropic headers plus one safe header.
      headers: {
        'anthropic-version': '2023-06-01',
        'anthropic-beta': 'prompt-caching-2024-07-31',
        'x-safe-header': 'keep-me',
      },
    },
  )
  expect(capturedHeaders?.get('anthropic-version')).toBeNull()
  expect(capturedHeaders?.get('anthropic-beta')).toBeNull()
  expect(capturedHeaders?.get('x-safe-header')).toBe('keep-me')
})
// Regression for the GitHub Codex transport branch: per-request Anthropic
// headers must be scrubbed on the Copilot-backed path too, while provider
// auth and Copilot-identifying headers are still attached.
test('strips Anthropic-specific headers on GitHub Codex transport requests', async () => {
  let capturedHeaders: Headers | undefined
  // Route through the GitHub transport via env; base URL/model come from the
  // GitHub defaults, so the OpenAI ones are cleared.
  process.env.CLAUDE_CODE_USE_GITHUB = '1'
  process.env.OPENAI_API_KEY = 'github-test-key'
  delete process.env.OPENAI_BASE_URL
  delete process.env.OPENAI_MODEL
  // Mock fetch: capture headers and return an empty SSE response — this test
  // only inspects the request, not the streamed body.
  globalThis.fetch = (async (_input, init) => {
    capturedHeaders = new Headers(init?.headers)
    return new Response('', {
      status: 200,
      headers: {
        'Content-Type': 'text/event-stream',
      },
    })
  }) as FetchType
  const client = createOpenAIShimClient({}) as OpenAIShimClient
  await client.beta.messages.create(
    {
      model: 'github:gpt-5-codex',
      system: 'test system',
      messages: [{ role: 'user', content: 'hello' }],
      max_tokens: 64,
      stream: true,
    },
    {
      headers: {
        'anthropic-version': '2023-06-01',
        'anthropic-beta': 'prompt-caching-2024-07-31',
        'x-anthropic-additional-protection': 'true',
        'x-safe-header': 'keep-me',
      },
    },
  )
  expect(capturedHeaders?.get('anthropic-version')).toBeNull()
  expect(capturedHeaders?.get('anthropic-beta')).toBeNull()
  expect(capturedHeaders?.get('x-anthropic-additional-protection')).toBeNull()
  expect(capturedHeaders?.get('x-safe-header')).toBe('keep-me')
  // Provider auth and the Copilot editor header must survive the scrub.
  expect(capturedHeaders?.get('authorization')).toBe('Bearer github-test-key')
  expect(capturedHeaders?.get('editor-plugin-version')).toBe('copilot-chat/0.26.7')
})
// Same GitHub Codex scrub contract, but with routing configured through
// `providerOverride`: the override's API key must win over the env key, and
// the header filter must still apply.
test('strips Anthropic-specific headers on GitHub Codex transport with providerOverride API key', async () => {
  let capturedHeaders: Headers | undefined
  process.env.CLAUDE_CODE_USE_GITHUB = '1'
  // Deliberately set an env key that should lose to the providerOverride key.
  process.env.OPENAI_API_KEY = 'env-should-not-win'
  delete process.env.OPENAI_BASE_URL
  delete process.env.OPENAI_MODEL
  globalThis.fetch = (async (_input, init) => {
    capturedHeaders = new Headers(init?.headers)
    return new Response('', {
      status: 200,
      headers: {
        'Content-Type': 'text/event-stream',
      },
    })
  }) as FetchType
  const client = createOpenAIShimClient({
    providerOverride: {
      model: 'github:gpt-5-codex',
      baseURL: 'https://api.githubcopilot.com',
      apiKey: 'provider-override-key',
    },
  }) as OpenAIShimClient
  await client.beta.messages.create(
    {
      // The request model is ignored when providerOverride supplies one.
      model: 'ignored',
      system: 'test system',
      messages: [{ role: 'user', content: 'hello' }],
      max_tokens: 64,
      stream: true,
    },
    {
      headers: {
        'anthropic-version': '2023-06-01',
        'x-claude-remote-session-id': 'remote-123',
        'x-safe-header': 'keep-me',
      },
    },
  )
  expect(capturedHeaders?.get('anthropic-version')).toBeNull()
  expect(capturedHeaders?.get('x-claude-remote-session-id')).toBeNull()
  expect(capturedHeaders?.get('x-safe-header')).toBe('keep-me')
  // Auth must come from the override, not from OPENAI_API_KEY.
  expect(capturedHeaders?.get('authorization')).toBe('Bearer provider-override-key')
  expect(capturedHeaders?.get('editor-plugin-version')).toBe('copilot-chat/0.26.7')
})
// OpenAI sends token usage in a trailing stream chunk whose `choices` array is
// empty (stream_options.include_usage). The shim must translate that into a
// `message_delta` event carrying the input/output token counts.
test('preserves usage from final OpenAI stream chunk with empty choices', async () => {
  globalThis.fetch = (async (_input, init) => {
    const url = typeof _input === 'string' ? _input : _input.url
    expect(url).toBe('http://example.test/v1/chat/completions')
    // The shim must request streaming with usage reporting enabled.
    const body = JSON.parse(String(init?.body))
    expect(body.stream).toBe(true)
    expect(body.stream_options).toEqual({ include_usage: true })
    const chunks = makeStreamChunks([
      {
        id: 'chatcmpl-1',
        object: 'chat.completion.chunk',
        model: 'fake-model',
        choices: [
          {
            index: 0,
            delta: { role: 'assistant', content: 'hello world' },
            finish_reason: null,
          },
        ],
      },
      {
        id: 'chatcmpl-1',
        object: 'chat.completion.chunk',
        model: 'fake-model',
        choices: [
          {
            index: 0,
            delta: {},
            finish_reason: 'stop',
          },
        ],
      },
      // Trailing usage-only chunk: empty choices, usage payload attached.
      {
        id: 'chatcmpl-1',
        object: 'chat.completion.chunk',
        model: 'fake-model',
        choices: [],
        usage: {
          prompt_tokens: 123,
          completion_tokens: 45,
          total_tokens: 168,
        },
      },
    ])
    return makeSseResponse(chunks)
  }) as FetchType
  const client = createOpenAIShimClient({}) as OpenAIShimClient
  const result = await client.beta.messages
    .create({
      model: 'fake-model',
      system: 'test system',
      messages: [{ role: 'user', content: 'hello' }],
      max_tokens: 64,
      stream: true,
    })
    .withResponse()
  // Drain the translated Anthropic-style event stream.
  const events: Array<Record<string, unknown>> = []
  for await (const event of result.data) {
    events.push(event)
  }
  const usageEvent = events.find(
    event => event.type === 'message_delta' && typeof event.usage === 'object' && event.usage !== null,
  ) as { usage?: { input_tokens?: number; output_tokens?: number } } | undefined
  expect(usageEvent).toBeDefined()
  expect(usageEvent?.usage?.input_tokens).toBe(123)
  expect(usageEvent?.usage?.output_tokens).toBe(45)
})
// Gemini attaches `extra_content` (e.g. thought signatures) to tool calls.
// When the conversation is replayed in a follow-up request, that field must
// survive the Anthropic→OpenAI message translation on the tool_calls entry.
test('preserves Gemini tool call extra_content in follow-up requests', async () => {
  let requestBody: Record<string, unknown> | undefined
  // Mock fetch: capture the translated request body; response content is not
  // under test here.
  globalThis.fetch = (async (_input, init) => {
    requestBody = JSON.parse(String(init?.body))
    return new Response(
      JSON.stringify({
        id: 'chatcmpl-1',
        model: 'google/gemini-3.1-pro-preview',
        choices: [
          {
            message: {
              role: 'assistant',
              content: 'done',
            },
            finish_reason: 'stop',
          },
        ],
        usage: {
          prompt_tokens: 12,
          completion_tokens: 4,
          total_tokens: 16,
        },
      }),
      {
        headers: {
          'Content-Type': 'application/json',
        },
      },
    )
  }) as FetchType
  const client = createOpenAIShimClient({}) as OpenAIShimClient
  await client.beta.messages.create({
    model: 'google/gemini-3.1-pro-preview',
    system: 'test system',
    messages: [
      { role: 'user', content: 'Use Bash' },
      {
        role: 'assistant',
        content: [
          {
            type: 'tool_use',
            id: 'call_1',
            name: 'Bash',
            input: { command: 'pwd' },
            // Gemini-specific metadata that must round-trip.
            extra_content: {
              google: {
                thought_signature: 'sig-123',
              },
            },
          },
        ],
      },
      {
        role: 'user',
        content: [
          {
            type: 'tool_result',
            tool_use_id: 'call_1',
            content: 'D:\\repo',
          },
        ],
      },
    ],
    max_tokens: 64,
    stream: false,
  })
  const assistantWithToolCall = (requestBody?.messages as Array<Record<string, unknown>>).find(
    message => Array.isArray(message.tool_calls),
  ) as { tool_calls?: Array<Record<string, unknown>> } | undefined
  expect(assistantWithToolCall?.tool_calls?.[0]).toMatchObject({
    id: 'call_1',
    type: 'function',
    function: {
      name: 'Bash',
      arguments: JSON.stringify({ command: 'pwd' }),
    },
    extra_content: {
      google: {
        thought_signature: 'sig-123',
      },
    },
  })
})
// The Anthropic tool `input_schema` must be translated into the OpenAI
// `function.parameters` shape without dropping properties or the `required`
// list — regression for the Grep tool's `pattern` field.
test('preserves Grep tool pattern field in OpenAI-compatible schemas', async () => {
  let requestBody: Record<string, unknown> | undefined
  globalThis.fetch = (async (_input, init) => {
    requestBody = JSON.parse(String(init?.body))
    return new Response(
      JSON.stringify({
        id: 'chatcmpl-grep-schema',
        model: 'qwen/qwen3.6-plus',
        choices: [
          {
            message: {
              role: 'assistant',
              content: 'done',
            },
            finish_reason: 'stop',
          },
        ],
        usage: {
          prompt_tokens: 12,
          completion_tokens: 4,
          total_tokens: 16,
        },
      }),
      {
        headers: {
          'Content-Type': 'application/json',
        },
      },
    )
  }) as FetchType
  const client = createOpenAIShimClient({}) as OpenAIShimClient
  await client.beta.messages.create({
    model: 'qwen/qwen3.6-plus',
    system: 'test system',
    messages: [{ role: 'user', content: 'Use Grep' }],
    tools: [
      {
        name: 'Grep',
        description: 'Search file contents',
        input_schema: {
          type: 'object',
          properties: {
            pattern: { type: 'string', description: 'Search pattern' },
            path: { type: 'string' },
          },
          required: ['pattern'],
          additionalProperties: false,
        },
      },
    ],
    max_tokens: 64,
    stream: false,
  })
  // Locate the translated Grep tool in the outgoing OpenAI-format payload.
  const tools = requestBody?.tools as Array<Record<string, unknown>> | undefined
  const grepTool = tools?.find(tool => (tool.function as Record<string, unknown>)?.name === 'Grep') as
    | { function?: { parameters?: { properties?: Record<string, unknown>; required?: string[] } } }
    | undefined
  expect(Object.keys(grepTool?.function?.parameters?.properties ?? {})).toContain('pattern')
  expect(grepTool?.function?.parameters?.required).toContain('pattern')
})
// Security regression: a hostile OPENAI_BASE_URL that merely *contains* the
// Gemini hostname in its path must not flip the shim into Gemini auth mode —
// otherwise GEMINI_API_KEY would be sent to an attacker-controlled origin.
test('does not infer Gemini mode from OPENAI_BASE_URL path substrings', async () => {
  let capturedAuthorization: string | null = null
  process.env.OPENAI_BASE_URL =
    'https://evil.example/generativelanguage.googleapis.com/v1beta/openai'
  delete process.env.OPENAI_API_KEY
  process.env.GEMINI_API_KEY = 'gemini-secret'
  globalThis.fetch = (async (_input, init) => {
    // Headers may arrive as a plain record here; check both casings.
    const headers = init?.headers as Record<string, string> | undefined
    capturedAuthorization =
      headers?.Authorization ?? headers?.authorization ?? null
    return new Response(
      JSON.stringify({
        id: 'chatcmpl-1',
        model: 'fake-model',
        choices: [
          {
            message: {
              role: 'assistant',
              content: 'ok',
            },
            finish_reason: 'stop',
          },
        ],
        usage: {
          prompt_tokens: 12,
          completion_tokens: 4,
          total_tokens: 16,
        },
      }),
      {
        headers: {
          'Content-Type': 'application/json',
        },
      },
    )
  }) as FetchType
  const client = createOpenAIShimClient({}) as OpenAIShimClient
  await client.beta.messages.create({
    model: 'fake-model',
    messages: [{ role: 'user', content: 'hello' }],
    max_tokens: 64,
    stream: false,
  })
  // No Gemini credential may be attached to this non-Google origin.
  expect(capturedAuthorization).toBeNull()
})
// OpenAI-compatible tool messages are plain text, so image tool results from
// the Anthropic side must be rendered as a textual placeholder (not dropped
// and not sent as raw base64) in follow-up requests.
test('preserves image tool results as placeholders in follow-up requests', async () => {
  let requestBody: Record<string, unknown> | undefined
  globalThis.fetch = (async (_input, init) => {
    requestBody = JSON.parse(String(init?.body))
    return new Response(
      JSON.stringify({
        id: 'chatcmpl-1',
        model: 'qwen/qwen3.6-plus',
        choices: [
          {
            message: {
              role: 'assistant',
              content: 'done',
            },
            finish_reason: 'stop',
          },
        ],
        usage: {
          prompt_tokens: 12,
          completion_tokens: 4,
          total_tokens: 16,
        },
      }),
      {
        headers: {
          'Content-Type': 'application/json',
        },
      },
    )
  }) as FetchType
  const client = createOpenAIShimClient({}) as OpenAIShimClient
  await client.beta.messages.create({
    model: 'qwen/qwen3.6-plus',
    system: 'test system',
    messages: [
      { role: 'user', content: 'Read this screenshot' },
      {
        role: 'assistant',
        content: [
          {
            type: 'tool_use',
            id: 'call_image_1',
            name: 'Read',
            input: { file_path: 'C:\\temp\\screenshot.png' },
          },
        ],
      },
      {
        role: 'user',
        content: [
          {
            type: 'tool_result',
            tool_use_id: 'call_image_1',
            // Anthropic-style base64 image block inside a tool result.
            content: [
              {
                type: 'image',
                source: {
                  type: 'base64',
                  media_type: 'image/png',
                  data: 'ZmFrZQ==',
                },
              },
            ],
          },
        ],
      },
    ],
    max_tokens: 64,
    stream: false,
  })
  const toolMessage = (requestBody?.messages as Array<Record<string, unknown>>).find(
    message => message.role === 'tool',
  ) as { content?: string } | undefined
  expect(toolMessage?.content).toContain('[image:image/png]')
})
// When Gemini mode is explicitly enabled with access-token auth, the shim must
// hit the configured Gemini OpenAI-compat endpoint with a Bearer access token
// and the Google Cloud project header (no API key involved).
test('uses GEMINI_ACCESS_TOKEN for Gemini OpenAI-compatible requests', async () => {
  let capturedAuthorization: string | null = null
  let capturedProject: string | null = null
  let requestUrl: string | undefined
  process.env.CLAUDE_CODE_USE_GEMINI = '1'
  process.env.GEMINI_AUTH_MODE = 'access-token'
  process.env.GEMINI_ACCESS_TOKEN = 'gemini-access-token'
  process.env.GOOGLE_CLOUD_PROJECT = 'gemini-project'
  process.env.GEMINI_BASE_URL =
    'https://generativelanguage.googleapis.com/v1beta/openai'
  process.env.GEMINI_MODEL = 'gemini-2.0-flash'
  // Clear competing credentials so only the access token can be used.
  delete process.env.OPENAI_BASE_URL
  delete process.env.OPENAI_API_KEY
  delete process.env.GEMINI_API_KEY
  delete process.env.GOOGLE_API_KEY
  globalThis.fetch = (async (input, init) => {
    requestUrl = typeof input === 'string' ? input : input.url
    const headers = init?.headers as Record<string, string> | undefined
    capturedAuthorization =
      headers?.Authorization ?? headers?.authorization ?? null
    capturedProject =
      headers?.['x-goog-user-project'] ??
      headers?.['X-Goog-User-Project'] ??
      null
    return new Response(
      JSON.stringify({
        id: 'chatcmpl-gemini',
        model: 'gemini-2.0-flash',
        choices: [
          {
            message: {
              role: 'assistant',
              content: 'ok',
            },
            finish_reason: 'stop',
          },
        ],
        usage: {
          prompt_tokens: 3,
          completion_tokens: 1,
          total_tokens: 4,
        },
      }),
      {
        headers: {
          'Content-Type': 'application/json',
        },
      },
    )
  }) as FetchType
  const client = createOpenAIShimClient({}) as OpenAIShimClient
  await client.beta.messages.create({
    model: 'gemini-2.0-flash',
    messages: [{ role: 'user', content: 'hello' }],
    max_tokens: 32,
    stream: false,
  })
  expect(requestUrl).toBe(
    'https://generativelanguage.googleapis.com/v1beta/openai/chat/completions',
  )
  expect(capturedAuthorization).toBe('Bearer gemini-access-token')
  expect(capturedProject).toBe('gemini-project')
})
// Streaming counterpart of the extra_content test: when a streamed tool_call
// delta carries Gemini `extra_content`, the translated `content_block_start`
// event must keep it on the tool_use block.
test('preserves Gemini tool call extra_content from streaming chunks', async () => {
  globalThis.fetch = (async (_input, _init) => {
    const chunks = makeStreamChunks([
      {
        id: 'chatcmpl-1',
        object: 'chat.completion.chunk',
        model: 'google/gemini-3.1-pro-preview',
        choices: [
          {
            index: 0,
            delta: {
              role: 'assistant',
              tool_calls: [
                {
                  index: 0,
                  id: 'function-call-1',
                  type: 'function',
                  // Gemini metadata attached at the tool_call level.
                  extra_content: {
                    google: {
                      thought_signature: 'sig-stream',
                    },
                  },
                  function: {
                    name: 'Bash',
                    arguments: '{"command":"pwd"}',
                  },
                },
              ],
            },
            finish_reason: null,
          },
        ],
      },
      {
        id: 'chatcmpl-1',
        object: 'chat.completion.chunk',
        model: 'google/gemini-3.1-pro-preview',
        choices: [
          {
            index: 0,
            delta: {},
            finish_reason: 'tool_calls',
          },
        ],
      },
    ])
    return makeSseResponse(chunks)
  }) as FetchType
  const client = createOpenAIShimClient({}) as OpenAIShimClient
  const result = await client.beta.messages
    .create({
      model: 'google/gemini-3.1-pro-preview',
      system: 'test system',
      messages: [{ role: 'user', content: 'Use Bash' }],
      max_tokens: 64,
      stream: true,
    })
    .withResponse()
  const events: Array<Record<string, unknown>> = []
  for await (const event of result.data) {
    events.push(event)
  }
  // Find the tool_use content_block_start emitted for the tool call.
  const toolStart = events.find(
    event =>
      event.type === 'content_block_start' &&
      typeof event.content_block === 'object' &&
      event.content_block !== null &&
      (event.content_block as Record<string, unknown>).type === 'tool_use',
  ) as { content_block?: Record<string, unknown> } | undefined
  expect(toolStart?.content_block).toMatchObject({
    type: 'tool_use',
    id: 'function-call-1',
    name: 'Bash',
    extra_content: {
      google: {
        thought_signature: 'sig-stream',
      },
    },
  })
})
// Some OpenAI-compatible models emit Bash tool arguments as a bare shell
// string instead of JSON. The shim must normalize that into the expected
// `{ command: ... }` input object in non-streaming responses.
test('normalizes plain string Bash tool arguments from OpenAI-compatible responses', async () => {
  globalThis.fetch = (async (_input, _init) => {
    return new Response(
      JSON.stringify({
        id: 'chatcmpl-1',
        model: 'google/gemini-3.1-pro-preview',
        choices: [
          {
            message: {
              role: 'assistant',
              tool_calls: [
                {
                  id: 'function-call-1',
                  type: 'function',
                  function: {
                    name: 'Bash',
                    // Not JSON — a raw command string.
                    arguments: 'pwd',
                  },
                },
              ],
            },
            finish_reason: 'tool_calls',
          },
        ],
        usage: {
          prompt_tokens: 12,
          completion_tokens: 4,
          total_tokens: 16,
        },
      }),
      {
        headers: {
          'Content-Type': 'application/json',
        },
      },
    )
  }) as FetchType
  const client = createOpenAIShimClient({}) as OpenAIShimClient
  const message = await client.beta.messages.create({
    model: 'google/gemini-3.1-pro-preview',
    system: 'test system',
    messages: [{ role: 'user', content: 'Use Bash' }],
    max_tokens: 64,
    stream: false,
  }) as {
    stop_reason?: string
    content?: Array<Record<string, unknown>>
  }
  expect(message.stop_reason).toBe('tool_use')
  expect(message.content).toEqual([
    {
      type: 'tool_use',
      id: 'function-call-1',
      name: 'Bash',
      input: { command: 'pwd' },
    },
  ])
})
// Edge case: the arguments field is valid JSON but decodes to a *string*
// ('"pwd"'), not an object. The shim must still wrap it as { command: ... }.
test('normalizes Bash tool arguments that are valid JSON strings', async () => {
  globalThis.fetch = (async (_input, _init) => {
    return new Response(
      JSON.stringify({
        id: 'chatcmpl-1',
        model: 'google/gemini-3.1-pro-preview',
        choices: [
          {
            message: {
              role: 'assistant',
              tool_calls: [
                {
                  id: 'function-call-1',
                  type: 'function',
                  function: {
                    name: 'Bash',
                    // JSON string literal, not an object.
                    arguments: '"pwd"',
                  },
                },
              ],
            },
            finish_reason: 'tool_calls',
          },
        ],
        usage: {
          prompt_tokens: 12,
          completion_tokens: 4,
          total_tokens: 16,
        },
      }),
      {
        headers: {
          'Content-Type': 'application/json',
        },
      },
    )
  }) as FetchType
  const client = createOpenAIShimClient({}) as OpenAIShimClient
  const message = await client.beta.messages.create({
    model: 'google/gemini-3.1-pro-preview',
    system: 'test system',
    messages: [{ role: 'user', content: 'Use Bash' }],
    max_tokens: 64,
    stream: false,
  }) as {
    content?: Array<Record<string, unknown>>
  }
  expect(message.content).toEqual([
    {
      type: 'tool_use',
      id: 'function-call-1',
      name: 'Bash',
      input: { command: 'pwd' },
    },
  ])
})
// JSON literals that parse but are not objects/strings (false, null, [])
// are passed through as the parsed value rather than being wrapped as a
// command — documenting the current pass-through behavior for each literal.
test.each([
  ['false', false],
  ['null', null],
  ['[]', []],
])(
  'preserves malformed Bash JSON literals as parsed values in non-streaming responses: %s',
  async (argumentsValue, expectedInput) => {
    globalThis.fetch = (async (_input, _init) => {
      return new Response(
        JSON.stringify({
          id: 'chatcmpl-1',
          model: 'google/gemini-3.1-pro-preview',
          choices: [
            {
              message: {
                role: 'assistant',
                tool_calls: [
                  {
                    id: 'function-call-1',
                    type: 'function',
                    function: {
                      name: 'Bash',
                      arguments: argumentsValue,
                    },
                  },
                ],
              },
              finish_reason: 'tool_calls',
            },
          ],
          usage: {
            prompt_tokens: 12,
            completion_tokens: 4,
            total_tokens: 16,
          },
        }),
        {
          headers: {
            'Content-Type': 'application/json',
          },
        },
      )
    }) as FetchType
    const client = createOpenAIShimClient({}) as OpenAIShimClient
    const message = await client.beta.messages.create({
      model: 'google/gemini-3.1-pro-preview',
      system: 'test system',
      messages: [{ role: 'user', content: 'Use Bash' }],
      max_tokens: 64,
      stream: false,
    }) as {
      content?: Array<Record<string, unknown>>
    }
    expect(message.content).toEqual([
      {
        type: 'tool_use',
        id: 'function-call-1',
        name: 'Bash',
        input: expectedInput,
      },
    ])
  },
)
// An entirely empty arguments string is not treated as a shell command; it
// normalizes to an empty input object so downstream validation can reject it.
test('keeps terminal empty Bash tool arguments invalid in non-streaming responses', async () => {
  globalThis.fetch = (async (_input, _init) => {
    return new Response(
      JSON.stringify({
        id: 'chatcmpl-1',
        model: 'google/gemini-3.1-pro-preview',
        choices: [
          {
            message: {
              role: 'assistant',
              tool_calls: [
                {
                  id: 'function-call-1',
                  type: 'function',
                  function: {
                    name: 'Bash',
                    arguments: '',
                  },
                },
              ],
            },
            finish_reason: 'tool_calls',
          },
        ],
        usage: {
          prompt_tokens: 12,
          completion_tokens: 4,
          total_tokens: 16,
        },
      }),
      {
        headers: {
          'Content-Type': 'application/json',
        },
      },
    )
  }) as FetchType
  const client = createOpenAIShimClient({}) as OpenAIShimClient
  const message = await client.beta.messages.create({
    model: 'google/gemini-3.1-pro-preview',
    system: 'test system',
    messages: [{ role: 'user', content: 'Use Bash' }],
    max_tokens: 64,
    stream: false,
  }) as {
    content?: Array<Record<string, unknown>>
  }
  expect(message.content).toEqual([
    {
      type: 'tool_use',
      id: 'function-call-1',
      name: 'Bash',
      input: {},
    },
  ])
})
// Streaming variant of the plain-string normalization: when the whole raw
// command arrives in one delta, the concatenated input_json_delta fragments
// must reconstruct a valid { command } JSON object.
test('normalizes plain string Bash tool arguments in streaming responses', async () => {
  globalThis.fetch = (async (_input, _init) => {
    const chunks = makeStreamChunks([
      {
        id: 'chatcmpl-1',
        object: 'chat.completion.chunk',
        model: 'google/gemini-3.1-pro-preview',
        choices: [
          {
            index: 0,
            delta: {
              role: 'assistant',
              tool_calls: [
                {
                  index: 0,
                  id: 'function-call-1',
                  type: 'function',
                  function: {
                    name: 'Bash',
                    // Raw shell string instead of JSON.
                    arguments: 'pwd',
                  },
                },
              ],
            },
            finish_reason: null,
          },
        ],
      },
      {
        id: 'chatcmpl-1',
        object: 'chat.completion.chunk',
        model: 'google/gemini-3.1-pro-preview',
        choices: [
          {
            index: 0,
            delta: {},
            finish_reason: 'tool_calls',
          },
        ],
      },
    ])
    return makeSseResponse(chunks)
  }) as FetchType
  const client = createOpenAIShimClient({}) as OpenAIShimClient
  const result = await client.beta.messages
    .create({
      model: 'google/gemini-3.1-pro-preview',
      system: 'test system',
      messages: [{ role: 'user', content: 'Use Bash' }],
      max_tokens: 64,
      stream: true,
    })
    .withResponse()
  const events: Array<Record<string, unknown>> = []
  for await (const event of result.data) {
    events.push(event)
  }
  // Reassemble the streamed tool input from input_json_delta fragments.
  const normalizedInput = events
    .filter(
      event =>
        event.type === 'content_block_delta' &&
        typeof event.delta === 'object' &&
        event.delta !== null &&
        (event.delta as Record<string, unknown>).type === 'input_json_delta',
    )
    .map(event => (event.delta as Record<string, unknown>).partial_json)
    .join('')
  expect(normalizedInput).toBe('{"command":"pwd"}')
})
// Same streaming normalization when the first tool_call delta has an empty
// arguments fragment and the real command arrives in a later delta — the
// normalizer must not commit to "invalid/empty" on the first chunk.
test('normalizes plain string Bash tool arguments when streaming starts with an empty chunk', async () => {
  globalThis.fetch = (async (_input, _init) => {
    const chunks = makeStreamChunks([
      {
        id: 'chatcmpl-1',
        object: 'chat.completion.chunk',
        model: 'google/gemini-3.1-pro-preview',
        choices: [
          {
            index: 0,
            delta: {
              role: 'assistant',
              tool_calls: [
                {
                  index: 0,
                  id: 'function-call-1',
                  type: 'function',
                  function: {
                    name: 'Bash',
                    // First fragment is empty.
                    arguments: '',
                  },
                },
              ],
            },
            finish_reason: null,
          },
        ],
      },
      {
        id: 'chatcmpl-1',
        object: 'chat.completion.chunk',
        model: 'google/gemini-3.1-pro-preview',
        choices: [
          {
            index: 0,
            delta: {
              tool_calls: [
                {
                  index: 0,
                  type: 'function',
                  function: {
                    // Command text arrives in the second fragment.
                    arguments: 'pwd',
                  },
                },
              ],
            },
            finish_reason: null,
          },
        ],
      },
      {
        id: 'chatcmpl-1',
        object: 'chat.completion.chunk',
        model: 'google/gemini-3.1-pro-preview',
        choices: [
          {
            index: 0,
            delta: {},
            finish_reason: 'tool_calls',
          },
        ],
      },
    ])
    return makeSseResponse(chunks)
  }) as FetchType
  const client = createOpenAIShimClient({}) as OpenAIShimClient
  const result = await client.beta.messages
    .create({
      model: 'google/gemini-3.1-pro-preview',
      system: 'test system',
      messages: [{ role: 'user', content: 'Use Bash' }],
      max_tokens: 64,
      stream: true,
    })
    .withResponse()
  const events: Array<Record<string, unknown>> = []
  for await (const event of result.data) {
    events.push(event)
  }
  const normalizedInput = events
    .filter(
      event =>
        event.type === 'content_block_delta' &&
        typeof event.delta === 'object' &&
        event.delta !== null &&
        (event.delta as Record<string, unknown>).type === 'input_json_delta',
    )
    .map(event => (event.delta as Record<string, unknown>).partial_json)
    .join('')
  expect(normalizedInput).toBe('{"command":"pwd"}')
})
// A leading whitespace-only fragment followed by command text must normalize
// into a command that *includes* that leading whitespace (' pwd'), since the
// fragments are concatenated verbatim before wrapping.
test('normalizes plain string Bash tool arguments when streaming starts with whitespace', async () => {
  globalThis.fetch = (async (_input, _init) => {
    const chunks = makeStreamChunks([
      {
        id: 'chatcmpl-1',
        object: 'chat.completion.chunk',
        model: 'google/gemini-3.1-pro-preview',
        choices: [
          {
            index: 0,
            delta: {
              role: 'assistant',
              tool_calls: [
                {
                  index: 0,
                  id: 'function-call-1',
                  type: 'function',
                  function: {
                    name: 'Bash',
                    // Whitespace-only first fragment.
                    arguments: ' ',
                  },
                },
              ],
            },
            finish_reason: null,
          },
        ],
      },
      {
        id: 'chatcmpl-1',
        object: 'chat.completion.chunk',
        model: 'google/gemini-3.1-pro-preview',
        choices: [
          {
            index: 0,
            delta: {
              tool_calls: [
                {
                  index: 0,
                  type: 'function',
                  function: {
                    arguments: 'pwd',
                  },
                },
              ],
            },
            finish_reason: null,
          },
        ],
      },
      {
        id: 'chatcmpl-1',
        object: 'chat.completion.chunk',
        model: 'google/gemini-3.1-pro-preview',
        choices: [
          {
            index: 0,
            delta: {},
            finish_reason: 'tool_calls',
          },
        ],
      },
    ])
    return makeSseResponse(chunks)
  }) as FetchType
  const client = createOpenAIShimClient({}) as OpenAIShimClient
  const result = await client.beta.messages
    .create({
      model: 'google/gemini-3.1-pro-preview',
      system: 'test system',
      messages: [{ role: 'user', content: 'Use Bash' }],
      max_tokens: 64,
      stream: true,
    })
    .withResponse()
  const events: Array<Record<string, unknown>> = []
  for await (const event of result.data) {
    events.push(event)
  }
  const normalizedInput = events
    .filter(
      event =>
        event.type === 'content_block_delta' &&
        typeof event.delta === 'object' &&
        event.delta !== null &&
        (event.delta as Record<string, unknown>).type === 'input_json_delta',
    )
    .map(event => (event.delta as Record<string, unknown>).partial_json)
    .join('')
  // Leading whitespace is preserved inside the normalized command string.
  expect(normalizedInput).toBe('{"command":" pwd"}')
})
// If the stream ends with only whitespace ever received for the arguments,
// the result is an empty JSON object — whitespace alone is not promoted to a
// command, so downstream validation still sees an invalid/empty input.
test('keeps terminal whitespace-only Bash arguments invalid in streaming responses', async () => {
  globalThis.fetch = (async (_input, _init) => {
    const chunks = makeStreamChunks([
      {
        id: 'chatcmpl-1',
        object: 'chat.completion.chunk',
        model: 'google/gemini-3.1-pro-preview',
        choices: [
          {
            index: 0,
            delta: {
              role: 'assistant',
              tool_calls: [
                {
                  index: 0,
                  id: 'function-call-1',
                  type: 'function',
                  function: {
                    name: 'Bash',
                    arguments: ' ',
                  },
                },
              ],
            },
            finish_reason: null,
          },
        ],
      },
      {
        id: 'chatcmpl-1',
        object: 'chat.completion.chunk',
        model: 'google/gemini-3.1-pro-preview',
        choices: [
          {
            index: 0,
            delta: {},
            finish_reason: 'tool_calls',
          },
        ],
      },
    ])
    return makeSseResponse(chunks)
  }) as FetchType
  const client = createOpenAIShimClient({}) as OpenAIShimClient
  const result = await client.beta.messages
    .create({
      model: 'google/gemini-3.1-pro-preview',
      system: 'test system',
      messages: [{ role: 'user', content: 'Use Bash' }],
      max_tokens: 64,
      stream: true,
    })
    .withResponse()
  const events: Array<Record<string, unknown>> = []
  for await (const event of result.data) {
    events.push(event)
  }
  const normalizedInput = events
    .filter(
      event =>
        event.type === 'content_block_delta' &&
        typeof event.delta === 'object' &&
        event.delta !== null &&
        (event.delta as Record<string, unknown>).type === 'input_json_delta',
    )
    .map(event => (event.delta as Record<string, unknown>).partial_json)
    .join('')
  expect(normalizedInput).toBe('{}')
})
test('normalizes streaming Bash arguments that begin with bracket syntax', async () => {
  // A raw shell command starting with `[` must be wrapped as {"command": ...},
  // not misread as the start of a JSON array.
  globalThis.fetch = (async () =>
    makeSseResponse(
      makeStreamChunks([
        {
          id: 'chatcmpl-1',
          object: 'chat.completion.chunk',
          model: 'google/gemini-3.1-pro-preview',
          choices: [
            {
              index: 0,
              delta: {
                role: 'assistant',
                tool_calls: [
                  {
                    index: 0,
                    id: 'function-call-1',
                    type: 'function',
                    function: { name: 'Bash', arguments: '[ -f package.json ] && pwd' },
                  },
                ],
              },
              finish_reason: null,
            },
          ],
        },
        {
          id: 'chatcmpl-1',
          object: 'chat.completion.chunk',
          model: 'google/gemini-3.1-pro-preview',
          choices: [{ index: 0, delta: {}, finish_reason: 'tool_calls' }],
        },
      ]),
    )) as FetchType
  const shim = createOpenAIShimClient({}) as OpenAIShimClient
  const { data } = await shim.beta.messages
    .create({
      model: 'google/gemini-3.1-pro-preview',
      system: 'test system',
      messages: [{ role: 'user', content: 'Use Bash' }],
      max_tokens: 64,
      stream: true,
    })
    .withResponse()
  // Concatenate every input_json_delta fragment the shim emitted.
  let joined = ''
  for await (const raw of data) {
    const event = raw as Record<string, unknown>
    if (event.type !== 'content_block_delta') continue
    const delta = event.delta
    if (typeof delta !== 'object' || delta === null) continue
    const partial = delta as Record<string, unknown>
    if (partial.type === 'input_json_delta') joined += String(partial.partial_json)
  }
  expect(joined).toBe('{"command":"[ -f package.json ] && pwd"}')
})
test('normalizes streaming Bash arguments when the first chunk is only an opening brace', async () => {
  // The first arguments fragment is just '{' — ambiguous between JSON and a
  // shell group command. The full text '{ pwd; }' must be treated as a command.
  globalThis.fetch = (async () =>
    makeSseResponse(
      makeStreamChunks([
        {
          id: 'chatcmpl-1',
          object: 'chat.completion.chunk',
          model: 'google/gemini-3.1-pro-preview',
          choices: [
            {
              index: 0,
              delta: {
                role: 'assistant',
                tool_calls: [
                  {
                    index: 0,
                    id: 'function-call-1',
                    type: 'function',
                    function: { name: 'Bash', arguments: '{' },
                  },
                ],
              },
              finish_reason: null,
            },
          ],
        },
        {
          id: 'chatcmpl-1',
          object: 'chat.completion.chunk',
          model: 'google/gemini-3.1-pro-preview',
          choices: [
            {
              index: 0,
              delta: {
                tool_calls: [
                  { index: 0, type: 'function', function: { arguments: ' pwd; }' } },
                ],
              },
              finish_reason: null,
            },
          ],
        },
        {
          id: 'chatcmpl-1',
          object: 'chat.completion.chunk',
          model: 'google/gemini-3.1-pro-preview',
          choices: [{ index: 0, delta: {}, finish_reason: 'tool_calls' }],
        },
      ]),
    )) as FetchType
  const shim = createOpenAIShimClient({}) as OpenAIShimClient
  const { data } = await shim.beta.messages
    .create({
      model: 'google/gemini-3.1-pro-preview',
      system: 'test system',
      messages: [{ role: 'user', content: 'Use Bash' }],
      max_tokens: 64,
      stream: true,
    })
    .withResponse()
  // Concatenate every input_json_delta fragment the shim emitted.
  let joined = ''
  for await (const raw of data) {
    const event = raw as Record<string, unknown>
    if (event.type !== 'content_block_delta') continue
    const delta = event.delta
    if (typeof delta !== 'object' || delta === null) continue
    const partial = delta as Record<string, unknown>
    if (partial.type === 'input_json_delta') joined += String(partial.partial_json)
  }
  expect(joined).toBe('{"command":"{ pwd; }"}')
})
test('repairs truncated structured Bash JSON in streaming responses', async () => {
  // The stream ends (finish_reason=tool_calls) with the JSON object missing
  // its closing brace; the shim must complete it.
  globalThis.fetch = (async () =>
    makeSseResponse(
      makeStreamChunks([
        {
          id: 'chatcmpl-1',
          object: 'chat.completion.chunk',
          model: 'google/gemini-3.1-pro-preview',
          choices: [
            {
              index: 0,
              delta: {
                role: 'assistant',
                tool_calls: [
                  {
                    index: 0,
                    id: 'function-call-1',
                    type: 'function',
                    function: { name: 'Bash', arguments: '{"command":"pwd"' },
                  },
                ],
              },
              finish_reason: null,
            },
          ],
        },
        {
          id: 'chatcmpl-1',
          object: 'chat.completion.chunk',
          model: 'google/gemini-3.1-pro-preview',
          choices: [{ index: 0, delta: {}, finish_reason: 'tool_calls' }],
        },
      ]),
    )) as FetchType
  const shim = createOpenAIShimClient({}) as OpenAIShimClient
  const { data } = await shim.beta.messages
    .create({
      model: 'google/gemini-3.1-pro-preview',
      system: 'test system',
      messages: [{ role: 'user', content: 'Use Bash' }],
      max_tokens: 64,
      stream: true,
    })
    .withResponse()
  // Concatenate every input_json_delta fragment the shim emitted.
  let joined = ''
  for await (const raw of data) {
    const event = raw as Record<string, unknown>
    if (event.type !== 'content_block_delta') continue
    const delta = event.delta
    if (typeof delta !== 'object' || delta === null) continue
    const partial = delta as Record<string, unknown>
    if (partial.type === 'input_json_delta') joined += String(partial.partial_json)
  }
  expect(joined).toBe('{"command":"pwd"}')
})
test('does not normalize incomplete streamed Bash commands when finish_reason is length', async () => {
  // finish_reason=length means the output was cut off mid-command, so the shim
  // must pass the fragment through untouched rather than wrap it as JSON.
  globalThis.fetch = (async () =>
    makeSseResponse(
      makeStreamChunks([
        {
          id: 'chatcmpl-1',
          object: 'chat.completion.chunk',
          model: 'google/gemini-3.1-pro-preview',
          choices: [
            {
              index: 0,
              delta: {
                role: 'assistant',
                tool_calls: [
                  {
                    index: 0,
                    id: 'function-call-1',
                    type: 'function',
                    function: { name: 'Bash', arguments: 'rg --fi' },
                  },
                ],
              },
              finish_reason: null,
            },
          ],
        },
        {
          id: 'chatcmpl-1',
          object: 'chat.completion.chunk',
          model: 'google/gemini-3.1-pro-preview',
          choices: [{ index: 0, delta: {}, finish_reason: 'length' }],
        },
      ]),
    )) as FetchType
  const shim = createOpenAIShimClient({}) as OpenAIShimClient
  const { data } = await shim.beta.messages
    .create({
      model: 'google/gemini-3.1-pro-preview',
      system: 'test system',
      messages: [{ role: 'user', content: 'Use Bash' }],
      max_tokens: 64,
      stream: true,
    })
    .withResponse()
  // Concatenate every input_json_delta fragment the shim emitted.
  let joined = ''
  for await (const raw of data) {
    const event = raw as Record<string, unknown>
    if (event.type !== 'content_block_delta') continue
    const delta = event.delta
    if (typeof delta !== 'object' || delta === null) continue
    const partial = delta as Record<string, unknown>
    if (partial.type === 'input_json_delta') joined += String(partial.partial_json)
  }
  expect(joined).toBe('rg --fi')
})
test('repairs truncated JSON objects even without command field', async () => {
  // JSON repair applies to any truncated object, not just ones with "command".
  globalThis.fetch = (async () =>
    makeSseResponse(
      makeStreamChunks([
        {
          id: 'chatcmpl-1',
          object: 'chat.completion.chunk',
          model: 'google/gemini-3.1-pro-preview',
          choices: [
            {
              index: 0,
              delta: {
                role: 'assistant',
                tool_calls: [
                  {
                    index: 0,
                    id: 'function-call-1',
                    type: 'function',
                    function: { name: 'Bash', arguments: '{"cwd":"/tmp"' },
                  },
                ],
              },
              finish_reason: null,
            },
          ],
        },
        {
          id: 'chatcmpl-1',
          object: 'chat.completion.chunk',
          model: 'google/gemini-3.1-pro-preview',
          choices: [{ index: 0, delta: {}, finish_reason: 'tool_calls' }],
        },
      ]),
    )) as FetchType
  const shim = createOpenAIShimClient({}) as OpenAIShimClient
  const { data } = await shim.beta.messages
    .create({
      model: 'google/gemini-3.1-pro-preview',
      system: 'test system',
      messages: [{ role: 'user', content: 'Use Bash' }],
      max_tokens: 64,
      stream: true,
    })
    .withResponse()
  // Concatenate every input_json_delta fragment the shim emitted.
  let joined = ''
  for await (const raw of data) {
    const event = raw as Record<string, unknown>
    if (event.type !== 'content_block_delta') continue
    const delta = event.delta
    if (typeof delta !== 'object' || delta === null) continue
    const partial = delta as Record<string, unknown>
    if (partial.type === 'input_json_delta') joined += String(partial.partial_json)
  }
  expect(joined).toBe('{"cwd":"/tmp"}')
})
test('preserves raw input for unknown plain string tool arguments', async () => {
  // An unknown tool with non-JSON plain-string arguments gets an empty object
  // input instead of a guessed command shape.
  globalThis.fetch = (async () =>
    new Response(
      JSON.stringify({
        id: 'chatcmpl-1',
        model: 'google/gemini-3.1-pro-preview',
        choices: [
          {
            message: {
              role: 'assistant',
              tool_calls: [
                {
                  id: 'function-call-1',
                  type: 'function',
                  function: { name: 'UnknownTool', arguments: 'pwd' },
                },
              ],
            },
            finish_reason: 'tool_calls',
          },
        ],
        usage: { prompt_tokens: 12, completion_tokens: 4, total_tokens: 16 },
      }),
      { headers: { 'Content-Type': 'application/json' } },
    )) as FetchType
  const shim = createOpenAIShimClient({}) as OpenAIShimClient
  const message = (await shim.beta.messages.create({
    model: 'google/gemini-3.1-pro-preview',
    system: 'test system',
    messages: [{ role: 'user', content: 'Use tool' }],
    max_tokens: 64,
    stream: false,
  })) as { content?: Array<Record<string, unknown>> }
  expect(message.content).toEqual([
    { type: 'tool_use', id: 'function-call-1', name: 'UnknownTool', input: {} },
  ])
})
test('preserves parsed string input for unknown JSON string tool arguments', async () => {
  // '"pwd"' is valid JSON (a string), so the parsed string becomes the input
  // for the unknown tool rather than being discarded.
  globalThis.fetch = (async () =>
    new Response(
      JSON.stringify({
        id: 'chatcmpl-1',
        model: 'google/gemini-3.1-pro-preview',
        choices: [
          {
            message: {
              role: 'assistant',
              tool_calls: [
                {
                  id: 'function-call-1',
                  type: 'function',
                  function: { name: 'UnknownTool', arguments: '"pwd"' },
                },
              ],
            },
            finish_reason: 'tool_calls',
          },
        ],
        usage: { prompt_tokens: 12, completion_tokens: 4, total_tokens: 16 },
      }),
      { headers: { 'Content-Type': 'application/json' } },
    )) as FetchType
  const shim = createOpenAIShimClient({}) as OpenAIShimClient
  const message = (await shim.beta.messages.create({
    model: 'google/gemini-3.1-pro-preview',
    system: 'test system',
    messages: [{ role: 'user', content: 'Use tool' }],
    max_tokens: 64,
    stream: false,
  })) as { content?: Array<Record<string, unknown>> }
  expect(message.content).toEqual([
    { type: 'tool_use', id: 'function-call-1', name: 'UnknownTool', input: 'pwd' },
  ])
})
test('sanitizes malformed MCP tool schemas before sending them to OpenAI', async () => {
  let capturedBody: Record<string, unknown> | undefined
  globalThis.fetch = (async (_input, init) => {
    // Capture what the shim actually sent to the provider.
    capturedBody = JSON.parse(String(init?.body))
    return new Response(
      JSON.stringify({
        id: 'chatcmpl-1',
        model: 'gpt-4o',
        choices: [{ message: { role: 'assistant', content: 'ok' }, finish_reason: 'stop' }],
        usage: { prompt_tokens: 10, completion_tokens: 1, total_tokens: 11 },
      }),
      { headers: { 'Content-Type': 'application/json' } },
    )
  }) as FetchType
  const shim = createOpenAIShimClient({}) as OpenAIShimClient
  await shim.beta.messages.create({
    model: 'gpt-4o',
    system: 'test system',
    messages: [{ role: 'user', content: 'hello' }],
    tools: [
      {
        name: 'mcp__clientry__create_task',
        description: 'Create a task',
        input_schema: {
          type: 'object',
          properties: {
            priority: {
              type: 'integer',
              description: 'Priority: 0=low, 1=medium, 2=high, 3=urgent',
              default: true,
              enum: [false, 0, 1, 2, 3],
            },
          },
        },
      },
    ],
    max_tokens: 64,
    stream: false,
  })
  const sentTools = capturedBody?.tools as
    | Array<{ function?: { parameters?: Record<string, unknown> } }>
    | undefined
  const parameters = sentTools?.[0]?.function?.parameters
  const properties = parameters?.properties as
    | Record<string, { default?: unknown; enum?: unknown[]; type?: string }>
    | undefined
  expect(parameters?.additionalProperties).toBe(false)
  // No required[] in the original schema → none added (optional properties must not be forced required)
  expect(parameters?.required).toEqual([])
  expect(properties?.priority?.type).toBe('integer')
  // The boolean enum entry and the type-mismatched default are stripped.
  expect(properties?.priority?.enum).toEqual([0, 1, 2, 3])
  expect(properties?.priority).not.toHaveProperty('default')
})
test('optional tool properties are not added to required[] — fixes Groq/Azure 400 tool_use_failed', async () => {
  // Regression: strict-mode conversion used to push every optional property into
  // required[], causing providers like Groq to reject valid tool calls where the
  // model omitted optional args.
  let capturedBody: Record<string, unknown> | undefined
  globalThis.fetch = (async (_input, init) => {
    capturedBody = JSON.parse(String(init?.body))
    return new Response(
      JSON.stringify({
        id: 'chatcmpl-4',
        model: 'gpt-4o',
        choices: [{ message: { role: 'assistant', content: 'ok' }, finish_reason: 'stop' }],
        usage: { prompt_tokens: 5, completion_tokens: 2, total_tokens: 7 },
      }),
      { headers: { 'Content-Type': 'application/json' } },
    )
  }) as FetchType
  const shim = createOpenAIShimClient({}) as OpenAIShimClient
  await shim.beta.messages.create({
    model: 'gpt-4o',
    messages: [{ role: 'user', content: 'read a file' }],
    tools: [
      {
        name: 'Read',
        description: 'Read a file',
        input_schema: {
          type: 'object',
          properties: {
            file_path: { type: 'string', description: 'Absolute path to file' },
            offset: { type: 'number', description: 'Line to start from' },
            limit: { type: 'number', description: 'Max lines to read' },
            pages: { type: 'string', description: 'Page range for PDFs' },
          },
          required: ['file_path'],
        },
      },
    ],
    max_tokens: 16,
    stream: false,
  })
  const sentTools = capturedBody?.tools as
    | Array<{ function?: { parameters?: Record<string, unknown> } }>
    | undefined
  const parameters = sentTools?.[0]?.function?.parameters
  // Only the property the schema itself marked required survives.
  expect(parameters?.required).toEqual(['file_path'])
  const requiredList = parameters?.required as string[] | undefined
  expect(requiredList).not.toContain('offset')
  expect(requiredList).not.toContain('limit')
  expect(requiredList).not.toContain('pages')
  expect(parameters?.additionalProperties).toBe(false)
})
// ---------------------------------------------------------------------------
// Issue #202 — consecutive role coalescing (Devstral, Mistral strict templates)
// ---------------------------------------------------------------------------
// Builds a minimal non-streaming chat-completion HTTP response whose assistant
// message carries the given text. Shared by the role-coalescing tests below.
function makeNonStreamResponse(content = 'ok'): Response {
  const payload = {
    id: 'chatcmpl-test',
    model: 'test-model',
    choices: [{ message: { role: 'assistant', content }, finish_reason: 'stop' }],
    usage: { prompt_tokens: 5, completion_tokens: 1, total_tokens: 6 },
  }
  return new Response(JSON.stringify(payload), {
    headers: { 'Content-Type': 'application/json' },
  })
}
test('coalesces consecutive user messages to avoid alternation errors (issue #202)', async () => {
  let outbound: Array<{ role: string; content: unknown }> | undefined
  globalThis.fetch = (async (_input: unknown, init: RequestInit | undefined) => {
    outbound = JSON.parse(String(init?.body)).messages
    return makeNonStreamResponse()
  }) as FetchType
  const shim = createOpenAIShimClient({}) as OpenAIShimClient
  await shim.beta.messages.create({
    model: 'test-model',
    system: 'sys',
    messages: [
      { role: 'user', content: 'first message' },
      { role: 'user', content: 'second message' },
    ],
    max_tokens: 64,
    stream: false,
  })
  // system + one merged user message; both originals survive in the merged text.
  expect(outbound?.length).toBe(2)
  expect(outbound?.[0]?.role).toBe('system')
  expect(outbound?.[1]?.role).toBe('user')
  const merged = outbound?.[1]?.content as string
  expect(merged).toContain('first message')
  expect(merged).toContain('second message')
})
test('coalesces consecutive assistant messages preserving tool_calls (issue #202)', async () => {
  let outbound: Array<{ role: string; content: unknown; tool_calls?: unknown[] }> | undefined
  globalThis.fetch = (async (_input: unknown, init: RequestInit | undefined) => {
    outbound = JSON.parse(String(init?.body)).messages
    return makeNonStreamResponse()
  }) as FetchType
  const shim = createOpenAIShimClient({}) as OpenAIShimClient
  await shim.beta.messages.create({
    model: 'test-model',
    system: 'sys',
    messages: [
      { role: 'user', content: 'go' },
      { role: 'assistant', content: 'thinking...' },
      {
        role: 'assistant',
        content: [{ type: 'tool_use', id: 'call_1', name: 'Bash', input: { command: 'ls' } }],
      },
      { role: 'user', content: [{ type: 'tool_result', tool_use_id: 'call_1', content: 'file.txt' }] },
    ],
    max_tokens: 64,
    stream: false,
  })
  // The two assistant turns collapse into one, and the tool_calls survive.
  const assistantTurns = outbound?.filter(m => m.role === 'assistant')
  expect(assistantTurns?.length).toBe(1)
  expect(assistantTurns?.[0]?.tool_calls?.length).toBeGreaterThan(0)
})
test('non-streaming: reasoning_content emitted as thinking block only when content is null', async () => {
  globalThis.fetch = (async () =>
    new Response(
      JSON.stringify({
        id: 'chatcmpl-1',
        model: 'glm-5',
        choices: [
          {
            message: {
              role: 'assistant',
              content: null,
              reasoning_content: 'Let me think about this step by step.',
            },
            finish_reason: 'stop',
          },
        ],
        usage: { prompt_tokens: 10, completion_tokens: 20, total_tokens: 30 },
      }),
      { headers: { 'Content-Type': 'application/json' } },
    )) as FetchType
  const shim = createOpenAIShimClient({}) as OpenAIShimClient
  const result = (await shim.beta.messages.create({
    model: 'glm-5',
    system: 'test system',
    messages: [{ role: 'user', content: 'hello' }],
    max_tokens: 64,
    stream: false,
  })) as { content: Array<Record<string, unknown>> }
  // null content + reasoning_content → a single thinking block, no text block.
  expect(result.content).toEqual([
    { type: 'thinking', thinking: 'Let me think about this step by step.' },
  ])
})
test('non-streaming: empty string content does not fall through to reasoning_content as text', async () => {
  globalThis.fetch = (async () =>
    new Response(
      JSON.stringify({
        id: 'chatcmpl-1',
        model: 'glm-5',
        choices: [
          {
            message: {
              role: 'assistant',
              content: '',
              reasoning_content: 'Chain of thought here.',
            },
            finish_reason: 'stop',
          },
        ],
        usage: { prompt_tokens: 10, completion_tokens: 20, total_tokens: 30 },
      }),
      { headers: { 'Content-Type': 'application/json' } },
    )) as FetchType
  const shim = createOpenAIShimClient({}) as OpenAIShimClient
  const result = (await shim.beta.messages.create({
    model: 'glm-5',
    system: 'test system',
    messages: [{ role: 'user', content: 'hello' }],
    max_tokens: 64,
    stream: false,
  })) as { content: Array<Record<string, unknown>> }
  // Empty-string content behaves like null: reasoning becomes thinking, not text.
  expect(result.content).toEqual([
    { type: 'thinking', thinking: 'Chain of thought here.' },
  ])
})
test('non-streaming: real content takes precedence over reasoning_content', async () => {
  globalThis.fetch = (async () =>
    new Response(
      JSON.stringify({
        id: 'chatcmpl-1',
        model: 'glm-5',
        choices: [
          {
            message: {
              role: 'assistant',
              content: 'The answer is 42.',
              reasoning_content: 'I need to calculate this.',
            },
            finish_reason: 'stop',
          },
        ],
        usage: { prompt_tokens: 10, completion_tokens: 20, total_tokens: 30 },
      }),
      { headers: { 'Content-Type': 'application/json' } },
    )) as FetchType
  const shim = createOpenAIShimClient({}) as OpenAIShimClient
  const result = (await shim.beta.messages.create({
    model: 'glm-5',
    system: 'test system',
    messages: [{ role: 'user', content: 'hello' }],
    max_tokens: 64,
    stream: false,
  })) as { content: Array<Record<string, unknown>> }
  // Both blocks appear: thinking first, then the real text content.
  expect(result.content).toEqual([
    { type: 'thinking', thinking: 'I need to calculate this.' },
    { type: 'text', text: 'The answer is 42.' },
  ])
})
test('non-streaming: strips leaked reasoning preamble from assistant content', async () => {
  globalThis.fetch = (async () =>
    new Response(
      JSON.stringify({
        id: 'chatcmpl-1',
        model: 'gpt-5-mini',
        choices: [
          {
            message: {
              role: 'assistant',
              content:
                'The user just said "hey" - a simple greeting. I should respond briefly and friendly.\n\nHey! How can I help you today?',
            },
            finish_reason: 'stop',
          },
        ],
        usage: { prompt_tokens: 10, completion_tokens: 20, total_tokens: 30 },
      }),
      { headers: { 'Content-Type': 'application/json' } },
    )) as FetchType
  const shim = createOpenAIShimClient({}) as OpenAIShimClient
  const result = (await shim.beta.messages.create({
    model: 'gpt-5-mini',
    system: 'test system',
    messages: [{ role: 'user', content: 'hey' }],
    max_tokens: 64,
    stream: false,
  })) as { content: Array<Record<string, unknown>> }
  // Only the post-preamble reply survives as text.
  expect(result.content).toEqual([
    { type: 'text', text: 'Hey! How can I help you today?' },
  ])
})
test('streaming: thinking block closed before tool call', async () => {
  // Event ordering contract: thinking content_block_start → content_block_stop
  // → tool content_block_start. The thinking block must be closed first.
  globalThis.fetch = (async () =>
    makeSseResponse(
      makeStreamChunks([
        {
          id: 'chatcmpl-1',
          object: 'chat.completion.chunk',
          model: 'glm-5',
          choices: [
            {
              index: 0,
              delta: { role: 'assistant', reasoning_content: 'Thinking...' },
              finish_reason: null,
            },
          ],
        },
        {
          id: 'chatcmpl-1',
          object: 'chat.completion.chunk',
          model: 'glm-5',
          choices: [
            {
              index: 0,
              delta: {
                tool_calls: [
                  {
                    index: 0,
                    id: 'call-1',
                    type: 'function',
                    function: { name: 'Bash', arguments: '{"command":"ls"}' },
                  },
                ],
              },
              finish_reason: null,
            },
          ],
        },
        {
          id: 'chatcmpl-1',
          object: 'chat.completion.chunk',
          model: 'glm-5',
          choices: [{ index: 0, delta: {}, finish_reason: 'tool_calls' }],
        },
      ]),
    )) as FetchType
  const shim = createOpenAIShimClient({}) as OpenAIShimClient
  const { data } = await shim.beta.messages
    .create({
      model: 'glm-5',
      system: 'test system',
      messages: [{ role: 'user', content: 'Run ls' }],
      max_tokens: 64,
      stream: true,
    })
    .withResponse()
  const collected: Array<Record<string, unknown>> = []
  for await (const event of data) {
    collected.push(event as Record<string, unknown>)
  }
  const kinds = collected.map(e => e.type)
  const thinkingStartIdx = kinds.indexOf('content_block_start')
  const firstStopIdx = kinds.indexOf('content_block_stop')
  const toolStartIdx = kinds.indexOf('content_block_start', thinkingStartIdx + 1)
  expect(thinkingStartIdx).toBeGreaterThanOrEqual(0)
  expect(firstStopIdx).toBeGreaterThan(thinkingStartIdx)
  expect(toolStartIdx).toBeGreaterThan(firstStopIdx)
  const firstStart = collected[thinkingStartIdx] as {
    content_block?: Record<string, unknown>
  }
  expect(firstStart?.content_block?.type).toBe('thinking')
})
test('streaming: strips leaked reasoning preamble from assistant content deltas', async () => {
  globalThis.fetch = (async () =>
    makeSseResponse(
      makeStreamChunks([
        {
          id: 'chatcmpl-1',
          object: 'chat.completion.chunk',
          model: 'gpt-5-mini',
          choices: [
            {
              index: 0,
              delta: {
                role: 'assistant',
                content:
                  'The user just said "hey" - a simple greeting. I should respond briefly and friendly.\n\nHey! How can I help you today?',
              },
              finish_reason: null,
            },
          ],
        },
        {
          id: 'chatcmpl-1',
          object: 'chat.completion.chunk',
          model: 'gpt-5-mini',
          choices: [{ index: 0, delta: {}, finish_reason: 'stop' }],
        },
      ]),
    )) as FetchType
  const shim = createOpenAIShimClient({}) as OpenAIShimClient
  const { data } = await shim.beta.messages
    .create({
      model: 'gpt-5-mini',
      system: 'test system',
      messages: [{ role: 'user', content: 'hey' }],
      max_tokens: 64,
      stream: true,
    })
    .withResponse()
  // Collect only the text_delta payloads; the preamble must never surface.
  const pieces: string[] = []
  for await (const event of data) {
    const maybeDelta = (event as { delta?: { type?: string; text?: string } }).delta
    if (maybeDelta?.type !== 'text_delta') continue
    if (typeof maybeDelta.text === 'string') pieces.push(maybeDelta.text)
  }
  expect(pieces).toEqual(['Hey! How can I help you today?'])
})
test('streaming: strips leaked reasoning preamble when split across multiple content chunks', async () => {
  // The preamble and the real reply arrive in separate content deltas; the
  // shim must still strip everything before the blank-line separator.
  globalThis.fetch = (async () =>
    makeSseResponse(
      makeStreamChunks([
        {
          id: 'chatcmpl-1',
          object: 'chat.completion.chunk',
          model: 'gpt-5-mini',
          choices: [
            {
              index: 0,
              delta: {
                role: 'assistant',
                content: 'The user said "hey" - this is a simple greeting. ',
              },
              finish_reason: null,
            },
          ],
        },
        {
          id: 'chatcmpl-1',
          object: 'chat.completion.chunk',
          model: 'gpt-5-mini',
          choices: [
            {
              index: 0,
              delta: {
                content:
                  'I should respond in a friendly, concise way.\n\nHey! How can I help you today?',
              },
              finish_reason: null,
            },
          ],
        },
        {
          id: 'chatcmpl-1',
          object: 'chat.completion.chunk',
          model: 'gpt-5-mini',
          choices: [{ index: 0, delta: {}, finish_reason: 'stop' }],
        },
      ]),
    )) as FetchType
  const shim = createOpenAIShimClient({}) as OpenAIShimClient
  const { data } = await shim.beta.messages
    .create({
      model: 'gpt-5-mini',
      system: 'test system',
      messages: [{ role: 'user', content: 'hey' }],
      max_tokens: 64,
      stream: true,
    })
    .withResponse()
  // Collect only the text_delta payloads; the preamble must never surface.
  const pieces: string[] = []
  for await (const event of data) {
    const maybeDelta = (event as { delta?: { type?: string; text?: string } }).delta
    if (maybeDelta?.type !== 'text_delta') continue
    if (typeof maybeDelta.text === 'string') pieces.push(maybeDelta.text)
  }
  expect(pieces).toEqual(['Hey! How can I help you today?'])
})