diff --git a/.github/workflows/pr-checks.yml b/.github/workflows/pr-checks.yml
new file mode 100644
index 00000000..1b2bba1b
--- /dev/null
+++ b/.github/workflows/pr-checks.yml
@@ -0,0 +1,37 @@
+name: PR Checks
+
+on:
+  pull_request:
+  push:
+    branches:
+      - main
+
+jobs:
+  smoke-and-tests:
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: Check out repository
+        uses: actions/checkout@v4
+
+      - name: Set up Node.js
+        uses: actions/setup-node@v4
+        with:
+          node-version: 22
+
+      - name: Set up Bun
+        uses: oven-sh/setup-bun@v2
+        with:
+          bun-version: 1.3.11
+
+      - name: Install dependencies
+        run: bun install --frozen-lockfile
+
+      - name: Smoke check
+        run: bun run smoke
+
+      - name: Provider tests
+        run: bun run test:provider
+
+      - name: Provider recommendation tests
+        run: bun run test:provider-recommendation
diff --git a/src/services/api/openaiShim.test.ts b/src/services/api/openaiShim.test.ts
index 34f81678..6141bdb9 100644
--- a/src/services/api/openaiShim.test.ts
+++ b/src/services/api/openaiShim.test.ts
@@ -10,6 +10,19 @@ const originalEnv = {
 
 const originalFetch = globalThis.fetch
 
+type OpenAIShimClient = {
+  beta: {
+    messages: {
+      create: (
+        params: Record<string, unknown>,
+        options?: Record<string, unknown>,
+      ) => Promise<unknown> & {
+        withResponse: () => Promise<{ data: AsyncIterable<Record<string, unknown>> }>
+      }
+    }
+  }
+}
+
 function makeSseResponse(lines: string[]): Response {
   const encoder = new TextEncoder()
   return new Response(
@@ -97,18 +110,7 @@ test('preserves usage from final OpenAI stream chunk with empty choices', async
     return makeSseResponse(chunks)
   }) as FetchType
 
-  const client = createOpenAIShimClient({}) as {
-    beta: {
-      messages: {
-        create: (
-          params: Record<string, unknown>,
-          options?: Record<string, unknown>,
-        ) => Promise<unknown> & {
-          withResponse: () => Promise<{ data: AsyncIterable<Record<string, unknown>> }>
-        }
-      }
-    }
-  }
+  const client = createOpenAIShimClient({}) as OpenAIShimClient
 
   const result = await client.beta.messages
     .create({
@@ -133,3 +135,180 @@
   expect(usageEvent?.usage?.input_tokens).toBe(123)
   expect(usageEvent?.usage?.output_tokens).toBe(45)
 })
+
+test('preserves Gemini tool call extra_content in follow-up requests', async () => {
+  let requestBody: Record<string, unknown> | undefined
+
+  globalThis.fetch = (async (_input, init) => {
+    requestBody = JSON.parse(String(init?.body))
+
+    return new Response(
+      JSON.stringify({
+        id: 'chatcmpl-1',
+        model: 'google/gemini-3.1-pro-preview',
+        choices: [
+          {
+            message: {
+              role: 'assistant',
+              content: 'done',
+            },
+            finish_reason: 'stop',
+          },
+        ],
+        usage: {
+          prompt_tokens: 12,
+          completion_tokens: 4,
+          total_tokens: 16,
+        },
+      }),
+      {
+        headers: {
+          'Content-Type': 'application/json',
+        },
+      },
+    )
+  }) as FetchType
+
+  const client = createOpenAIShimClient({}) as OpenAIShimClient
+
+  await client.beta.messages.create({
+    model: 'google/gemini-3.1-pro-preview',
+    system: 'test system',
+    messages: [
+      { role: 'user', content: 'Use Bash' },
+      {
+        role: 'assistant',
+        content: [
+          {
+            type: 'tool_use',
+            id: 'call_1',
+            name: 'Bash',
+            input: { command: 'pwd' },
+            extra_content: {
+              google: {
+                thought_signature: 'sig-123',
+              },
+            },
+          },
+        ],
+      },
+      {
+        role: 'user',
+        content: [
+          {
+            type: 'tool_result',
+            tool_use_id: 'call_1',
+            content: 'D:\\repo',
+          },
+        ],
+      },
+    ],
+    max_tokens: 64,
+    stream: false,
+  })
+
+  const assistantWithToolCall = (requestBody?.messages as Array<Record<string, unknown>>).find(
+    message => Array.isArray(message.tool_calls),
+  ) as { tool_calls?: Array<Record<string, unknown>> } | undefined
+
+  expect(assistantWithToolCall?.tool_calls?.[0]).toMatchObject({
+    id: 'call_1',
+    type: 'function',
+    function: {
+      name: 'Bash',
+      arguments: JSON.stringify({ command: 'pwd' }),
+    },
+    extra_content: {
+      google: {
+        thought_signature: 'sig-123',
+      },
+    },
+  })
+})
+
+test('preserves Gemini tool call extra_content from streaming chunks', async () => {
+  globalThis.fetch = (async (_input, _init) => {
+    const chunks = makeStreamChunks([
+      {
+        id: 'chatcmpl-1',
+        object: 'chat.completion.chunk',
+        model: 'google/gemini-3.1-pro-preview',
+        choices: [
+          {
+            index: 0,
+            delta: {
+              role: 'assistant',
+              tool_calls: [
+                {
+                  index: 0,
+                  id: 'function-call-1',
+                  type: 'function',
+                  extra_content: {
+                    google: {
+                      thought_signature: 'sig-stream',
+                    },
+                  },
+                  function: {
+                    name: 'Bash',
+                    arguments: '{"command":"pwd"}',
+                  },
+                },
+              ],
+            },
+            finish_reason: null,
+          },
+        ],
+      },
+      {
+        id: 'chatcmpl-1',
+        object: 'chat.completion.chunk',
+        model: 'google/gemini-3.1-pro-preview',
+        choices: [
+          {
+            index: 0,
+            delta: {},
+            finish_reason: 'tool_calls',
+          },
+        ],
+      },
+    ])
+
+    return makeSseResponse(chunks)
+  }) as FetchType
+
+  const client = createOpenAIShimClient({}) as OpenAIShimClient
+
+  const result = await client.beta.messages
+    .create({
+      model: 'google/gemini-3.1-pro-preview',
+      system: 'test system',
+      messages: [{ role: 'user', content: 'Use Bash' }],
+      max_tokens: 64,
+      stream: true,
+    })
+    .withResponse()
+
+  const events: Array<Record<string, unknown>> = []
+  for await (const event of result.data) {
+    events.push(event)
+  }
+
+  const toolStart = events.find(
+    event =>
+      event.type === 'content_block_start' &&
+      typeof event.content_block === 'object' &&
+      event.content_block !== null &&
+      (event.content_block as Record<string, unknown>).type === 'tool_use',
+  ) as { content_block?: Record<string, unknown> } | undefined
+
+  expect(toolStart?.content_block).toMatchObject({
+    type: 'tool_use',
+    id: 'function-call-1',
+    name: 'Bash',
+    extra_content: {
+      google: {
+        thought_signature: 'sig-stream',
+      },
+    },
+  })
+})
diff --git a/src/services/api/openaiShim.ts b/src/services/api/openaiShim.ts
index 8e0ff86a..4b70a63c 100644
--- a/src/services/api/openaiShim.ts
+++ b/src/services/api/openaiShim.ts
@@ -45,6 +45,7 @@ interface OpenAIMessage {
     id: string
     type: 'function'
    function: { name: string; arguments: string }
+    extra_content?: Record<string, unknown>
   }>
   tool_call_id?: string
   name?: string
@@ -191,7 +192,12 @@ function convertMessages(
 
   if (toolUses.length > 0) {
     assistantMsg.tool_calls = toolUses.map(
-      (tu: { id?: string; name?: string; input?: unknown }) => ({
+      (tu: {
+        id?: string
+        name?: string
+        input?: unknown
+        extra_content?: Record<string, unknown>
+      }) => ({
         id: tu.id ?? `call_${Math.random().toString(36).slice(2)}`,
         type: 'function' as const,
         function: {
@@ -201,6 +207,7 @@ function convertMessages(
             ? tu.input
             : JSON.stringify(tu.input ?? {}),
         },
+        ...(tu.extra_content ? { extra_content: tu.extra_content } : {}),
       }),
     )
   }
@@ -278,6 +285,7 @@ interface OpenAIStreamChunk {
         id?: string
         type?: string
         function?: { name?: string; arguments?: string }
+        extra_content?: Record<string, unknown>
       }>
     }
     finish_reason: string | null
@@ -420,6 +428,7 @@ async function* openaiStreamToAnthropic(
             id: tc.id,
             name: tc.function.name,
             input: {},
+            ...(tc.extra_content ? { extra_content: tc.extra_content } : {}),
           },
         }
         contentBlockIndex++
@@ -703,6 +712,7 @@ class OpenAIShimMessages {
      tool_calls?: Array<{
         id: string
         function: { name: string; arguments: string }
+        extra_content?: Record<string, unknown>
       }>
     }
     finish_reason?: string
@@ -734,6 +744,7 @@ class OpenAIShimMessages {
           id: tc.id,
           name: tc.function.name,
           input,
+          ...(tc.extra_content ? { extra_content: tc.extra_content } : {}),
         })
       }
     }
diff --git a/src/utils/context.test.ts b/src/utils/context.test.ts
index 17895b88..59647054 100644
--- a/src/utils/context.test.ts
+++ b/src/utils/context.test.ts
@@ -21,7 +21,7 @@ test('deepseek-chat uses provider-specific context and output caps', () => {
   process.env.CLAUDE_CODE_USE_OPENAI = '1'
   delete process.env.CLAUDE_CODE_MAX_OUTPUT_TOKENS
 
-  expect(getContextWindowForModel('deepseek-chat')).toBe(64_000)
+  expect(getContextWindowForModel('deepseek-chat')).toBe(128_000)
   expect(getModelMaxOutputTokens('deepseek-chat')).toEqual({
     default: 8_192,
     upperLimit: 8_192,