Merge pull request #71 from Vasanthdev2004/pr-checks

ci: add automated PR smoke and provider checks
This commit is contained in:
Kevin Codex
2026-04-02 02:33:25 +08:00
committed by GitHub
4 changed files with 241 additions and 14 deletions

View File

@@ -10,6 +10,19 @@ const originalEnv = {
const originalFetch = globalThis.fetch
// Minimal structural type for the shim client as used by these tests:
// exposes beta.messages.create, whose returned promise is augmented with
// withResponse() for consuming the translated streaming events.
// NOTE(review): kept intentionally loose (Record<string, unknown>) — the
// tests only cast to it; it is not the shim's real public type.
type OpenAIShimClient = {
beta: {
messages: {
// Mirrors the Anthropic-SDK-style surface the shim emulates; the
// intersection adds withResponse() so stream tests can iterate raw data.
create: (
params: Record<string, unknown>,
options?: Record<string, unknown>,
) => Promise<unknown> & {
withResponse: () => Promise<{ data: AsyncIterable<Record<string, unknown>> }>
}
}
}
}
function makeSseResponse(lines: string[]): Response {
const encoder = new TextEncoder()
return new Response(
@@ -97,18 +110,7 @@ test('preserves usage from final OpenAI stream chunk with empty choices', async
return makeSseResponse(chunks)
}) as FetchType
const client = createOpenAIShimClient({}) as {
beta: {
messages: {
create: (
params: Record<string, unknown>,
options?: Record<string, unknown>,
) => Promise<unknown> & {
withResponse: () => Promise<{ data: AsyncIterable<Record<string, unknown>> }>
}
}
}
}
const client = createOpenAIShimClient({}) as OpenAIShimClient
const result = await client.beta.messages
.create({
@@ -133,3 +135,180 @@ test('preserves usage from final OpenAI stream chunk with empty choices', async
expect(usageEvent?.usage?.input_tokens).toBe(123)
expect(usageEvent?.usage?.output_tokens).toBe(45)
})
test('preserves Gemini tool call extra_content in follow-up requests', async () => {
  // Capture the outbound OpenAI-format request body so we can inspect the
  // converted tool_calls after the shim translates the Anthropic payload.
  let capturedBody: Record<string, unknown> | undefined
  const completionPayload = {
    id: 'chatcmpl-1',
    model: 'google/gemini-3.1-pro-preview',
    choices: [
      {
        message: {
          role: 'assistant',
          content: 'done',
        },
        finish_reason: 'stop',
      },
    ],
    usage: {
      prompt_tokens: 12,
      completion_tokens: 4,
      total_tokens: 16,
    },
  }
  globalThis.fetch = (async (_input, init) => {
    capturedBody = JSON.parse(String(init?.body))
    return new Response(JSON.stringify(completionPayload), {
      headers: {
        'Content-Type': 'application/json',
      },
    })
  }) as FetchType
  const client = createOpenAIShimClient({}) as OpenAIShimClient
  // Follow-up turn: an assistant tool_use carrying Gemini extra_content,
  // plus the matching tool_result from the user.
  await client.beta.messages.create({
    model: 'google/gemini-3.1-pro-preview',
    system: 'test system',
    messages: [
      { role: 'user', content: 'Use Bash' },
      {
        role: 'assistant',
        content: [
          {
            type: 'tool_use',
            id: 'call_1',
            name: 'Bash',
            input: { command: 'pwd' },
            extra_content: {
              google: {
                thought_signature: 'sig-123',
              },
            },
          },
        ],
      },
      {
        role: 'user',
        content: [
          {
            type: 'tool_result',
            tool_use_id: 'call_1',
            content: 'D:\\repo',
          },
        ],
      },
    ],
    max_tokens: 64,
    stream: false,
  })
  // The converted request must round-trip extra_content on the tool call.
  const outboundMessages = capturedBody?.messages as Array<Record<string, unknown>>
  const assistantEntry = outboundMessages.find(entry => Array.isArray(entry.tool_calls)) as
    | { tool_calls?: Array<Record<string, unknown>> }
    | undefined
  expect(assistantEntry?.tool_calls?.[0]).toMatchObject({
    id: 'call_1',
    type: 'function',
    function: {
      name: 'Bash',
      arguments: JSON.stringify({ command: 'pwd' }),
    },
    extra_content: {
      google: {
        thought_signature: 'sig-123',
      },
    },
  })
})
test('preserves Gemini tool call extra_content from streaming chunks', async () => {
  // First chunk delivers the complete tool call (including Gemini
  // extra_content); the second chunk only terminates with finish_reason.
  const streamPayloads = [
    {
      id: 'chatcmpl-1',
      object: 'chat.completion.chunk',
      model: 'google/gemini-3.1-pro-preview',
      choices: [
        {
          index: 0,
          delta: {
            role: 'assistant',
            tool_calls: [
              {
                index: 0,
                id: 'function-call-1',
                type: 'function',
                extra_content: {
                  google: {
                    thought_signature: 'sig-stream',
                  },
                },
                function: {
                  name: 'Bash',
                  arguments: '{"command":"pwd"}',
                },
              },
            ],
          },
          finish_reason: null,
        },
      ],
    },
    {
      id: 'chatcmpl-1',
      object: 'chat.completion.chunk',
      model: 'google/gemini-3.1-pro-preview',
      choices: [
        {
          index: 0,
          delta: {},
          finish_reason: 'tool_calls',
        },
      ],
    },
  ]
  globalThis.fetch = (async (_input, _init) =>
    makeSseResponse(makeStreamChunks(streamPayloads))) as FetchType
  const client = createOpenAIShimClient({}) as OpenAIShimClient
  const result = await client.beta.messages
    .create({
      model: 'google/gemini-3.1-pro-preview',
      system: 'test system',
      messages: [{ role: 'user', content: 'Use Bash' }],
      max_tokens: 64,
      stream: true,
    })
    .withResponse()
  // Drain the translated Anthropic-style event stream.
  const collected: Array<Record<string, unknown>> = []
  for await (const event of result.data) {
    collected.push(event)
  }
  // Locate the content_block_start event that opens the tool_use block.
  const isToolUseStart = (event: Record<string, unknown>): boolean => {
    if (event.type !== 'content_block_start') return false
    const block = event.content_block
    return (
      typeof block === 'object' &&
      block !== null &&
      (block as Record<string, unknown>).type === 'tool_use'
    )
  }
  const startEvent = collected.find(isToolUseStart) as
    | { content_block?: Record<string, unknown> }
    | undefined
  expect(startEvent?.content_block).toMatchObject({
    type: 'tool_use',
    id: 'function-call-1',
    name: 'Bash',
    extra_content: {
      google: {
        thought_signature: 'sig-stream',
      },
    },
  })
})

View File

@@ -45,6 +45,7 @@ interface OpenAIMessage {
id: string
type: 'function'
function: { name: string; arguments: string }
extra_content?: Record<string, unknown>
}>
tool_call_id?: string
name?: string
@@ -191,7 +192,12 @@ function convertMessages(
if (toolUses.length > 0) {
assistantMsg.tool_calls = toolUses.map(
(tu: { id?: string; name?: string; input?: unknown }) => ({
(tu: {
id?: string
name?: string
input?: unknown
extra_content?: Record<string, unknown>
}) => ({
id: tu.id ?? `call_${Math.random().toString(36).slice(2)}`,
type: 'function' as const,
function: {
@@ -201,6 +207,7 @@ function convertMessages(
? tu.input
: JSON.stringify(tu.input ?? {}),
},
...(tu.extra_content ? { extra_content: tu.extra_content } : {}),
}),
)
}
@@ -278,6 +285,7 @@ interface OpenAIStreamChunk {
id?: string
type?: string
function?: { name?: string; arguments?: string }
extra_content?: Record<string, unknown>
}>
}
finish_reason: string | null
@@ -420,6 +428,7 @@ async function* openaiStreamToAnthropic(
id: tc.id,
name: tc.function.name,
input: {},
...(tc.extra_content ? { extra_content: tc.extra_content } : {}),
},
}
contentBlockIndex++
@@ -703,6 +712,7 @@ class OpenAIShimMessages {
tool_calls?: Array<{
id: string
function: { name: string; arguments: string }
extra_content?: Record<string, unknown>
}>
}
finish_reason?: string
@@ -734,6 +744,7 @@ class OpenAIShimMessages {
id: tc.id,
name: tc.function.name,
input,
...(tc.extra_content ? { extra_content: tc.extra_content } : {}),
})
}
}

View File

@@ -21,7 +21,7 @@ test('deepseek-chat uses provider-specific context and output caps', () => {
process.env.CLAUDE_CODE_USE_OPENAI = '1'
delete process.env.CLAUDE_CODE_MAX_OUTPUT_TOKENS
expect(getContextWindowForModel('deepseek-chat')).toBe(64_000)
expect(getContextWindowForModel('deepseek-chat')).toBe(128_000)
expect(getModelMaxOutputTokens('deepseek-chat')).toEqual({
default: 8_192,
upperLimit: 8_192,