Compare commits

..

4 Commits

Author SHA1 Message Date
gnanam1990
b5dbb71a44 fix: preserve explicit ollama startup intent 2026-04-07 19:40:47 +05:30
gnanam1990
b2cabdd950 fix: preserve explicit provider intent for active profiles 2026-04-07 18:47:35 +05:30
gnanam1990
139610950c fix: preserve explicit provider startup intent 2026-04-07 14:50:20 +05:30
gnanam1990
65dd19cf87 fix: preserve explicit startup provider selection 2026-04-07 10:08:30 +05:30
34 changed files with 408 additions and 808 deletions

1
.gitignore vendored
View File

@@ -10,3 +10,4 @@ GEMINI.md
package-lock.json
/.claude
coverage/
.worktrees/

View File

@@ -68,11 +68,11 @@ When a user describes what they want an agent to do, you will:
assistant: "Now let me use the test-runner agent to run the tests"
</example>
- <example>
Context: User is creating an agent for Claude Code product questions.
user: "How do I configure Claude Code hooks?"
assistant: "I'm going to use the ${AGENT_TOOL_NAME} tool to launch the claude-code-guide agent to answer the question"
Context: User is creating an agent to respond to the word "hello" with a friendly joke.
user: "Hello"
assistant: "I'm going to use the ${AGENT_TOOL_NAME} tool to launch the greeting-responder agent to respond with a friendly joke"
<commentary>
Since the user is asking how to use Claude Code, use the claude-code-guide agent.
Since the user is greeting, use the greeting-responder agent to respond with a friendly joke.
</commentary>
</example>
- If the user mentioned or implied that the agent should be used proactively, you should include examples of this.

View File

@@ -8,34 +8,6 @@ import {
validateProviderEnvOrExit,
} from '../utils/providerValidation.js'
// OpenClaude: polyfill globalThis.File for Node < 20.
// undici v7 references `File` at module evaluation time (webidl type
// assertions). Node 18 lacks the global, causing a ReferenceError inside
// the bundled __commonJS require chain which deadlocks the process when a
// proxy is configured (configureGlobalAgents → require_undici).
// eslint-disable-next-line custom-rules/no-top-level-side-effects
if (typeof globalThis.File === 'undefined') {
try {
// Node 18.13+ exposes File in node:buffer but not as a global.
// eslint-disable-next-line @typescript-eslint/no-require-imports
const { File: NodeFile } = require('node:buffer')
// @ts-expect-error -- polyfilling missing global
globalThis.File = NodeFile
} catch {
// Absolute fallback: stub so `MakeTypeAssertion(File)` doesn't throw.
// @ts-expect-error -- minimal polyfill
globalThis.File = class File extends Blob {
name: string
lastModified: number
constructor(parts: BlobPart[], name: string, opts?: FilePropertyBag) {
super(parts, opts)
this.name = name
this.lastModified = opts?.lastModified ?? Date.now()
}
}
}
}
// OpenClaude: disable experimental API betas by default.
// Tool search (defer_loading), global cache scope, and context management
// require internal API support not available to external accounts → 500.

View File

@@ -238,7 +238,7 @@ import { usePromptsFromClaudeInChrome } from 'src/hooks/usePromptsFromClaudeInCh
import { getTipToShowOnSpinner, recordShownTip } from 'src/services/tips/tipScheduler.js';
import type { Theme } from 'src/utils/theme.js';
import { isPromptTypingSuppressionActive } from './replInputSuppression.js';
import { shouldRunStartupChecks } from './replStartupGates.js';
import { shouldStartStartupChecks } from './replStartupGates.js';
import { checkAndDisableBypassPermissionsIfNeeded, checkAndDisableAutoModeIfNeeded, useKickOffCheckAndDisableBypassPermissionsIfNeeded, useKickOffCheckAndDisableAutoModeIfNeeded } from 'src/utils/permissions/bypassPermissionsKillswitch.js';
import { SandboxManager } from 'src/utils/sandbox/sandbox-adapter.js';
import { SANDBOX_NETWORK_ACCESS_TOOL_NAME } from 'src/cli/structuredIO.js';
@@ -785,17 +785,6 @@ export function REPL({
});
const tasksV2 = useTasksV2WithCollapseEffect();
// Start background plugin installations
// SECURITY: This code is guaranteed to run ONLY after the "trust this folder" dialog
// has been confirmed by the user. The trust dialog is shown in cli.tsx (line ~387)
// before the REPL component is rendered. The dialog blocks execution until the user
// accepts, and only then is the REPL component mounted and this effect runs.
// This ensures that plugin installations from repository and user settings only
// happen after explicit user consent to trust the current working directory.
// Deferring startup checks is handled below (after promptTypingSuppressionActive
// is declared) to avoid temporal dead zone issues.
// Allow Claude in Chrome MCP to send prompts through MCP notifications
// and sync permission mode changes to the Chrome extension
usePromptsFromClaudeInChrome(isRemoteSession ? EMPTY_MCP_CLIENTS : mcpClients, toolPermissionContext.mode);
@@ -1336,6 +1325,7 @@ export function REPL({
const [inputValue, setInputValueRaw] = useState(() => consumeEarlyInput());
const inputValueRef = useRef(inputValue);
inputValueRef.current = inputValue;
const startupChecksStartedRef = useRef(false);
const promptTypingSuppressionActive = isPromptTypingSuppressionActive(isPromptInputActive, inputValue);
const insertTextRef = useRef<{
insert: (text: string) => void;
@@ -1343,6 +1333,24 @@ export function REPL({
cursorOffset: number;
} | null>(null);
// Start background plugin installations after the initial input window is idle.
// SECURITY: This still runs only after the "trust this folder" dialog has been
// confirmed because the REPL is not mounted until that dialog completes.
useEffect(() => {
if (
!shouldStartStartupChecks({
isRemoteSession,
promptTypingSuppressionActive,
startupChecksStarted: startupChecksStartedRef.current,
})
) {
return;
}
startupChecksStartedRef.current = true;
void performStartupChecks(setAppState);
}, [isRemoteSession, promptTypingSuppressionActive, setAppState]);
// Wrap setInputValue to co-locate suppression state updates.
// Both setState calls happen in the same synchronous context so React
// batches them into a single render, eliminating the extra render that
@@ -1428,25 +1436,6 @@ export function REPL({
const activeRemote = sshRemote.isRemoteMode ? sshRemote : directConnect.isRemoteMode ? directConnect : remoteSession;
const [pastedContents, setPastedContents] = useState<Record<number, PastedContent>>({});
const [submitCount, setSubmitCount] = useState(0);
// Defer startup checks until the user has submitted their first message.
// A timeout or grace period is insufficient (issue #363): if the user pauses
// before typing, startup checks can still fire and recommendation dialogs
// steal focus. Only the user's first submission guarantees the prompt was
// the first thing they interacted with.
const startupChecksStartedRef = React.useRef(false);
const hasHadFirstSubmission = (submitCount ?? 0) > 0;
useEffect(() => {
if (isRemoteSession) return;
if (startupChecksStartedRef.current) return;
if (!shouldRunStartupChecks({
isRemoteSession,
hasStarted: startupChecksStartedRef.current,
hasHadFirstSubmission,
})) return;
startupChecksStartedRef.current = true;
void performStartupChecks(setAppState);
}, [setAppState, isRemoteSession, hasHadFirstSubmission]);
// Ref instead of state to avoid triggering React re-renders on every
// streaming text_delta. The spinner reads this via its animation timer.
const responseLengthRef = useRef(0);
@@ -2079,14 +2068,13 @@ export function REPL({
if (allowDialogsWithAnimation && showRemoteCallout) return 'remote-callout';
// LSP plugin recommendation (lowest priority - non-blocking suggestion)
// Suppress during startup window to prevent stealing focus from the prompt (issue #363)
if (allowDialogsWithAnimation && lspRecommendation && startupChecksStartedRef.current) return 'lsp-recommendation';
if (allowDialogsWithAnimation && lspRecommendation) return 'lsp-recommendation';
// Plugin hint from CLI/SDK stderr (same priority band as LSP rec)
if (allowDialogsWithAnimation && hintRecommendation && startupChecksStartedRef.current) return 'plugin-hint';
if (allowDialogsWithAnimation && hintRecommendation) return 'plugin-hint';
// Desktop app upsell (max 3 launches, lowest priority)
if (allowDialogsWithAnimation && showDesktopUpsellStartup && startupChecksStartedRef.current) return 'desktop-upsell';
if (allowDialogsWithAnimation && showDesktopUpsellStartup) return 'desktop-upsell';
return undefined;
}
const focusedInputDialog = getFocusedInputDialog();

View File

@@ -1,53 +1,44 @@
import { describe, expect, test } from 'bun:test'
import { shouldStartStartupChecks } from './replStartupGates.js'
import { shouldRunStartupChecks } from './replStartupGates.js'
describe('shouldRunStartupChecks', () => {
test('runs checks after first message submission', () => {
expect(shouldRunStartupChecks({
isRemoteSession: false,
hasStarted: false,
hasHadFirstSubmission: true,
})).toBe(true)
describe('shouldStartStartupChecks', () => {
test('returns false for remote sessions', () => {
expect(
shouldStartStartupChecks({
isRemoteSession: true,
promptTypingSuppressionActive: false,
startupChecksStarted: false,
}),
).toBe(false)
})
test('skips checks in remote sessions even after submission', () => {
expect(shouldRunStartupChecks({
isRemoteSession: true,
hasStarted: false,
hasHadFirstSubmission: true,
})).toBe(false)
test('returns false while prompt typing suppression is active', () => {
expect(
shouldStartStartupChecks({
isRemoteSession: false,
promptTypingSuppressionActive: true,
startupChecksStarted: false,
}),
).toBe(false)
})
test('skips checks if already started', () => {
expect(shouldRunStartupChecks({
isRemoteSession: false,
hasStarted: true,
hasHadFirstSubmission: true,
})).toBe(false)
test('returns true once local startup is idle and checks have not started', () => {
expect(
shouldStartStartupChecks({
isRemoteSession: false,
promptTypingSuppressionActive: false,
startupChecksStarted: false,
}),
).toBe(true)
})
test('does not run checks before first submission', () => {
expect(shouldRunStartupChecks({
isRemoteSession: false,
hasStarted: false,
hasHadFirstSubmission: false,
})).toBe(false)
test('returns false after startup checks have already started', () => {
expect(
shouldStartStartupChecks({
isRemoteSession: false,
promptTypingSuppressionActive: false,
startupChecksStarted: true,
}),
).toBe(false)
})
test('does not run checks when idle before first submission', () => {
expect(shouldRunStartupChecks({
isRemoteSession: false,
hasStarted: false,
hasHadFirstSubmission: false,
})).toBe(false)
})
test('skips checks in remote session regardless of other conditions', () => {
expect(shouldRunStartupChecks({
isRemoteSession: true,
hasStarted: false,
hasHadFirstSubmission: false,
})).toBe(false)
})
})
})

View File

@@ -1,35 +1,11 @@
/**
* Startup gates for the REPL.
*
* Prevents startup plugin checks and recommendation dialogs from stealing
* focus before the user has interacted with the prompt.
*
* This addresses the root cause of issue #363: on mount, performStartupChecks
* triggers plugin loading, which populates trackedFiles, which triggers
* useLspPluginRecommendation to surface an LSP recommendation dialog. Since
* promptTypingSuppressionActive is false before the user has typed anything,
* getFocusedInputDialog() returns the dialog, unmounting PromptInput entirely.
*
* The fix gates startup checks on actual prompt interaction. A pure timeout
* or grace period is insufficient because pausing before typing would still
* allow dialogs to steal focus. Only the user's first submission guarantees
* the prompt is no longer in the vulnerable pre-interaction window.
*/
/**
* Determines whether startup checks should run.
*
* Startup checks are deferred until the user has submitted their first
* message. This guarantees the prompt was the first thing the user interacted
* with, so no recommendation dialog can steal focus before the first keystroke.
*/
export function shouldRunStartupChecks(options: {
isRemoteSession: boolean;
hasStarted: boolean;
hasHadFirstSubmission: boolean;
export function shouldStartStartupChecks(options: {
isRemoteSession: boolean
promptTypingSuppressionActive: boolean
startupChecksStarted: boolean
}): boolean {
if (options.isRemoteSession) return false;
if (options.hasStarted) return false;
if (!options.hasHadFirstSubmission) return false;
return true;
}
return (
!options.isRemoteSession &&
!options.promptTypingSuppressionActive &&
!options.startupChecksStarted
)
}

View File

@@ -201,117 +201,6 @@ describe('Codex request translation', () => {
])
})
test('preserves Grep tool pattern field in Codex strict schemas', () => {
const tools = convertToolsToResponsesTools([
{
name: 'Grep',
description: 'Search file contents',
input_schema: {
type: 'object',
properties: {
pattern: { type: 'string', description: 'Search pattern' },
path: { type: 'string' },
},
required: ['pattern'],
additionalProperties: false,
},
},
])
expect(tools).toEqual([
{
type: 'function',
name: 'Grep',
description: 'Search file contents',
parameters: {
type: 'object',
properties: {
pattern: { type: 'string', description: 'Search pattern' },
path: { type: 'string' },
},
required: ['pattern', 'path'],
additionalProperties: false,
},
strict: true,
},
])
})
test('preserves Glob tool pattern field in Codex strict schemas', () => {
const tools = convertToolsToResponsesTools([
{
name: 'Glob',
description: 'Find files by pattern',
input_schema: {
type: 'object',
properties: {
pattern: { type: 'string', description: 'Glob pattern' },
path: { type: 'string' },
},
required: ['pattern'],
additionalProperties: false,
},
},
])
expect(tools).toEqual([
{
type: 'function',
name: 'Glob',
description: 'Find files by pattern',
parameters: {
type: 'object',
properties: {
pattern: { type: 'string', description: 'Glob pattern' },
path: { type: 'string' },
},
required: ['pattern', 'path'],
additionalProperties: false,
},
strict: true,
},
])
})
test('strips validator pattern keyword but keeps string field named pattern in Codex schemas', () => {
const tools = convertToolsToResponsesTools([
{
name: 'RegexProbe',
description: 'Probe regex schema handling',
input_schema: {
type: 'object',
properties: {
pattern: {
type: 'string',
pattern: '^[a-z]+$',
},
},
required: ['pattern'],
additionalProperties: false,
},
},
])
expect(tools).toEqual([
{
type: 'function',
name: 'RegexProbe',
description: 'Probe regex schema handling',
parameters: {
type: 'object',
properties: {
pattern: {
type: 'string',
},
},
required: ['pattern'],
additionalProperties: false,
},
strict: true,
},
])
})
test('removes unsupported uri format from strict Responses schemas', () => {
const tools = convertToolsToResponsesTools([
{

View File

@@ -261,73 +261,6 @@ test('preserves Gemini tool call extra_content in follow-up requests', async ()
})
})
test('preserves Grep tool pattern field in OpenAI-compatible schemas', async () => {
let requestBody: Record<string, unknown> | undefined
globalThis.fetch = (async (_input, init) => {
requestBody = JSON.parse(String(init?.body))
return new Response(
JSON.stringify({
id: 'chatcmpl-grep-schema',
model: 'qwen/qwen3.6-plus',
choices: [
{
message: {
role: 'assistant',
content: 'done',
},
finish_reason: 'stop',
},
],
usage: {
prompt_tokens: 12,
completion_tokens: 4,
total_tokens: 16,
},
}),
{
headers: {
'Content-Type': 'application/json',
},
},
)
}) as FetchType
const client = createOpenAIShimClient({}) as OpenAIShimClient
await client.beta.messages.create({
model: 'qwen/qwen3.6-plus',
system: 'test system',
messages: [{ role: 'user', content: 'Use Grep' }],
tools: [
{
name: 'Grep',
description: 'Search file contents',
input_schema: {
type: 'object',
properties: {
pattern: { type: 'string', description: 'Search pattern' },
path: { type: 'string' },
},
required: ['pattern'],
additionalProperties: false,
},
},
],
max_tokens: 64,
stream: false,
})
const tools = requestBody?.tools as Array<Record<string, unknown>> | undefined
const grepTool = tools?.find(tool => (tool.function as Record<string, unknown>)?.name === 'Grep') as
| { function?: { parameters?: { properties?: Record<string, unknown>; required?: string[] } } }
| undefined
expect(Object.keys(grepTool?.function?.parameters?.properties ?? {})).toContain('pattern')
expect(grepTool?.function?.parameters?.required).toContain('pattern')
})
test('does not infer Gemini mode from OPENAI_BASE_URL path substrings', async () => {
let capturedAuthorization: string | null = null

View File

@@ -195,12 +195,10 @@ function convertContentBlocks(
// handled separately
break
case 'thinking':
case 'redacted_thinking':
// Strip thinking blocks for OpenAI-compatible providers.
// These are Anthropic-specific content types that 3P providers
// don't understand. Serializing them as <thinking> text corrupts
// multi-turn context: the model sees the tags as part of its
// previous reply and may mimic or misattribute them.
// Append thinking as text with a marker for models that support reasoning
if (block.thinking) {
parts.push({ type: 'text', text: `<thinking>${block.thinking}</thinking>` })
}
break
default:
if (block.text) {

View File

@@ -1,33 +0,0 @@
import { describe, expect, test } from 'bun:test'
import { SkillTool } from '../../tools/SkillTool/SkillTool.js'
import {
getSchemaValidationErrorOverride,
getSchemaValidationToolUseResult,
} from './toolExecution.js'
describe('getSchemaValidationErrorOverride', () => {
test('returns actionable missing-skill error for SkillTool', () => {
expect(getSchemaValidationErrorOverride(SkillTool, {})).toBe(
'Missing skill name. Pass the slash command name as the skill parameter (e.g., skill: "commit" for /commit, skill: "review-pr" for /review-pr).',
)
})
test('does not override unrelated tool schema failures', () => {
expect(getSchemaValidationErrorOverride({ name: 'Read' } as never, {})).toBe(
null,
)
})
test('does not override SkillTool when skill is present', () => {
expect(
getSchemaValidationErrorOverride(SkillTool, { skill: 'commit' }),
).toBe(null)
})
test('uses the actionable override for structured toolUseResult too', () => {
expect(getSchemaValidationToolUseResult(SkillTool, {} as never)).toBe(
'InputValidationError: Missing skill name. Pass the slash command name as the skill parameter (e.g., skill: "commit" for /commit, skill: "review-pr" for /review-pr).',
)
})
})

View File

@@ -43,7 +43,6 @@ import { FILE_READ_TOOL_NAME } from '../../tools/FileReadTool/prompt.js'
import { FILE_WRITE_TOOL_NAME } from '../../tools/FileWriteTool/prompt.js'
import { NOTEBOOK_EDIT_TOOL_NAME } from '../../tools/NotebookEditTool/constants.js'
import { POWERSHELL_TOOL_NAME } from '../../tools/PowerShellTool/toolName.js'
import { SKILL_TOOL_NAME } from '../../tools/SkillTool/constants.js'
import { parseGitCommitId } from '../../tools/shared/gitOperationTracking.js'
import {
isDeferredTool,
@@ -597,31 +596,6 @@ export function buildSchemaNotSentHint(
)
}
export function getSchemaValidationErrorOverride(
tool: Tool,
input: unknown,
): string | null {
if (tool.name !== SKILL_TOOL_NAME || !input || typeof input !== 'object') {
return null
}
const skill = (input as { skill?: unknown }).skill
if (skill === undefined || skill === null) {
return 'Missing skill name. Pass the slash command name as the skill parameter (e.g., skill: "commit" for /commit, skill: "review-pr" for /review-pr).'
}
return null
}
export function getSchemaValidationToolUseResult(
tool: Tool,
input: unknown,
fallbackMessage?: string,
): string {
const override = getSchemaValidationErrorOverride(tool, input)
return `InputValidationError: ${override ?? fallbackMessage ?? ''}`
}
async function checkPermissionsAndCallTool(
tool: Tool,
toolUseID: string,
@@ -640,9 +614,7 @@ async function checkPermissionsAndCallTool(
// Validate input types with zod (surprisingly, the model is not great at generating valid input)
const parsedInput = tool.inputSchema.safeParse(input)
if (!parsedInput.success) {
const fallbackErrorContent = formatZodValidationError(tool.name, parsedInput.error)
let errorContent =
getSchemaValidationErrorOverride(tool, input) ?? fallbackErrorContent
let errorContent = formatZodValidationError(tool.name, parsedInput.error)
const schemaHint = buildSchemaNotSentHint(
tool,
@@ -700,11 +672,7 @@ async function checkPermissionsAndCallTool(
tool_use_id: toolUseID,
},
],
toolUseResult: getSchemaValidationToolUseResult(
tool,
input,
parsedInput.error.message,
),
toolUseResult: `InputValidationError: ${parsedInput.error.message}`,
sourceToolAssistantUUID: assistantMessage.uuid,
}),
},

View File

@@ -156,24 +156,34 @@ ${AGENT_TOOL_NAME}({
const currentExamples = `Example usage:
<example_agent_descriptions>
"claude-code-guide": use this agent when the user asks how Claude Code works or how to use its features
"statusline-setup": use this agent to configure the user's Claude Code status line setting
"test-runner": use this agent after you are done writing code to run tests
"greeting-responder": use this agent to respond to user greetings with a friendly joke
</example_agent_descriptions>
<example>
user: "How do I configure Claude Code hooks?"
user: "Please write a function that checks if a number is prime"
assistant: I'm going to use the ${FILE_WRITE_TOOL_NAME} tool to write the following code:
<code>
function isPrime(n) {
if (n <= 1) return false
for (let i = 2; i * i <= n; i++) {
if (n % i === 0) return false
}
return true
}
</code>
<commentary>
This is a Claude Code usage question, so use the claude-code-guide agent
Since a significant piece of code was written and the task was completed, now use the test-runner agent to run the tests
</commentary>
assistant: Uses the ${AGENT_TOOL_NAME} tool to launch the claude-code-guide agent
assistant: Uses the ${AGENT_TOOL_NAME} tool to launch the test-runner agent
</example>
<example>
user: "Set up my Claude Code status line"
user: "Hello"
<commentary>
This matches the statusline-setup agent, so use it to configure the setting
Since the user is greeting, use the greeting-responder agent to respond with a friendly joke
</commentary>
assistant: "I'm going to use the ${AGENT_TOOL_NAME} tool to launch the statusline-setup agent"
assistant: "I'm going to use the ${AGENT_TOOL_NAME} tool to launch the greeting-responder agent"
</example>
`

View File

@@ -1,31 +0,0 @@
import { describe, expect, test } from 'bun:test'
import { SkillTool } from './SkillTool.js'
describe('SkillTool missing parameter handling', () => {
test('missing skill stays required at the schema level', async () => {
const parsed = SkillTool.inputSchema.safeParse({})
expect(parsed.success).toBe(false)
})
test('validateInput still returns an actionable error when called with missing skill', async () => {
const result = await SkillTool.validateInput?.({} as never, {
options: { tools: [] },
messages: [],
} as never)
expect(result).toEqual({
result: false,
message:
'Missing skill name. Pass the slash command name as the skill parameter (e.g., skill: "commit" for /commit, skill: "review-pr" for /review-pr).',
errorCode: 1,
})
})
test('valid skill input still parses and validates', async () => {
const parsed = SkillTool.inputSchema.safeParse({ skill: 'commit' })
expect(parsed.success).toBe(true)
})
})

View File

@@ -352,16 +352,6 @@ export const SkillTool: Tool<InputSchema, Output, Progress> = buildTool({
toAutoClassifierInput: ({ skill }) => skill ?? '',
async validateInput({ skill }, context): Promise<ValidationResult> {
if (!skill || typeof skill !== 'string') {
return {
result: false,
message:
'Missing skill name. Pass the slash command name as the skill parameter ' +
'(e.g., skill: "commit" for /commit, skill: "review-pr" for /review-pr).',
errorCode: 1,
}
}
// Skills are just skill names, no arguments
const trimmed = skill.trim()
if (!trimmed) {
@@ -444,7 +434,7 @@ export const SkillTool: Tool<InputSchema, Output, Progress> = buildTool({
context,
): Promise<PermissionDecision> {
// Skills are just skill names, no arguments
const trimmed = skill ?? ''
const trimmed = skill.trim()
// Remove leading slash if present (for compatibility)
const commandName = trimmed.startsWith('/') ? trimmed.substring(1) : trimmed
@@ -602,7 +592,7 @@ export const SkillTool: Tool<InputSchema, Output, Progress> = buildTool({
// - Skill is a prompt-based skill
// Skills are just names, with optional arguments
const trimmed = skill ?? ''
const trimmed = skill.trim()
// Remove leading slash if present (for compatibility)
const commandName = trimmed.startsWith('/') ? trimmed.substring(1) : trimmed

View File

@@ -1,7 +1,6 @@
import { expect, test } from 'bun:test'
import { z } from 'zod/v4'
import { getEmptyToolPermissionContext, type Tool, type Tools } from '../Tool.js'
import { SkillTool } from '../tools/SkillTool/SkillTool.js'
import { toolToAPISchema } from './api.js'
test('toolToAPISchema preserves provider-specific schema keywords in input_schema', async () => {
@@ -65,16 +64,3 @@ test('toolToAPISchema preserves provider-specific schema keywords in input_schem
},
})
})
test('toolToAPISchema keeps skill required for SkillTool', async () => {
const schema = await toolToAPISchema(SkillTool, {
getToolPermissionContext: async () => getEmptyToolPermissionContext(),
tools: [] as unknown as Tools,
agents: [],
})
expect((schema as { input_schema: unknown }).input_schema).toMatchObject({
type: 'object',
required: ['skill'],
})
})

View File

@@ -94,22 +94,3 @@ test('gpt-5.4 family keeps large max output overrides within provider limits', (
expect(getMaxOutputTokensForModel('gpt-5.4-mini')).toBe(128_000)
expect(getMaxOutputTokensForModel('gpt-5.4-nano')).toBe(128_000)
})
test('MiniMax-M2.7 uses explicit provider-specific context and output caps', () => {
process.env.CLAUDE_CODE_USE_OPENAI = '1'
delete process.env.CLAUDE_CODE_MAX_OUTPUT_TOKENS
expect(getContextWindowForModel('MiniMax-M2.7')).toBe(204_800)
expect(getModelMaxOutputTokens('MiniMax-M2.7')).toEqual({
default: 131_072,
upperLimit: 131_072,
})
expect(getMaxOutputTokensForModel('MiniMax-M2.7')).toBe(131_072)
})
test('unknown openai-compatible models still use the conservative fallback window', () => {
process.env.CLAUDE_CODE_USE_OPENAI = '1'
delete process.env.CLAUDE_CODE_MAX_OUTPUT_TOKENS
expect(getContextWindowForModel('some-unknown-3p-model')).toBe(8_000)
})

View File

@@ -72,23 +72,16 @@ export function getContextWindowForModel(
return 1_000_000
}
// OpenAI-compatible provider — use known context windows for the model.
// Unknown models get a conservative 8k default so auto-compact triggers
// before hitting a hard context_window_exceeded error.
const isOpenAIProvider =
// OpenAI-compatible provider — use known context windows for the model
if (
isEnvTruthy(process.env.CLAUDE_CODE_USE_OPENAI) ||
isEnvTruthy(process.env.CLAUDE_CODE_USE_GEMINI) ||
isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB)
if (isOpenAIProvider) {
) {
const openaiWindow = getOpenAIContextWindow(model)
if (openaiWindow !== undefined) {
return openaiWindow
}
console.error(
`[context] Warning: model "${model}" not in context window table — using conservative 8k default. ` +
'Add it to src/utils/model/openaiContextWindows.ts for accurate compaction.',
)
return 8_000
}
const cap = getModelCapability(model)

View File

@@ -69,93 +69,3 @@ test('loadConversationForResume rejects oversized transcripts before resume hook
)
expect(hookSpy).not.toHaveBeenCalled()
})
test('deserializeMessagesWithInterruptDetection strips thinking blocks only for OpenAI-compatible providers', async () => {
const serializedMessages = [
user(id(10), 'hello'),
{
type: 'assistant',
uuid: id(11),
parentUuid: id(10),
timestamp: ts,
cwd: '/tmp',
sessionId,
version: 'test',
message: {
role: 'assistant',
content: [
{ type: 'thinking', thinking: 'secret reasoning' },
{ type: 'text', text: 'visible reply' },
],
},
},
{
type: 'assistant',
uuid: id(12),
parentUuid: id(11),
timestamp: ts,
cwd: '/tmp',
sessionId,
version: 'test',
message: {
role: 'assistant',
content: [{ type: 'thinking', thinking: 'only hidden reasoning' }],
},
},
user(id(13), 'follow up'),
]
mock.module('./model/providers.js', () => ({
getAPIProvider: () => 'openai',
isOpenAICompatibleProvider: (provider: string) =>
provider === 'openai' ||
provider === 'gemini' ||
provider === 'github' ||
provider === 'codex',
}))
const openaiModule = await import(`./conversationRecovery.ts?provider=openai-${Date.now()}`)
const thirdParty = openaiModule.deserializeMessagesWithInterruptDetection(serializedMessages as never[])
const thirdPartyAssistantMessages = thirdParty.messages.filter(
message => message.type === 'assistant',
)
expect(thirdPartyAssistantMessages).toHaveLength(2)
expect(thirdPartyAssistantMessages[0]?.message?.content).toEqual([
{ type: 'text', text: 'visible reply' },
])
expect(
JSON.stringify(thirdPartyAssistantMessages.map(message => message.message?.content)),
).not.toContain('secret reasoning')
expect(
JSON.stringify(thirdPartyAssistantMessages.map(message => message.message?.content)),
).not.toContain('only hidden reasoning')
mock.restore()
mock.module('./model/providers.js', () => ({
getAPIProvider: () => 'bedrock',
isOpenAICompatibleProvider: (provider: string) =>
provider === 'openai' ||
provider === 'gemini' ||
provider === 'github' ||
provider === 'codex',
}))
const bedrockModule = await import(`./conversationRecovery.ts?provider=bedrock-${Date.now()}`)
const anthropicCompatible = bedrockModule.deserializeMessagesWithInterruptDetection(serializedMessages as never[])
const anthropicAssistantMessages = anthropicCompatible.messages.filter(
message => message.type === 'assistant',
)
expect(anthropicAssistantMessages).toHaveLength(2)
expect(anthropicAssistantMessages[0]?.message?.content).toEqual([
{ type: 'thinking', thinking: 'secret reasoning' },
{ type: 'text', text: 'visible reply' },
])
expect(
JSON.stringify(anthropicAssistantMessages.map(message => message.message?.content)),
).toContain('secret reasoning')
expect(
JSON.stringify(anthropicAssistantMessages.map(message => message.message?.content)),
).not.toContain('only hidden reasoning')
})

View File

@@ -13,7 +13,6 @@ const originalSimple = process.env.CLAUDE_CODE_SIMPLE
const sessionId = '00000000-0000-4000-8000-000000001999'
const ts = '2026-04-02T00:00:00.000Z'
function id(n: number): string {
return `00000000-0000-4000-8000-${String(n).padStart(12, '0')}`
}
@@ -77,3 +76,4 @@ test('loadConversationForResume rejects oversized reconstructed transcripts', as
'Reconstructed transcript is too large to resume safely',
)
})

View File

@@ -24,7 +24,6 @@ import {
type FileHistorySnapshot,
} from './fileHistory.js'
import { logError } from './log.js'
import { getAPIProvider } from './model/providers.js'
import {
createAssistantMessage,
createUserMessage,
@@ -178,25 +177,6 @@ export type DeserializeResult = {
turnInterruptionState: TurnInterruptionState
}
/**
* Remove thinking/redacted_thinking content blocks from assistant messages.
* Messages that become empty after stripping are removed entirely.
*/
function stripThinkingBlocks(messages: NormalizedMessage[]): NormalizedMessage[] {
return messages.reduce<NormalizedMessage[]>((acc, msg) => {
if (msg.type !== 'assistant' || !Array.isArray(msg.message?.content)) {
acc.push(msg)
return acc
}
const filtered = msg.message.content.filter(
(block: { type?: string }) => block.type !== 'thinking' && block.type !== 'redacted_thinking',
)
if (filtered.length === 0) return acc
acc.push({ ...msg, message: { ...msg.message, content: filtered } })
return acc
}, [])
}
/**
* Deserializes messages from a log file into the format expected by the REPL.
* Filters unresolved tool uses, orphaned thinking messages, and appends a
@@ -247,19 +227,10 @@ export function deserializeMessagesWithInterruptDetection(
filteredToolUses,
) as NormalizedMessage[]
// Strip thinking/redacted_thinking content blocks from assistant messages
// when resuming against a 3P provider. These Anthropic-specific blocks cause
// 400 errors or context corruption on OpenAI-compatible providers (issue #248 finding 5).
const provider = getAPIProvider()
const isThirdPartyProvider = provider !== 'firstParty' && provider !== 'bedrock' && provider !== 'vertex' && provider !== 'foundry'
const thinkingStripped = isThirdPartyProvider
? stripThinkingBlocks(filteredThinking)
: filteredThinking
// Filter out assistant messages with only whitespace text content.
// This can happen when model outputs "\n\n" before thinking, user cancels mid-stream.
const filteredMessages = filterWhitespaceOnlyAssistantMessages(
thinkingStripped,
filteredThinking,
) as NormalizedMessage[]
const internalState = detectTurnInterruption(filteredMessages)

View File

@@ -8,6 +8,7 @@ import {
} from './managedEnvConstants.js'
import { clearMTLSCache } from './mtls.js'
import { clearProxyCache, configureGlobalAgents } from './proxy.js'
import { filterSettingsEnvForExplicitProvider } from './providerEnvSelection.js'
import { applyActiveProviderProfileFromConfig } from './providerProfiles.js'
import { isSettingSourceEnabled } from './settings/constants.js'
import {
@@ -87,7 +88,9 @@ function filterSettingsEnv(
env: Record<string, string> | undefined,
): Record<string, string> {
return withoutCcdSpawnEnvKeys(
withoutHostManagedProviderVars(withoutSSHTunnelVars(env)),
filterSettingsEnvForExplicitProvider(
withoutHostManagedProviderVars(withoutSSHTunnelVars(env)),
),
)
}

View File

@@ -44,10 +44,6 @@ const OPENAI_CONTEXT_WINDOWS: Record<string, number> = {
'mistral-large-latest': 131_072,
'mistral-small-latest': 131_072,
// MiniMax
'MiniMax-M2.7': 204_800,
'minimax-m2.7': 204_800,
// Google (via OpenRouter)
'google/gemini-2.0-flash':1_048_576,
'google/gemini-2.5-pro': 1_048_576,
@@ -114,10 +110,6 @@ const OPENAI_MAX_OUTPUT_TOKENS: Record<string, number> = {
'mistral-large-latest': 32_768,
'mistral-small-latest': 32_768,
// MiniMax
'MiniMax-M2.7': 131_072,
'minimax-m2.7': 131_072,
// Google (via OpenRouter)
'google/gemini-2.0-flash': 8_192,
'google/gemini-2.5-pro': 65_536,

View File

@@ -0,0 +1,116 @@
import { afterEach, beforeEach, describe, expect, test } from 'bun:test'
import { filterSettingsEnvForExplicitProvider } from './providerEnvSelection.js'
// Snapshot of the ambient environment, captured once at module load so
// afterEach can restore exactly what the test runner started with.
const originalEnv = { ...process.env }

// Every provider-selection variable the tests below may mutate.
const RESET_KEYS = [
  'CLAUDE_CODE_EXPLICIT_PROVIDER',
  'CLAUDE_CODE_USE_OPENAI',
  'CLAUDE_CODE_USE_GEMINI',
  'CLAUDE_CODE_USE_GITHUB',
  'CLAUDE_CODE_USE_BEDROCK',
  'CLAUDE_CODE_USE_VERTEX',
  'CLAUDE_CODE_USE_FOUNDRY',
] as const

// Start each test from a clean slate: none of the tracked keys set.
beforeEach(() => {
  RESET_KEYS.forEach(key => {
    delete process.env[key]
  })
})

// Restore each tracked key to its pre-suite value (or remove it if it
// was absent) so state cannot leak into other test files.
afterEach(() => {
  RESET_KEYS.forEach(key => {
    const saved = originalEnv[key]
    if (saved === undefined) {
      delete process.env[key]
    } else {
      process.env[key] = saved
    }
  })
})
describe('filterSettingsEnvForExplicitProvider', () => {
  // Driver: run the filter over a settings-env snapshot. The explicit
  // provider signal is read from process.env, which the beforeEach /
  // afterEach hooks keep isolated per test.
  const run = (settingsEnv: Record<string, string>) =>
    filterSettingsEnvForExplicitProvider(settingsEnv)

  test('does not treat plain provider flags as an explicit CLI override', () => {
    // A bare provider flag in process.env is not the explicit marker.
    process.env.CLAUDE_CODE_USE_GITHUB = '1'
    const settings = {
      CLAUDE_CODE_USE_OPENAI: '1',
      OPENAI_MODEL: 'gpt-4o',
      OTHER: 'keep-me',
    }
    expect(run(settings)).toEqual({
      CLAUDE_CODE_USE_OPENAI: '1',
      OPENAI_MODEL: 'gpt-4o',
      OTHER: 'keep-me',
    })
  })

  test('strips settings-sourced provider flags when CLI provider is explicit', () => {
    process.env.CLAUDE_CODE_EXPLICIT_PROVIDER = 'openai'
    const settings = {
      CLAUDE_CODE_USE_GITHUB: '1',
      CLAUDE_CODE_USE_OPENAI: '1',
      OTHER: 'keep-me',
    }
    expect(run(settings)).toEqual({ OTHER: 'keep-me' })
  })

  test('strips a stale GitHub model when explicit provider is not github', () => {
    process.env.CLAUDE_CODE_EXPLICIT_PROVIDER = 'openai'
    const settings = {
      OPENAI_MODEL: 'github:copilot',
      OTHER: 'keep-me',
    }
    expect(run(settings)).toEqual({ OTHER: 'keep-me' })
  })

  test('keeps a normal OpenAI model when explicit provider is openai', () => {
    process.env.CLAUDE_CODE_EXPLICIT_PROVIDER = 'openai'
    const settings = {
      OPENAI_MODEL: 'gpt-4o',
      OTHER: 'keep-me',
    }
    expect(run(settings)).toEqual({ OPENAI_MODEL: 'gpt-4o', OTHER: 'keep-me' })
  })

  test('strips a non-GitHub OpenAI model when explicit provider is github', () => {
    process.env.CLAUDE_CODE_EXPLICIT_PROVIDER = 'github'
    const settings = {
      OPENAI_MODEL: 'gpt-4o',
      OTHER: 'keep-me',
    }
    expect(run(settings)).toEqual({ OTHER: 'keep-me' })
  })

  test('preserves anthropic startup intent by stripping stale GitHub/OpenAI settings', () => {
    process.env.CLAUDE_CODE_EXPLICIT_PROVIDER = 'anthropic'
    const settings = {
      CLAUDE_CODE_USE_GITHUB: '1',
      CLAUDE_CODE_USE_OPENAI: '1',
      OPENAI_MODEL: 'github:copilot',
      OTHER: 'keep-me',
    }
    expect(run(settings)).toEqual({ OTHER: 'keep-me' })
  })

  test('preserves explicit ollama startup intent by stripping OpenAI routing settings', () => {
    process.env.CLAUDE_CODE_EXPLICIT_PROVIDER = 'ollama'
    const settings = {
      OPENAI_BASE_URL: 'https://api.openai.com/v1',
      OPENAI_MODEL: 'gpt-4o',
      OPENAI_API_KEY: 'sk-test',
      OTHER: 'keep-me',
    }
    expect(run(settings)).toEqual({ OTHER: 'keep-me' })
  })
})

View File

@@ -0,0 +1,63 @@
// Env var set by the --provider CLI flag to record the user's explicit
// startup choice; settings-sourced provider selection must not override it.
export const EXPLICIT_PROVIDER_ENV_VAR = 'CLAUDE_CODE_EXPLICIT_PROVIDER'

// Startup flags through which persisted settings select a provider.
const PROVIDER_FLAG_KEYS = [
  'CLAUDE_CODE_USE_OPENAI',
  'CLAUDE_CODE_USE_GEMINI',
  'CLAUDE_CODE_USE_GITHUB',
  'CLAUDE_CODE_USE_BEDROCK',
  'CLAUDE_CODE_USE_VERTEX',
  'CLAUDE_CODE_USE_FOUNDRY',
] as const

/** Removes every provider-selection flag from `env` (process.env by default). */
export function clearProviderSelectionFlags(
  env: NodeJS.ProcessEnv = process.env,
): void {
  PROVIDER_FLAG_KEYS.forEach(key => {
    delete env[key]
  })
}

/** Trimmed explicit-provider value, or undefined when unset or blank. */
function getExplicitProvider(processEnv: NodeJS.ProcessEnv): string | undefined {
  const trimmed = processEnv[EXPLICIT_PROVIDER_ENV_VAR]?.trim()
  return trimmed ? trimmed : undefined
}

/** True when the model string routes to GitHub (case-insensitive `github:` prefix). */
function isGithubModel(model: string | undefined): boolean {
  const normalized = (model ?? '').trim().toLowerCase()
  return normalized.startsWith('github:')
}

/**
 * Filters a settings-sourced env snapshot so it cannot override an
 * explicit CLI provider selection. With no explicit selection the input
 * is returned as-is (same reference); otherwise provider flags are
 * dropped and stale model/routing entries that belong to a different
 * provider are stripped.
 */
export function filterSettingsEnvForExplicitProvider(
  env: Record<string, string> | undefined,
  processEnv: NodeJS.ProcessEnv = process.env,
): Record<string, string> {
  if (!env) return {}

  const explicitProvider = getExplicitProvider(processEnv)
  if (explicitProvider === undefined) {
    return env
  }

  // A CLI-selected provider always wins over settings-persisted flags:
  // copy everything except the flag keys.
  const filtered: Record<string, string> = {}
  for (const [key, value] of Object.entries(env)) {
    if ((PROVIDER_FLAG_KEYS as readonly string[]).includes(key)) continue
    filtered[key] = value
  }

  switch (explicitProvider) {
    case 'ollama':
      // Ollama must not inherit OpenAI-compatible routing settings.
      delete filtered.OPENAI_BASE_URL
      delete filtered.OPENAI_MODEL
      delete filtered.OPENAI_API_KEY
      break
    case 'github':
      // Only a `github:`-prefixed model is meaningful for GitHub.
      if (!isGithubModel(filtered.OPENAI_MODEL)) {
        delete filtered.OPENAI_MODEL
      }
      break
    default:
      // Any other provider must not inherit a stale GitHub model.
      if (isGithubModel(filtered.OPENAI_MODEL)) {
        delete filtered.OPENAI_MODEL
      }
      break
  }
  return filtered
}

View File

@@ -9,11 +9,13 @@ import {
const originalEnv = { ...process.env }
const RESET_KEYS = [
'CLAUDE_CODE_EXPLICIT_PROVIDER',
'CLAUDE_CODE_USE_OPENAI',
'CLAUDE_CODE_USE_GEMINI',
'CLAUDE_CODE_USE_GITHUB',
'CLAUDE_CODE_USE_BEDROCK',
'CLAUDE_CODE_USE_VERTEX',
'CLAUDE_CODE_USE_FOUNDRY',
'OPENAI_BASE_URL',
'OPENAI_API_KEY',
'OPENAI_MODEL',
@@ -83,6 +85,16 @@ describe('applyProviderFlag - openai', () => {
applyProviderFlag('openai', ['--model', 'gpt-4o'])
expect(process.env.OPENAI_MODEL).toBe('gpt-4o')
})
// Selecting --provider openai must also drop a GitHub flag persisted by
// an earlier run, so only one provider flag survives startup.
test('clears a previously persisted GitHub flag', () => {
  process.env.CLAUDE_CODE_USE_GITHUB = '1'
  const result = applyProviderFlag('openai', [])
  expect(result.error).toBeUndefined()
  expect(process.env.CLAUDE_CODE_USE_GITHUB).toBeUndefined()
  expect(process.env.CLAUDE_CODE_USE_OPENAI).toBe('1')
})
})
describe('applyProviderFlag - gemini', () => {
@@ -104,6 +116,16 @@ describe('applyProviderFlag - github', () => {
expect(result.error).toBeUndefined()
expect(process.env.CLAUDE_CODE_USE_GITHUB).toBe('1')
})
// Mirror of the openai case: selecting github must clear a lingering
// OpenAI flag before setting its own.
test('clears a previously set OpenAI flag', () => {
  process.env.CLAUDE_CODE_USE_OPENAI = '1'
  const result = applyProviderFlag('github', [])
  expect(result.error).toBeUndefined()
  expect(process.env.CLAUDE_CODE_USE_OPENAI).toBeUndefined()
  expect(process.env.CLAUDE_CODE_USE_GITHUB).toBe('1')
})
})
describe('applyProviderFlag - bedrock', () => {
@@ -151,6 +173,19 @@ describe('applyProviderFlag - invalid provider', () => {
})
})
describe('applyProviderFlag - anthropic', () => {
  // Choosing anthropic explicitly should leave no third-party provider
  // flag set; it succeeds without setting any CLAUDE_CODE_USE_* flag.
  test('clears third-party provider flags', () => {
    process.env.CLAUDE_CODE_USE_GITHUB = '1'
    process.env.CLAUDE_CODE_USE_OPENAI = '1'
    const result = applyProviderFlag('anthropic', [])
    expect(result.error).toBeUndefined()
    expect(process.env.CLAUDE_CODE_USE_GITHUB).toBeUndefined()
    expect(process.env.CLAUDE_CODE_USE_OPENAI).toBeUndefined()
  })
})
describe('applyProviderFlagFromArgs', () => {
test('applies ollama provider and model from argv in one step', () => {
const result = applyProviderFlagFromArgs([

View File

@@ -1,3 +1,8 @@
import {
clearProviderSelectionFlags,
EXPLICIT_PROVIDER_ENV_VAR,
} from './providerEnvSelection.js'
/**
* --provider CLI flag support.
*
@@ -77,6 +82,9 @@ export function applyProviderFlag(
}
}
clearProviderSelectionFlags()
process.env[EXPLICIT_PROVIDER_ENV_VAR] = provider
const model = parseModelFlag(args)
switch (provider as ProviderFlagName) {

View File

@@ -485,6 +485,26 @@ test('buildStartupEnvFromProfile leaves explicit provider selections untouched',
assert.equal(env.OPENAI_API_KEY, undefined)
})
// With an explicit anthropic selection in processEnv, a persisted openai
// profile must be ignored entirely: the function returns processEnv
// itself (same reference) with no provider vars merged in.
test('buildStartupEnvFromProfile preserves explicit anthropic startup selection', async () => {
  const processEnv = {
    CLAUDE_CODE_EXPLICIT_PROVIDER: 'anthropic',
  }
  const env = await buildStartupEnvFromProfile({
    persisted: profile('openai', {
      CLAUDE_CODE_USE_GITHUB: '1',
      OPENAI_MODEL: 'github:copilot',
    }),
    processEnv,
  })
  // Reference equality: processEnv is passed through untouched.
  assert.equal(env, processEnv)
  assert.equal(env.CLAUDE_CODE_EXPLICIT_PROVIDER, 'anthropic')
  assert.equal(env.CLAUDE_CODE_USE_OPENAI, undefined)
  assert.equal(env.CLAUDE_CODE_USE_GITHUB, undefined)
  assert.equal(env.OPENAI_MODEL, undefined)
})
test('buildStartupEnvFromProfile leaves profile-managed env untouched', async () => {
const processEnv = {
CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED: '1',

View File

@@ -412,6 +412,10 @@ export function hasExplicitProviderSelection(
return true
}
if (processEnv.CLAUDE_CODE_EXPLICIT_PROVIDER?.trim()) {
return true
}
return (
processEnv.CLAUDE_CODE_USE_OPENAI !== undefined ||
processEnv.CLAUDE_CODE_USE_GITHUB !== undefined ||

View File

@@ -9,6 +9,7 @@ async function importFreshProvidersModule() {
const originalEnv = { ...process.env }
const RESTORED_KEYS = [
'CLAUDE_CODE_EXPLICIT_PROVIDER',
'CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED',
'CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED_ID',
'CLAUDE_CODE_USE_OPENAI',
@@ -142,6 +143,29 @@ describe('applyProviderProfileToProcessEnv', () => {
})
describe('applyActiveProviderProfileFromConfig', () => {
// An explicit anthropic selection must block activation of a saved
// GitHub profile: the call returns undefined and process.env gains no
// provider flags or model from the profile.
test('does not override explicit anthropic startup selection', async () => {
  const { applyActiveProviderProfileFromConfig } =
    await importFreshProviderProfileModules()
  process.env.CLAUDE_CODE_EXPLICIT_PROVIDER = 'anthropic'
  const applied = applyActiveProviderProfileFromConfig({
    providerProfiles: [
      buildProfile({
        id: 'saved_github',
        baseUrl: 'https://api.githubcopilot.com',
        model: 'github:copilot',
      }),
    ],
    activeProviderProfileId: 'saved_github',
  } as any)
  expect(applied).toBeUndefined()
  expect(process.env.CLAUDE_CODE_EXPLICIT_PROVIDER).toBe('anthropic')
  expect(process.env.CLAUDE_CODE_USE_OPENAI).toBeUndefined()
  expect(process.env.CLAUDE_CODE_USE_GITHUB).toBeUndefined()
  expect(process.env.OPENAI_MODEL).toBeUndefined()
})
test('does not override explicit startup provider selection', async () => {
const { applyActiveProviderProfileFromConfig } =
await importFreshProviderProfileModules()

View File

@@ -5,6 +5,7 @@ import {
type ProviderProfile,
} from './config.js'
import type { ModelOption } from './model/modelOptions.js'
import { EXPLICIT_PROVIDER_ENV_VAR } from './providerEnvSelection.js'
export type ProviderPreset =
| 'anthropic'
@@ -256,6 +257,7 @@ function hasProviderSelectionFlags(
processEnv: NodeJS.ProcessEnv = process.env,
): boolean {
return (
processEnv[EXPLICIT_PROVIDER_ENV_VAR] !== undefined ||
processEnv.CLAUDE_CODE_USE_OPENAI !== undefined ||
processEnv.CLAUDE_CODE_USE_GEMINI !== undefined ||
processEnv.CLAUDE_CODE_USE_GITHUB !== undefined ||

View File

@@ -1,52 +1,11 @@
import { expect, test } from 'bun:test'
import path from 'path'
import { resolveRipgrepConfig, wrapRipgrepUnavailableError } from './ripgrep.js'
// Builtin rg path shaped like the one resolveRipgrepConfig computes for
// the current platform/arch; path.normalize keeps separators OS-correct.
const MOCK_BUILTIN_PATH = path.normalize(
  process.platform === 'win32'
    ? `vendor/ripgrep/${process.arch}-win32/rg.exe`
    : `vendor/ripgrep/${process.arch}-${process.platform}/rg`,
)
test('ripgrepCommand falls back to system rg when builtin binary is missing', () => {
  const config = resolveRipgrepConfig({
    userWantsSystemRipgrep: false,
    bundledMode: false,
    builtinCommand: MOCK_BUILTIN_PATH,
    builtinExists: false,
    systemExecutablePath: '/usr/bin/rg',
    processExecPath: '/fake/bun',
  })
  // Expect the bare command name 'rg', not the resolved absolute path.
  expect(config).toMatchObject({
    mode: 'system',
    command: 'rg',
    args: [],
  })
})
test('ripgrepCommand keeps builtin mode when bundled binary exists', () => {
  const config = resolveRipgrepConfig({
    userWantsSystemRipgrep: false,
    bundledMode: false,
    builtinCommand: MOCK_BUILTIN_PATH,
    builtinExists: true,
    systemExecutablePath: '/usr/bin/rg',
    processExecPath: '/fake/bun',
  })
  expect(config).toMatchObject({
    mode: 'builtin',
    command: MOCK_BUILTIN_PATH,
    args: [],
  })
})
import { wrapRipgrepUnavailableError } from './ripgrep.ts'
test('wrapRipgrepUnavailableError explains missing packaged fallback', () => {
const error = wrapRipgrepUnavailableError(
{ code: 'ENOENT', message: 'spawn rg ENOENT' },
{ mode: 'builtin', command: 'C:\\fake\\vendor\\ripgrep\\rg.exe', args: [] },
{ mode: 'builtin', command: 'C:\\fake\\vendor\\ripgrep\\rg.exe' },
'win32',
)
@@ -59,7 +18,7 @@ test('wrapRipgrepUnavailableError explains missing packaged fallback', () => {
test('wrapRipgrepUnavailableError explains missing system ripgrep', () => {
const error = wrapRipgrepUnavailableError(
{ code: 'ENOENT', message: 'spawn rg ENOENT' },
{ mode: 'system', command: 'rg', args: [] },
{ mode: 'system', command: 'rg' },
'linux',
)

View File

@@ -1,6 +1,5 @@
import type { ChildProcess, ExecFileException } from 'child_process'
import { execFile, spawn } from 'child_process'
import { existsSync } from 'fs'
import memoize from 'lodash-es/memoize.js'
import { homedir } from 'os'
import * as path from 'path'
@@ -31,72 +30,40 @@ type RipgrepConfig = {
type RipgrepErrorLike = Pick<NodeJS.ErrnoException, 'code' | 'message'>
function isErrnoException(error: unknown): error is NodeJS.ErrnoException {
return error instanceof Error
}
const getRipgrepConfig = memoize((): RipgrepConfig => {
const userWantsSystemRipgrep = isEnvDefinedFalsy(
process.env.USE_BUILTIN_RIPGREP,
)
type ResolveRipgrepConfigArgs = {
userWantsSystemRipgrep: boolean
bundledMode: boolean
builtinCommand: string
builtinExists: boolean
systemExecutablePath: string
processExecPath?: string
}
export function resolveRipgrepConfig({
userWantsSystemRipgrep,
bundledMode,
builtinCommand,
builtinExists,
systemExecutablePath,
processExecPath = process.execPath,
}: ResolveRipgrepConfigArgs): RipgrepConfig {
if (userWantsSystemRipgrep && systemExecutablePath !== 'rg') {
// SECURITY: Use command name 'rg' instead of systemExecutablePath to prevent PATH hijacking
return { mode: 'system', command: 'rg', args: [] }
// Try system ripgrep if user wants it
if (userWantsSystemRipgrep) {
const { cmd: systemPath } = findExecutable('rg', [])
if (systemPath !== 'rg') {
// SECURITY: Use command name 'rg' instead of systemPath to prevent PATH hijacking
// If we used systemPath, a malicious ./rg.exe in current directory could be executed
// Using just 'rg' lets the OS resolve it safely with NoDefaultCurrentDirectoryInExePath protection
return { mode: 'system', command: 'rg', args: [] }
}
}
if (bundledMode) {
// In bundled (native) mode, ripgrep is statically compiled into bun-internal
// and dispatches based on argv[0]. We spawn ourselves with argv0='rg'.
if (isInBundledMode()) {
return {
mode: 'embedded',
command: processExecPath,
command: process.execPath,
args: ['--no-config'],
argv0: 'rg',
}
}
if (builtinExists) {
return { mode: 'builtin', command: builtinCommand, args: [] }
}
if (systemExecutablePath !== 'rg') {
return { mode: 'system', command: 'rg', args: [] }
}
return { mode: 'builtin', command: builtinCommand, args: [] }
}
const getRipgrepConfig = memoize((): RipgrepConfig => {
const userWantsSystemRipgrep = isEnvDefinedFalsy(
process.env.USE_BUILTIN_RIPGREP,
)
const bundledMode = isInBundledMode()
const rgRoot = path.resolve(__dirname, 'vendor', 'ripgrep')
const builtinCommand =
const command =
process.platform === 'win32'
? path.resolve(rgRoot, `${process.arch}-win32`, 'rg.exe')
: path.resolve(rgRoot, `${process.arch}-${process.platform}`, 'rg')
const builtinExists = existsSync(builtinCommand)
const { cmd: systemExecutablePath } = findExecutable('rg', [])
return resolveRipgrepConfig({
userWantsSystemRipgrep,
bundledMode,
builtinCommand,
builtinExists,
systemExecutablePath,
})
return { mode: 'builtin', command, args: [] }
})
export function ripgrepCommand(): {
@@ -357,9 +324,7 @@ async function ripGrepFileCount(
if (settled) return
settled = true
reject(
isErrnoException(err) && err.code === 'ENOENT'
? wrapRipgrepUnavailableError(err)
: err,
err.code === 'ENOENT' ? wrapRipgrepUnavailableError(err) : err,
)
})
})
@@ -423,9 +388,7 @@ export async function ripGrepStream(
if (settled) return
settled = true
reject(
isErrnoException(err) && err.code === 'ENOENT'
? wrapRipgrepUnavailableError(err)
: err,
err.code === 'ENOENT' ? wrapRipgrepUnavailableError(err) : err,
)
})
})
@@ -473,9 +436,7 @@ export async function ripGrep(
const CRITICAL_ERROR_CODES = ['ENOENT', 'EACCES', 'EPERM']
if (CRITICAL_ERROR_CODES.includes(error.code as string)) {
reject(
isErrnoException(error) && error.code === 'ENOENT'
? wrapRipgrepUnavailableError(error)
: error,
error.code === 'ENOENT' ? wrapRipgrepUnavailableError(error) : error,
)
return
}

View File

@@ -1,68 +0,0 @@
import { describe, expect, test } from 'bun:test'
import { sanitizeSchemaForOpenAICompat } from './schemaSanitizer'
describe('sanitizeSchemaForOpenAICompat', () => {
  // A property NAMED "pattern" (as in the Grep tool schema) must survive
  // sanitization intact and stay listed in `required`.
  test('preserves Grep-like properties.pattern while keeping it required', () => {
    const schema = {
      type: 'object',
      properties: {
        pattern: {
          type: 'string',
          description: 'The regular expression pattern to search for in file contents',
        },
        path: { type: 'string' },
        glob: { type: 'string' },
      },
      required: ['pattern'],
    }
    const sanitized = sanitizeSchemaForOpenAICompat(schema)
    const properties = sanitized.properties as Record<string, unknown> | undefined
    expect(Object.keys(properties ?? {})).toEqual(['pattern', 'path', 'glob'])
    expect(properties?.pattern).toEqual({
      type: 'string',
      description: 'The glob pattern to match files against' === '' ? '' : 'The regular expression pattern to search for in file contents',
    })
    expect(sanitized.required).toEqual(['pattern'])
  })
  // Same invariant for the Glob tool's "pattern" property.
  test('preserves Glob-like properties.pattern while keeping it required', () => {
    const schema = {
      type: 'object',
      properties: {
        pattern: {
          type: 'string',
          description: 'The glob pattern to match files against',
        },
        path: { type: 'string' },
      },
      required: ['pattern'],
    }
    const sanitized = sanitizeSchemaForOpenAICompat(schema)
    const properties = sanitized.properties as Record<string, unknown> | undefined
    expect(Object.keys(properties ?? {})).toEqual(['pattern', 'path'])
    expect(properties?.pattern).toEqual({
      type: 'string',
      description: 'The glob pattern to match files against',
    })
    expect(sanitized.required).toEqual(['pattern'])
  })
  // By contrast, a JSON Schema `pattern` VALIDATOR keyword on a string
  // schema (alongside minLength) is stripped for OpenAI compatibility.
  test('strips JSON Schema validator pattern from string schemas', () => {
    const schema = {
      type: 'string',
      pattern: '^[a-z]+$',
      minLength: 1,
    }
    const sanitized = sanitizeSchemaForOpenAICompat(schema)
    expect(sanitized).toEqual({
      type: 'string',
    })
  })
})

View File

@@ -33,15 +33,6 @@ function stripSchemaKeywords(schema: unknown, keywords: Set<string>): unknown {
const result: Record<string, unknown> = {}
for (const [key, value] of Object.entries(schema)) {
if (key === 'properties' && isSchemaRecord(value)) {
const sanitizedProps: Record<string, unknown> = {}
for (const [propName, propSchema] of Object.entries(value)) {
sanitizedProps[propName] = stripSchemaKeywords(propSchema, keywords)
}
result[key] = sanitizedProps
continue
}
if (keywords.has(key)) {
continue
}
@@ -224,13 +215,10 @@ export function sanitizeSchemaForOpenAICompat(
}
}
const properties = isSchemaRecord(record.properties)
? record.properties
: undefined
if (Array.isArray(record.required) && properties) {
if (Array.isArray(record.required) && isSchemaRecord(record.properties)) {
record.required = record.required.filter(
(value): value is string => typeof value === 'string' && value in properties,
(value): value is string =>
typeof value === 'string' && value in record.properties,
)
}