Compare commits
10 Commits
fix/383-ba
...
fix/provid
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
b5dbb71a44 | ||
|
|
b2cabdd950 | ||
|
|
139610950c | ||
|
|
65dd19cf87 | ||
|
|
e365cb4010 | ||
|
|
52d33a87a0 | ||
|
|
b4bd95b477 | ||
|
|
1e057025d6 | ||
|
|
aff2bd87e4 | ||
|
|
72e6a945fe |
1
.gitignore
vendored
1
.gitignore
vendored
@@ -10,3 +10,4 @@ GEMINI.md
|
||||
package-lock.json
|
||||
/.claude
|
||||
coverage/
|
||||
.worktrees/
|
||||
|
||||
@@ -52,7 +52,11 @@ async function renderFinalFrame(node: React.ReactNode): Promise<string> {
|
||||
patchConsole: false,
|
||||
})
|
||||
|
||||
await instance.waitUntilExit()
|
||||
// Timeout guard: if render throws before exit effect fires, don't hang
|
||||
await Promise.race([
|
||||
instance.waitUntilExit(),
|
||||
new Promise<void>(resolve => setTimeout(resolve, 3000)),
|
||||
])
|
||||
return stripAnsi(extractLastFrame(getOutput()))
|
||||
}
|
||||
|
||||
|
||||
305
src/components/ProviderManager.test.tsx
Normal file
305
src/components/ProviderManager.test.tsx
Normal file
@@ -0,0 +1,305 @@
|
||||
import { PassThrough } from 'node:stream'
|
||||
|
||||
import { afterEach, expect, mock, test } from 'bun:test'
|
||||
import React from 'react'
|
||||
import stripAnsi from 'strip-ansi'
|
||||
|
||||
import { createRoot } from '../ink.js'
|
||||
import { AppStateProvider } from '../state/AppState.js'
|
||||
|
||||
const SYNC_START = '\x1B[?2026h'
|
||||
const SYNC_END = '\x1B[?2026l'
|
||||
|
||||
const ORIGINAL_ENV = {
|
||||
CLAUDE_CODE_USE_GITHUB: process.env.CLAUDE_CODE_USE_GITHUB,
|
||||
GITHUB_TOKEN: process.env.GITHUB_TOKEN,
|
||||
GH_TOKEN: process.env.GH_TOKEN,
|
||||
}
|
||||
|
||||
function extractLastFrame(output: string): string {
|
||||
let lastFrame: string | null = null
|
||||
let cursor = 0
|
||||
|
||||
while (cursor < output.length) {
|
||||
const start = output.indexOf(SYNC_START, cursor)
|
||||
if (start === -1) {
|
||||
break
|
||||
}
|
||||
|
||||
const contentStart = start + SYNC_START.length
|
||||
const end = output.indexOf(SYNC_END, contentStart)
|
||||
if (end === -1) {
|
||||
break
|
||||
}
|
||||
|
||||
const frame = output.slice(contentStart, end)
|
||||
if (frame.trim().length > 0) {
|
||||
lastFrame = frame
|
||||
}
|
||||
cursor = end + SYNC_END.length
|
||||
}
|
||||
|
||||
return lastFrame ?? output
|
||||
}
|
||||
|
||||
function createTestStreams(): {
|
||||
stdout: PassThrough
|
||||
stdin: PassThrough & {
|
||||
isTTY: boolean
|
||||
setRawMode: (mode: boolean) => void
|
||||
ref: () => void
|
||||
unref: () => void
|
||||
}
|
||||
getOutput: () => string
|
||||
} {
|
||||
let output = ''
|
||||
const stdout = new PassThrough()
|
||||
const stdin = new PassThrough() as PassThrough & {
|
||||
isTTY: boolean
|
||||
setRawMode: (mode: boolean) => void
|
||||
ref: () => void
|
||||
unref: () => void
|
||||
}
|
||||
|
||||
stdin.isTTY = true
|
||||
stdin.setRawMode = () => {}
|
||||
stdin.ref = () => {}
|
||||
stdin.unref = () => {}
|
||||
;(stdout as unknown as { columns: number }).columns = 120
|
||||
stdout.on('data', chunk => {
|
||||
output += chunk.toString()
|
||||
})
|
||||
|
||||
return {
|
||||
stdout,
|
||||
stdin,
|
||||
getOutput: () => output,
|
||||
}
|
||||
}
|
||||
|
||||
async function waitForCondition(
|
||||
predicate: () => boolean,
|
||||
options?: { timeoutMs?: number; intervalMs?: number },
|
||||
): Promise<void> {
|
||||
const timeoutMs = options?.timeoutMs ?? 2000
|
||||
const intervalMs = options?.intervalMs ?? 10
|
||||
const startedAt = Date.now()
|
||||
|
||||
while (Date.now() - startedAt < timeoutMs) {
|
||||
if (predicate()) {
|
||||
return
|
||||
}
|
||||
await Bun.sleep(intervalMs)
|
||||
}
|
||||
|
||||
throw new Error('Timed out waiting for ProviderManager test condition')
|
||||
}
|
||||
|
||||
function createDeferred<T>(): {
|
||||
promise: Promise<T>
|
||||
resolve: (value: T) => void
|
||||
} {
|
||||
let resolve!: (value: T) => void
|
||||
const promise = new Promise<T>(r => {
|
||||
resolve = r
|
||||
})
|
||||
return { promise, resolve }
|
||||
}
|
||||
|
||||
function mockProviderProfilesModule(): void {
|
||||
mock.module('../utils/providerProfiles.js', () => ({
|
||||
addProviderProfile: () => null,
|
||||
applyActiveProviderProfileFromConfig: () => {},
|
||||
deleteProviderProfile: () => ({ removed: false, activeProfileId: null }),
|
||||
getActiveProviderProfile: () => null,
|
||||
getProviderPresetDefaults: () => ({
|
||||
provider: 'openai',
|
||||
name: 'Mock provider',
|
||||
baseUrl: 'http://localhost:11434/v1',
|
||||
model: 'mock-model',
|
||||
apiKey: '',
|
||||
}),
|
||||
getProviderProfiles: () => [],
|
||||
setActiveProviderProfile: () => null,
|
||||
updateProviderProfile: () => null,
|
||||
}))
|
||||
}
|
||||
|
||||
function mockProviderManagerDependencies(
|
||||
syncRead: () => string | undefined,
|
||||
asyncRead: () => Promise<string | undefined>,
|
||||
): void {
|
||||
mockProviderProfilesModule()
|
||||
|
||||
mock.module('../utils/githubModelsCredentials.js', () => ({
|
||||
clearGithubModelsToken: () => ({ success: true }),
|
||||
GITHUB_MODELS_HYDRATED_ENV_MARKER: 'CLAUDE_CODE_GITHUB_TOKEN_HYDRATED',
|
||||
hydrateGithubModelsTokenFromSecureStorage: () => {},
|
||||
readGithubModelsToken: syncRead,
|
||||
readGithubModelsTokenAsync: asyncRead,
|
||||
}))
|
||||
|
||||
mock.module('../utils/settings/settings.js', () => ({
|
||||
updateSettingsForSource: () => ({ error: null }),
|
||||
}))
|
||||
}
|
||||
|
||||
async function waitForFrameOutput(
|
||||
getOutput: () => string,
|
||||
predicate: (output: string) => boolean,
|
||||
timeoutMs = 2500,
|
||||
): Promise<string> {
|
||||
let output = ''
|
||||
|
||||
await waitForCondition(() => {
|
||||
output = stripAnsi(extractLastFrame(getOutput()))
|
||||
return predicate(output)
|
||||
}, { timeoutMs })
|
||||
|
||||
return output
|
||||
}
|
||||
|
||||
async function mountProviderManager(
|
||||
ProviderManager: React.ComponentType<{
|
||||
mode: 'first-run' | 'manage'
|
||||
onDone: () => void
|
||||
}>,
|
||||
): Promise<{
|
||||
getOutput: () => string
|
||||
dispose: () => Promise<void>
|
||||
}> {
|
||||
const { stdout, stdin, getOutput } = createTestStreams()
|
||||
const root = await createRoot({
|
||||
stdout: stdout as unknown as NodeJS.WriteStream,
|
||||
stdin: stdin as unknown as NodeJS.ReadStream,
|
||||
patchConsole: false,
|
||||
})
|
||||
|
||||
root.render(
|
||||
<AppStateProvider>
|
||||
<ProviderManager
|
||||
mode="manage"
|
||||
onDone={() => {}}
|
||||
/>
|
||||
</AppStateProvider>,
|
||||
)
|
||||
|
||||
return {
|
||||
getOutput,
|
||||
dispose: async () => {
|
||||
root.unmount()
|
||||
stdin.end()
|
||||
stdout.end()
|
||||
await Bun.sleep(0)
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
async function renderProviderManagerFrame(
|
||||
ProviderManager: React.ComponentType<{
|
||||
mode: 'first-run' | 'manage'
|
||||
onDone: () => void
|
||||
}>,
|
||||
options?: {
|
||||
waitForOutput?: (output: string) => boolean
|
||||
timeoutMs?: number
|
||||
},
|
||||
): Promise<string> {
|
||||
const mounted = await mountProviderManager(ProviderManager)
|
||||
const output = await waitForFrameOutput(
|
||||
mounted.getOutput,
|
||||
frame => {
|
||||
if (!options?.waitForOutput) {
|
||||
return frame.includes('Provider manager')
|
||||
}
|
||||
return options.waitForOutput(frame)
|
||||
},
|
||||
options?.timeoutMs ?? 2500,
|
||||
)
|
||||
|
||||
await mounted.dispose()
|
||||
return output
|
||||
}
|
||||
|
||||
afterEach(() => {
|
||||
mock.restore()
|
||||
|
||||
for (const [key, value] of Object.entries(ORIGINAL_ENV)) {
|
||||
if (value === undefined) {
|
||||
delete process.env[key as keyof typeof ORIGINAL_ENV]
|
||||
} else {
|
||||
process.env[key as keyof typeof ORIGINAL_ENV] = value
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
test('ProviderManager resolves GitHub virtual provider from async storage without sync reads in render flow', async () => {
|
||||
delete process.env.CLAUDE_CODE_USE_GITHUB
|
||||
delete process.env.GITHUB_TOKEN
|
||||
delete process.env.GH_TOKEN
|
||||
|
||||
const syncRead = mock(() => {
|
||||
throw new Error('sync credential read should not run in ProviderManager render flow')
|
||||
})
|
||||
const asyncRead = mock(async () => 'stored-token')
|
||||
|
||||
mockProviderManagerDependencies(syncRead, asyncRead)
|
||||
|
||||
const nonce = `${Date.now()}-${Math.random()}`
|
||||
const { ProviderManager } = await import(`./ProviderManager.js?ts=${nonce}`)
|
||||
const output = await renderProviderManagerFrame(ProviderManager, {
|
||||
waitForOutput: frame =>
|
||||
frame.includes('Provider manager') &&
|
||||
frame.includes('GitHub Models') &&
|
||||
frame.includes('token stored'),
|
||||
})
|
||||
|
||||
expect(output).toContain('Provider manager')
|
||||
expect(output).toContain('GitHub Models')
|
||||
expect(output).toContain('token stored')
|
||||
expect(output).not.toContain('No provider profiles configured yet.')
|
||||
|
||||
expect(syncRead).not.toHaveBeenCalled()
|
||||
expect(asyncRead).toHaveBeenCalled()
|
||||
})
|
||||
|
||||
test('ProviderManager avoids first-frame false negative while stored-token lookup is pending', async () => {
|
||||
delete process.env.CLAUDE_CODE_USE_GITHUB
|
||||
delete process.env.GITHUB_TOKEN
|
||||
delete process.env.GH_TOKEN
|
||||
|
||||
const syncRead = mock(() => {
|
||||
throw new Error('sync credential read should not run in ProviderManager render flow')
|
||||
})
|
||||
const deferredStoredToken = createDeferred<string | undefined>()
|
||||
const asyncRead = mock(async () => deferredStoredToken.promise)
|
||||
|
||||
mockProviderManagerDependencies(syncRead, asyncRead)
|
||||
|
||||
const nonce = `${Date.now()}-${Math.random()}`
|
||||
const { ProviderManager } = await import(`./ProviderManager.js?ts=${nonce}`)
|
||||
const mounted = await mountProviderManager(ProviderManager)
|
||||
|
||||
const firstFrame = await waitForFrameOutput(
|
||||
mounted.getOutput,
|
||||
frame => frame.includes('Provider manager'),
|
||||
)
|
||||
|
||||
expect(firstFrame).toContain('Checking GitHub Models credentials...')
|
||||
expect(firstFrame).not.toContain('No provider profiles configured yet.')
|
||||
|
||||
deferredStoredToken.resolve('stored-token')
|
||||
|
||||
const resolvedFrame = await waitForFrameOutput(
|
||||
mounted.getOutput,
|
||||
frame => frame.includes('GitHub Models') && frame.includes('token stored'),
|
||||
)
|
||||
|
||||
expect(resolvedFrame).toContain('GitHub Models')
|
||||
expect(resolvedFrame).toContain('token stored')
|
||||
|
||||
await mounted.dispose()
|
||||
|
||||
expect(syncRead).not.toHaveBeenCalled()
|
||||
expect(asyncRead).toHaveBeenCalled()
|
||||
})
|
||||
@@ -20,6 +20,7 @@ import {
|
||||
GITHUB_MODELS_HYDRATED_ENV_MARKER,
|
||||
hydrateGithubModelsTokenFromSecureStorage,
|
||||
readGithubModelsToken,
|
||||
readGithubModelsTokenAsync,
|
||||
} from '../utils/githubModelsCredentials.js'
|
||||
import { isEnvTruthy } from '../utils/envUtils.js'
|
||||
import { updateSettingsForSource } from '../utils/settings/settings.js'
|
||||
@@ -118,25 +119,38 @@ function profileSummary(profile: ProviderProfile, isActive: boolean): string {
|
||||
return `${providerKind} · ${profile.baseUrl} · ${profile.model} · ${keyInfo}${activeSuffix}`
|
||||
}
|
||||
|
||||
function getGithubCredentialSource(
|
||||
function getGithubCredentialSourceFromEnv(
|
||||
processEnv: NodeJS.ProcessEnv = process.env,
|
||||
): GithubCredentialSource {
|
||||
if (readGithubModelsToken()?.trim()) {
|
||||
return 'stored'
|
||||
}
|
||||
if (processEnv.GITHUB_TOKEN?.trim() || processEnv.GH_TOKEN?.trim()) {
|
||||
return 'env'
|
||||
}
|
||||
return 'none'
|
||||
}
|
||||
|
||||
async function resolveGithubCredentialSource(
|
||||
processEnv: NodeJS.ProcessEnv = process.env,
|
||||
): Promise<GithubCredentialSource> {
|
||||
const envSource = getGithubCredentialSourceFromEnv(processEnv)
|
||||
if (envSource !== 'none') {
|
||||
return envSource
|
||||
}
|
||||
|
||||
if (await readGithubModelsTokenAsync()) {
|
||||
return 'stored'
|
||||
}
|
||||
|
||||
return 'none'
|
||||
}
|
||||
|
||||
function isGithubProviderAvailable(
|
||||
credentialSource: GithubCredentialSource,
|
||||
processEnv: NodeJS.ProcessEnv = process.env,
|
||||
): boolean {
|
||||
if (isEnvTruthy(processEnv.CLAUDE_CODE_USE_GITHUB)) {
|
||||
return true
|
||||
}
|
||||
return getGithubCredentialSource(processEnv) !== 'none'
|
||||
return credentialSource !== 'none'
|
||||
}
|
||||
|
||||
function getGithubProviderModel(
|
||||
@@ -164,19 +178,24 @@ function getGithubProviderSummary(
|
||||
}
|
||||
|
||||
export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
|
||||
const initialGithubCredentialSource = getGithubCredentialSourceFromEnv()
|
||||
const initialIsGithubActive = isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB)
|
||||
const initialHasGithubCredential = initialGithubCredentialSource !== 'none'
|
||||
|
||||
const [profiles, setProfiles] = React.useState(() => getProviderProfiles())
|
||||
const [activeProfileId, setActiveProfileId] = React.useState(
|
||||
() => getActiveProviderProfile()?.id,
|
||||
)
|
||||
const [githubProviderAvailable, setGithubProviderAvailable] = React.useState(() =>
|
||||
isGithubProviderAvailable(),
|
||||
const [githubProviderAvailable, setGithubProviderAvailable] = React.useState(
|
||||
() => isGithubProviderAvailable(initialGithubCredentialSource),
|
||||
)
|
||||
const [githubCredentialSource, setGithubCredentialSource] = React.useState<GithubCredentialSource>(
|
||||
() => getGithubCredentialSource(),
|
||||
)
|
||||
const [isGithubActive, setIsGithubActive] = React.useState(() =>
|
||||
isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB),
|
||||
() => initialGithubCredentialSource,
|
||||
)
|
||||
const [isGithubActive, setIsGithubActive] = React.useState(() => initialIsGithubActive)
|
||||
const [isGithubCredentialSourceResolved, setIsGithubCredentialSourceResolved] =
|
||||
React.useState(() => initialHasGithubCredential || initialIsGithubActive)
|
||||
const githubRefreshEpochRef = React.useRef(0)
|
||||
const [screen, setScreen] = React.useState<Screen>(
|
||||
mode === 'first-run' ? 'select-preset' : 'menu',
|
||||
)
|
||||
@@ -196,13 +215,48 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
|
||||
const currentStepKey = currentStep.key
|
||||
const currentValue = draft[currentStepKey]
|
||||
|
||||
const refreshGithubProviderState = React.useCallback((): void => {
|
||||
const envCredentialSource = getGithubCredentialSourceFromEnv()
|
||||
const githubActive = isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB)
|
||||
const canResolveFromEnv = githubActive || envCredentialSource !== 'none'
|
||||
|
||||
if (canResolveFromEnv) {
|
||||
githubRefreshEpochRef.current += 1
|
||||
setGithubCredentialSource(envCredentialSource)
|
||||
setGithubProviderAvailable(isGithubProviderAvailable(envCredentialSource))
|
||||
setIsGithubActive(githubActive)
|
||||
setIsGithubCredentialSourceResolved(true)
|
||||
return
|
||||
}
|
||||
|
||||
setIsGithubCredentialSourceResolved(false)
|
||||
const refreshEpoch = ++githubRefreshEpochRef.current
|
||||
void (async () => {
|
||||
const credentialSource = await resolveGithubCredentialSource()
|
||||
if (refreshEpoch !== githubRefreshEpochRef.current) {
|
||||
return
|
||||
}
|
||||
|
||||
setGithubCredentialSource(credentialSource)
|
||||
setGithubProviderAvailable(isGithubProviderAvailable(credentialSource))
|
||||
setIsGithubActive(isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB))
|
||||
setIsGithubCredentialSourceResolved(true)
|
||||
})()
|
||||
}, [])
|
||||
|
||||
React.useEffect(() => {
|
||||
refreshGithubProviderState()
|
||||
|
||||
return () => {
|
||||
githubRefreshEpochRef.current += 1
|
||||
}
|
||||
}, [refreshGithubProviderState])
|
||||
|
||||
function refreshProfiles(): void {
|
||||
const nextProfiles = getProviderProfiles()
|
||||
setProfiles(nextProfiles)
|
||||
setActiveProfileId(getActiveProviderProfile()?.id)
|
||||
setGithubProviderAvailable(isGithubProviderAvailable())
|
||||
setGithubCredentialSource(getGithubCredentialSource())
|
||||
setIsGithubActive(isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB))
|
||||
refreshGithubProviderState()
|
||||
}
|
||||
|
||||
function clearStartupProviderOverrideFromUserSettings(): string | null {
|
||||
@@ -640,7 +694,11 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
|
||||
{statusMessage && <Text>{statusMessage}</Text>}
|
||||
<Box flexDirection="column">
|
||||
{profiles.length === 0 && !githubProviderAvailable ? (
|
||||
<Text dimColor>No provider profiles configured yet.</Text>
|
||||
isGithubCredentialSourceResolved ? (
|
||||
<Text dimColor>No provider profiles configured yet.</Text>
|
||||
) : (
|
||||
<Text dimColor>Checking GitHub Models credentials...</Text>
|
||||
)
|
||||
) : (
|
||||
<>
|
||||
{profiles.map(profile => (
|
||||
|
||||
@@ -40,7 +40,7 @@ export class GrpcServer {
|
||||
grpc.ServerCredentials.createInsecure(),
|
||||
(error, boundPort) => {
|
||||
if (error) {
|
||||
console.error('Failed to start gRPC server', error)
|
||||
console.error('Failed to start gRPC server')
|
||||
return
|
||||
}
|
||||
console.log(`gRPC Server running at ${host}:${boundPort}`)
|
||||
@@ -225,7 +225,7 @@ export class GrpcServer {
|
||||
call.end()
|
||||
}
|
||||
} catch (err: any) {
|
||||
console.error("Error processing stream:", err)
|
||||
console.error('Error processing stream')
|
||||
call.write({
|
||||
error: {
|
||||
message: err.message || "Internal server error",
|
||||
|
||||
@@ -366,14 +366,12 @@ const reconciler = createReconciler<
|
||||
createTextInstance(
|
||||
text: string,
|
||||
_root: DOMElement,
|
||||
hostContext: HostContext,
|
||||
_hostContext: HostContext,
|
||||
): TextNode {
|
||||
if (!hostContext.isInsideText) {
|
||||
throw new Error(
|
||||
`Text string "${text}" must be rendered inside <Text> component`,
|
||||
)
|
||||
}
|
||||
|
||||
// react-compiler memoization can reuse cached <Text> elements without
|
||||
// re-traversing getChildHostContext, so hostContext.isInsideText may be
|
||||
// stale. Always create the text node — Ink will render it correctly
|
||||
// regardless of the context tracking state.
|
||||
return createTextNode(text)
|
||||
},
|
||||
resetTextContent() {},
|
||||
|
||||
@@ -27,6 +27,21 @@ async function flushClipboardCopy(): Promise<void> {
|
||||
await new Promise(resolve => setTimeout(resolve, 0))
|
||||
}
|
||||
|
||||
async function waitForExecCall(
|
||||
command: string,
|
||||
attempts = 20,
|
||||
): Promise<(typeof execFileNoThrowMock.mock.calls)[number] | undefined> {
|
||||
for (let attempt = 0; attempt < attempts; attempt++) {
|
||||
const call = execFileNoThrowMock.mock.calls.find(([cmd]) => cmd === command)
|
||||
if (call) {
|
||||
return call
|
||||
}
|
||||
await flushClipboardCopy()
|
||||
}
|
||||
|
||||
return undefined
|
||||
}
|
||||
|
||||
describe('Windows clipboard fallback', () => {
|
||||
beforeEach(() => {
|
||||
execFileNoThrowMock.mockClear()
|
||||
@@ -62,9 +77,7 @@ describe('Windows clipboard fallback', () => {
|
||||
await setClipboard('Привет мир')
|
||||
await flushClipboardCopy()
|
||||
|
||||
const windowsCall = execFileNoThrowMock.mock.calls.find(
|
||||
([cmd]) => cmd === 'powershell',
|
||||
)
|
||||
const windowsCall = await waitForExecCall('powershell')
|
||||
|
||||
expect(windowsCall?.[2]).toMatchObject({
|
||||
stdin: 'ignore',
|
||||
|
||||
@@ -238,6 +238,7 @@ import { usePromptsFromClaudeInChrome } from 'src/hooks/usePromptsFromClaudeInCh
|
||||
import { getTipToShowOnSpinner, recordShownTip } from 'src/services/tips/tipScheduler.js';
|
||||
import type { Theme } from 'src/utils/theme.js';
|
||||
import { isPromptTypingSuppressionActive } from './replInputSuppression.js';
|
||||
import { shouldStartStartupChecks } from './replStartupGates.js';
|
||||
import { checkAndDisableBypassPermissionsIfNeeded, checkAndDisableAutoModeIfNeeded, useKickOffCheckAndDisableBypassPermissionsIfNeeded, useKickOffCheckAndDisableAutoModeIfNeeded } from 'src/utils/permissions/bypassPermissionsKillswitch.js';
|
||||
import { SandboxManager } from 'src/utils/sandbox/sandbox-adapter.js';
|
||||
import { SANDBOX_NETWORK_ACCESS_TOOL_NAME } from 'src/cli/structuredIO.js';
|
||||
@@ -784,19 +785,6 @@ export function REPL({
|
||||
});
|
||||
const tasksV2 = useTasksV2WithCollapseEffect();
|
||||
|
||||
// Start background plugin installations
|
||||
|
||||
// SECURITY: This code is guaranteed to run ONLY after the "trust this folder" dialog
|
||||
// has been confirmed by the user. The trust dialog is shown in cli.tsx (line ~387)
|
||||
// before the REPL component is rendered. The dialog blocks execution until the user
|
||||
// accepts, and only then is the REPL component mounted and this effect runs.
|
||||
// This ensures that plugin installations from repository and user settings only
|
||||
// happen after explicit user consent to trust the current working directory.
|
||||
useEffect(() => {
|
||||
if (isRemoteSession) return;
|
||||
void performStartupChecks(setAppState);
|
||||
}, [setAppState, isRemoteSession]);
|
||||
|
||||
// Allow Claude in Chrome MCP to send prompts through MCP notifications
|
||||
// and sync permission mode changes to the Chrome extension
|
||||
usePromptsFromClaudeInChrome(isRemoteSession ? EMPTY_MCP_CLIENTS : mcpClients, toolPermissionContext.mode);
|
||||
@@ -1337,6 +1325,7 @@ export function REPL({
|
||||
const [inputValue, setInputValueRaw] = useState(() => consumeEarlyInput());
|
||||
const inputValueRef = useRef(inputValue);
|
||||
inputValueRef.current = inputValue;
|
||||
const startupChecksStartedRef = useRef(false);
|
||||
const promptTypingSuppressionActive = isPromptTypingSuppressionActive(isPromptInputActive, inputValue);
|
||||
const insertTextRef = useRef<{
|
||||
insert: (text: string) => void;
|
||||
@@ -1344,6 +1333,24 @@ export function REPL({
|
||||
cursorOffset: number;
|
||||
} | null>(null);
|
||||
|
||||
// Start background plugin installations after the initial input window is idle.
|
||||
// SECURITY: This still runs only after the "trust this folder" dialog has been
|
||||
// confirmed because the REPL is not mounted until that dialog completes.
|
||||
useEffect(() => {
|
||||
if (
|
||||
!shouldStartStartupChecks({
|
||||
isRemoteSession,
|
||||
promptTypingSuppressionActive,
|
||||
startupChecksStarted: startupChecksStartedRef.current,
|
||||
})
|
||||
) {
|
||||
return;
|
||||
}
|
||||
|
||||
startupChecksStartedRef.current = true;
|
||||
void performStartupChecks(setAppState);
|
||||
}, [isRemoteSession, promptTypingSuppressionActive, setAppState]);
|
||||
|
||||
// Wrap setInputValue to co-locate suppression state updates.
|
||||
// Both setState calls happen in the same synchronous context so React
|
||||
// batches them into a single render, eliminating the extra render that
|
||||
|
||||
44
src/screens/replStartupGates.test.ts
Normal file
44
src/screens/replStartupGates.test.ts
Normal file
@@ -0,0 +1,44 @@
|
||||
import { describe, expect, test } from 'bun:test'
|
||||
import { shouldStartStartupChecks } from './replStartupGates.js'
|
||||
|
||||
describe('shouldStartStartupChecks', () => {
|
||||
test('returns false for remote sessions', () => {
|
||||
expect(
|
||||
shouldStartStartupChecks({
|
||||
isRemoteSession: true,
|
||||
promptTypingSuppressionActive: false,
|
||||
startupChecksStarted: false,
|
||||
}),
|
||||
).toBe(false)
|
||||
})
|
||||
|
||||
test('returns false while prompt typing suppression is active', () => {
|
||||
expect(
|
||||
shouldStartStartupChecks({
|
||||
isRemoteSession: false,
|
||||
promptTypingSuppressionActive: true,
|
||||
startupChecksStarted: false,
|
||||
}),
|
||||
).toBe(false)
|
||||
})
|
||||
|
||||
test('returns true once local startup is idle and checks have not started', () => {
|
||||
expect(
|
||||
shouldStartStartupChecks({
|
||||
isRemoteSession: false,
|
||||
promptTypingSuppressionActive: false,
|
||||
startupChecksStarted: false,
|
||||
}),
|
||||
).toBe(true)
|
||||
})
|
||||
|
||||
test('returns false after startup checks have already started', () => {
|
||||
expect(
|
||||
shouldStartStartupChecks({
|
||||
isRemoteSession: false,
|
||||
promptTypingSuppressionActive: false,
|
||||
startupChecksStarted: true,
|
||||
}),
|
||||
).toBe(false)
|
||||
})
|
||||
})
|
||||
11
src/screens/replStartupGates.ts
Normal file
11
src/screens/replStartupGates.ts
Normal file
@@ -0,0 +1,11 @@
|
||||
export function shouldStartStartupChecks(options: {
|
||||
isRemoteSession: boolean
|
||||
promptTypingSuppressionActive: boolean
|
||||
startupChecksStarted: boolean
|
||||
}): boolean {
|
||||
return (
|
||||
!options.isRemoteSession &&
|
||||
!options.promptTypingSuppressionActive &&
|
||||
!options.startupChecksStarted
|
||||
)
|
||||
}
|
||||
File diff suppressed because it is too large
Load Diff
@@ -42,6 +42,10 @@ import {
|
||||
} from './providerConfig.js'
|
||||
import { sanitizeSchemaForOpenAICompat } from '../../utils/schemaSanitizer.js'
|
||||
import { redactSecretValueForDisplay } from '../../utils/providerProfile.js'
|
||||
import {
|
||||
normalizeToolArguments,
|
||||
hasToolFieldMapping,
|
||||
} from './toolArgumentNormalization.js'
|
||||
|
||||
type SecretValueSource = Partial<{
|
||||
OPENAI_API_KEY: string
|
||||
@@ -56,11 +60,22 @@ const GITHUB_API_VERSION = '2022-11-28'
|
||||
const GITHUB_429_MAX_RETRIES = 3
|
||||
const GITHUB_429_BASE_DELAY_SEC = 1
|
||||
const GITHUB_429_MAX_DELAY_SEC = 32
|
||||
const GEMINI_API_HOST = 'generativelanguage.googleapis.com'
|
||||
|
||||
function isGithubModelsMode(): boolean {
|
||||
return isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB)
|
||||
}
|
||||
|
||||
function hasGeminiApiHost(baseUrl: string | undefined): boolean {
|
||||
if (!baseUrl) return false
|
||||
|
||||
try {
|
||||
return new URL(baseUrl).hostname.toLowerCase() === GEMINI_API_HOST
|
||||
} catch {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
function formatRetryAfterHint(response: Response): string {
|
||||
const ra = response.headers.get('retry-after')
|
||||
return ra ? ` (Retry-After: ${ra})` : ''
|
||||
@@ -197,6 +212,13 @@ function convertContentBlocks(
|
||||
return parts
|
||||
}
|
||||
|
||||
function isGeminiMode(): boolean {
|
||||
return (
|
||||
isEnvTruthy(process.env.CLAUDE_CODE_USE_GEMINI) ||
|
||||
hasGeminiApiHost(process.env.OPENAI_BASE_URL)
|
||||
)
|
||||
}
|
||||
|
||||
function convertMessages(
|
||||
messages: Array<{ role: string; message?: { role?: string; content?: unknown }; content?: unknown }>,
|
||||
system: unknown,
|
||||
@@ -248,6 +270,7 @@ function convertMessages(
|
||||
// Check for tool_use blocks
|
||||
if (Array.isArray(content)) {
|
||||
const toolUses = content.filter((b: { type?: string }) => b.type === 'tool_use')
|
||||
const thinkingBlock = content.find((b: { type?: string }) => b.type === 'thinking')
|
||||
const textContent = content.filter(
|
||||
(b: { type?: string }) => b.type !== 'tool_use' && b.type !== 'thinking',
|
||||
)
|
||||
@@ -267,18 +290,46 @@ function convertMessages(
|
||||
name?: string
|
||||
input?: unknown
|
||||
extra_content?: Record<string, unknown>
|
||||
}) => ({
|
||||
id: tu.id ?? `call_${crypto.randomUUID().replace(/-/g, '')}`,
|
||||
type: 'function' as const,
|
||||
function: {
|
||||
name: tu.name ?? 'unknown',
|
||||
arguments:
|
||||
typeof tu.input === 'string'
|
||||
? tu.input
|
||||
: JSON.stringify(tu.input ?? {}),
|
||||
},
|
||||
...(tu.extra_content ? { extra_content: tu.extra_content } : {}),
|
||||
}),
|
||||
signature?: string
|
||||
}, index) => {
|
||||
const toolCall: NonNullable<OpenAIMessage['tool_calls']>[number] = {
|
||||
id: tu.id ?? `call_${crypto.randomUUID().replace(/-/g, '')}`,
|
||||
type: 'function' as const,
|
||||
function: {
|
||||
name: tu.name ?? 'unknown',
|
||||
arguments:
|
||||
typeof tu.input === 'string'
|
||||
? tu.input
|
||||
: JSON.stringify(tu.input ?? {}),
|
||||
},
|
||||
}
|
||||
|
||||
// Preserve existing extra_content if present
|
||||
if (tu.extra_content) {
|
||||
toolCall.extra_content = { ...tu.extra_content }
|
||||
}
|
||||
|
||||
// Handle Gemini thought_signature
|
||||
if (isGeminiMode()) {
|
||||
// If the model provided a signature in the tool_use block itself (e.g. from a previous Turn/Step)
|
||||
// Use thinkingBlock.signature for ALL tool calls in the same assistant turn if available.
|
||||
// The API requires the same signature on every replayed function call part in a parallel set.
|
||||
const signature = tu.signature ?? (thinkingBlock as any)?.signature
|
||||
|
||||
// Merge into existing google-specific metadata if present
|
||||
const existingGoogle = (toolCall.extra_content?.google as Record<string, unknown>) ?? {}
|
||||
|
||||
toolCall.extra_content = {
|
||||
...toolCall.extra_content,
|
||||
google: {
|
||||
...existingGoogle,
|
||||
thought_signature: signature ?? "skip_thought_signature_validator"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return toolCall
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
@@ -397,7 +448,7 @@ function normalizeSchemaForOpenAI(
|
||||
function convertTools(
|
||||
tools: Array<{ name: string; description?: string; input_schema?: Record<string, unknown> }>,
|
||||
): OpenAITool[] {
|
||||
const isGemini = isEnvTruthy(process.env.CLAUDE_CODE_USE_GEMINI)
|
||||
const isGemini = isGeminiMode()
|
||||
|
||||
return tools
|
||||
.filter(t => t.name !== 'ToolSearchTool') // Not relevant for OpenAI
|
||||
@@ -439,6 +490,7 @@ interface OpenAIStreamChunk {
|
||||
delta: {
|
||||
role?: string
|
||||
content?: string | null
|
||||
reasoning_content?: string | null
|
||||
tool_calls?: Array<{
|
||||
index: number
|
||||
id?: string
|
||||
@@ -476,6 +528,30 @@ function convertChunkUsage(
|
||||
}
|
||||
}
|
||||
|
||||
const JSON_REPAIR_SUFFIXES = [
|
||||
'}', '"}', ']}', '"]}', '}}', '"}}', ']}}', '"]}}', '"]}]}', '}]}'
|
||||
]
|
||||
|
||||
function repairPossiblyTruncatedObjectJson(raw: string): string | null {
|
||||
try {
|
||||
const parsed = JSON.parse(raw)
|
||||
return parsed && typeof parsed === 'object' && !Array.isArray(parsed)
|
||||
? raw
|
||||
: null
|
||||
} catch {
|
||||
for (const combo of JSON_REPAIR_SUFFIXES) {
|
||||
try {
|
||||
const repaired = raw + combo
|
||||
const parsed = JSON.parse(repaired)
|
||||
if (parsed && typeof parsed === 'object' && !Array.isArray(parsed)) {
|
||||
return repaired
|
||||
}
|
||||
} catch {}
|
||||
}
|
||||
return null
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Async generator that transforms an OpenAI SSE stream into
|
||||
* Anthropic-format BetaRawMessageStreamEvent objects.
|
||||
@@ -486,8 +562,19 @@ async function* openaiStreamToAnthropic(
|
||||
): AsyncGenerator<AnthropicStreamEvent> {
|
||||
const messageId = makeMessageId()
|
||||
let contentBlockIndex = 0
|
||||
const activeToolCalls = new Map<number, { id: string; name: string; index: number; jsonBuffer: string }>()
|
||||
const activeToolCalls = new Map<
|
||||
number,
|
||||
{
|
||||
id: string
|
||||
name: string
|
||||
index: number
|
||||
jsonBuffer: string
|
||||
normalizeAtStop: boolean
|
||||
}
|
||||
>()
|
||||
let hasEmittedContentStart = false
|
||||
let hasEmittedThinkingStart = false
|
||||
let hasClosedThinking = false
|
||||
let lastStopReason: 'tool_use' | 'max_tokens' | 'end_turn' | null = null
|
||||
let hasEmittedFinalUsage = false
|
||||
let hasProcessedFinishReason = false
|
||||
@@ -544,9 +631,34 @@ async function* openaiStreamToAnthropic(
|
||||
for (const choice of chunk.choices ?? []) {
|
||||
const delta = choice.delta
|
||||
|
||||
// Reasoning models (e.g. GLM-5, DeepSeek) may stream chain-of-thought
|
||||
// in `reasoning_content` before the actual reply appears in `content`.
|
||||
// Emit reasoning as a thinking block and content as a text block.
|
||||
if (delta.reasoning_content != null && delta.reasoning_content !== '') {
|
||||
if (!hasEmittedThinkingStart) {
|
||||
yield {
|
||||
type: 'content_block_start',
|
||||
index: contentBlockIndex,
|
||||
content_block: { type: 'thinking', thinking: '' },
|
||||
}
|
||||
hasEmittedThinkingStart = true
|
||||
}
|
||||
yield {
|
||||
type: 'content_block_delta',
|
||||
index: contentBlockIndex,
|
||||
delta: { type: 'thinking_delta', thinking: delta.reasoning_content },
|
||||
}
|
||||
}
|
||||
|
||||
// Text content — use != null to distinguish absent field from empty string,
|
||||
// some providers send "" as first delta to signal streaming start
|
||||
if (delta.content != null) {
|
||||
if (delta.content != null && delta.content !== '') {
|
||||
// Close thinking block if transitioning from reasoning to content
|
||||
if (hasEmittedThinkingStart && !hasClosedThinking) {
|
||||
yield { type: 'content_block_stop', index: contentBlockIndex }
|
||||
contentBlockIndex++
|
||||
hasClosedThinking = true
|
||||
}
|
||||
if (!hasEmittedContentStart) {
|
||||
yield {
|
||||
type: 'content_block_start',
|
||||
@@ -566,7 +678,12 @@ async function* openaiStreamToAnthropic(
|
||||
if (delta.tool_calls) {
|
||||
for (const tc of delta.tool_calls) {
|
||||
if (tc.id && tc.function?.name) {
|
||||
// New tool call starting
|
||||
// New tool call starting — close any open thinking block first
|
||||
if (hasEmittedThinkingStart && !hasClosedThinking) {
|
||||
yield { type: 'content_block_stop', index: contentBlockIndex }
|
||||
contentBlockIndex++
|
||||
hasClosedThinking = true
|
||||
}
|
||||
if (hasEmittedContentStart) {
|
||||
yield {
|
||||
type: 'content_block_stop',
|
||||
@@ -577,11 +694,14 @@ async function* openaiStreamToAnthropic(
|
||||
}
|
||||
|
||||
const toolBlockIndex = contentBlockIndex
|
||||
const initialArguments = tc.function.arguments ?? ''
|
||||
const normalizeAtStop = hasToolFieldMapping(tc.function.name)
|
||||
activeToolCalls.set(tc.index, {
|
||||
id: tc.id,
|
||||
name: tc.function.name,
|
||||
index: toolBlockIndex,
|
||||
jsonBuffer: tc.function.arguments ?? '',
|
||||
jsonBuffer: initialArguments,
|
||||
normalizeAtStop,
|
||||
})
|
||||
|
||||
yield {
|
||||
@@ -593,12 +713,19 @@ async function* openaiStreamToAnthropic(
|
||||
name: tc.function.name,
|
||||
input: {},
|
||||
...(tc.extra_content ? { extra_content: tc.extra_content } : {}),
|
||||
// Extract Gemini signature from extra_content
|
||||
...((tc.extra_content?.google as any)?.thought_signature
|
||||
? {
|
||||
signature: (tc.extra_content.google as any)
|
||||
.thought_signature,
|
||||
}
|
||||
: {}),
|
||||
},
|
||||
}
|
||||
contentBlockIndex++
|
||||
|
||||
// Emit any initial arguments
|
||||
if (tc.function.arguments) {
|
||||
if (tc.function.arguments && !normalizeAtStop) {
|
||||
yield {
|
||||
type: 'content_block_delta',
|
||||
index: toolBlockIndex,
|
||||
@@ -615,6 +742,11 @@ async function* openaiStreamToAnthropic(
|
||||
if (tc.function.arguments) {
|
||||
active.jsonBuffer += tc.function.arguments
|
||||
}
|
||||
|
||||
if (active.normalizeAtStop) {
|
||||
continue
|
||||
}
|
||||
|
||||
yield {
|
||||
type: 'content_block_delta',
|
||||
index: active.index,
|
||||
@@ -633,6 +765,12 @@ async function* openaiStreamToAnthropic(
|
||||
if (choice.finish_reason && !hasProcessedFinishReason) {
|
||||
hasProcessedFinishReason = true
|
||||
|
||||
// Close any open thinking block that wasn't closed by content transition
|
||||
if (hasEmittedThinkingStart && !hasClosedThinking) {
|
||||
yield { type: 'content_block_stop', index: contentBlockIndex }
|
||||
contentBlockIndex++
|
||||
hasClosedThinking = true
|
||||
}
|
||||
// Close any open content blocks
|
||||
if (hasEmittedContentStart) {
|
||||
yield {
|
||||
@@ -642,16 +780,44 @@ async function* openaiStreamToAnthropic(
|
||||
}
|
||||
// Close active tool calls
|
||||
for (const [, tc] of activeToolCalls) {
|
||||
if (tc.normalizeAtStop) {
|
||||
let partialJson: string
|
||||
if (choice.finish_reason === 'length') {
|
||||
// Truncated by max tokens — preserve raw buffer to avoid
|
||||
// turning an incomplete tool call into an executable command
|
||||
partialJson = tc.jsonBuffer
|
||||
} else {
|
||||
const repairedStructuredJson = repairPossiblyTruncatedObjectJson(
|
||||
tc.jsonBuffer,
|
||||
)
|
||||
if (repairedStructuredJson) {
|
||||
partialJson = repairedStructuredJson
|
||||
} else {
|
||||
partialJson = JSON.stringify(
|
||||
normalizeToolArguments(tc.name, tc.jsonBuffer),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
yield {
|
||||
type: 'content_block_delta',
|
||||
index: tc.index,
|
||||
delta: {
|
||||
type: 'input_json_delta',
|
||||
partial_json: partialJson,
|
||||
},
|
||||
}
|
||||
yield { type: 'content_block_stop', index: tc.index }
|
||||
continue
|
||||
}
|
||||
|
||||
let suffixToAdd = ''
|
||||
if (tc.jsonBuffer) {
|
||||
try {
|
||||
JSON.parse(tc.jsonBuffer)
|
||||
} catch {
|
||||
const str = tc.jsonBuffer.trimEnd()
|
||||
const combinations = [
|
||||
'}', '"}', ']}', '"]}', '}}', '"}}', ']}}', '"]}}', '"]}]}', '}]}'
|
||||
]
|
||||
for (const combo of combinations) {
|
||||
for (const combo of JSON_REPAIR_SUFFIXES) {
|
||||
try {
|
||||
JSON.parse(str + combo)
|
||||
suffixToAdd = combo
|
||||
@@ -930,7 +1096,7 @@ class OpenAIShimMessages {
|
||||
...(options?.headers ?? {}),
|
||||
}
|
||||
|
||||
const isGemini = isEnvTruthy(process.env.CLAUDE_CODE_USE_GEMINI)
|
||||
const isGemini = isGeminiMode()
|
||||
const apiKey =
|
||||
this.providerOverride?.apiKey ?? process.env.OPENAI_API_KEY ?? ''
|
||||
// Detect Azure endpoints by hostname (not raw URL) to prevent bypass via
|
||||
@@ -1043,6 +1209,7 @@ class OpenAIShimMessages {
|
||||
| string
|
||||
| null
|
||||
| Array<{ type?: string; text?: string }>
|
||||
reasoning_content?: string | null
|
||||
tool_calls?: Array<{
|
||||
id: string
|
||||
function: { name: string; arguments: string }
|
||||
@@ -1064,7 +1231,17 @@ class OpenAIShimMessages {
|
||||
const choice = data.choices?.[0]
|
||||
const content: Array<Record<string, unknown>> = []
|
||||
|
||||
const rawContent = choice?.message?.content
|
||||
// Some reasoning models (e.g. GLM-5) put their reply in reasoning_content
|
||||
// while content stays null — emit reasoning as a thinking block, then
|
||||
// fall back to it for visible text if content is empty.
|
||||
const reasoningText = choice?.message?.reasoning_content
|
||||
if (typeof reasoningText === 'string' && reasoningText) {
|
||||
content.push({ type: 'thinking', thinking: reasoningText })
|
||||
}
|
||||
const rawContent =
|
||||
choice?.message?.content !== '' && choice?.message?.content != null
|
||||
? choice?.message?.content
|
||||
: choice?.message?.reasoning_content
|
||||
if (typeof rawContent === 'string' && rawContent) {
|
||||
content.push({ type: 'text', text: rawContent })
|
||||
} else if (Array.isArray(rawContent) && rawContent.length > 0) {
|
||||
@@ -1087,18 +1264,20 @@ class OpenAIShimMessages {
|
||||
|
||||
if (choice?.message?.tool_calls) {
|
||||
for (const tc of choice.message.tool_calls) {
|
||||
let input: unknown
|
||||
try {
|
||||
input = JSON.parse(tc.function.arguments)
|
||||
} catch {
|
||||
input = { raw: tc.function.arguments }
|
||||
}
|
||||
const input = normalizeToolArguments(
|
||||
tc.function.name,
|
||||
tc.function.arguments,
|
||||
)
|
||||
content.push({
|
||||
type: 'tool_use',
|
||||
id: tc.id,
|
||||
name: tc.function.name,
|
||||
input,
|
||||
...(tc.extra_content ? { extra_content: tc.extra_content } : {}),
|
||||
// Extract Gemini signature from extra_content
|
||||
...((tc.extra_content?.google as any)?.thought_signature
|
||||
? { signature: (tc.extra_content.google as any).thought_signature }
|
||||
: {}),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
180
src/services/api/toolArgumentNormalization.test.ts
Normal file
180
src/services/api/toolArgumentNormalization.test.ts
Normal file
@@ -0,0 +1,180 @@
|
||||
import { describe, expect, test } from 'bun:test'
|
||||
import { normalizeToolArguments } from './toolArgumentNormalization'
|
||||
|
||||
describe('normalizeToolArguments', () => {
|
||||
describe('Bash tool', () => {
|
||||
test('wraps plain string into { command }', () => {
|
||||
expect(normalizeToolArguments('Bash', 'pwd')).toEqual({ command: 'pwd' })
|
||||
})
|
||||
|
||||
test('wraps multi-word command', () => {
|
||||
expect(normalizeToolArguments('Bash', 'ls -la /tmp')).toEqual({
|
||||
command: 'ls -la /tmp',
|
||||
})
|
||||
})
|
||||
|
||||
test('passes through structured JSON object', () => {
|
||||
expect(
|
||||
normalizeToolArguments('Bash', '{"command":"echo hi"}'),
|
||||
).toEqual({ command: 'echo hi' })
|
||||
})
|
||||
|
||||
test('returns empty object for blank string', () => {
|
||||
expect(normalizeToolArguments('Bash', '')).toEqual({})
|
||||
expect(normalizeToolArguments('Bash', ' ')).toEqual({})
|
||||
})
|
||||
|
||||
test('returns parsed blank for JSON-encoded blank string', () => {
|
||||
expect(normalizeToolArguments('Bash', '""')).toEqual('')
|
||||
expect(normalizeToolArguments('Bash', '" "')).toEqual(' ')
|
||||
})
|
||||
|
||||
test('returns empty object for malformed structured object literal', () => {
|
||||
expect(normalizeToolArguments('Bash', '{ "command": "pwd"')).toEqual({})
|
||||
})
|
||||
|
||||
test.each([
|
||||
['{command:"pwd"}'],
|
||||
["{'command':'pwd'}"],
|
||||
['{command: pwd}'],
|
||||
])(
|
||||
'returns empty object for malformed object-shaped string %s (does not wrap into command)',
|
||||
(input) => {
|
||||
expect(normalizeToolArguments('Bash', input)).toEqual({})
|
||||
},
|
||||
)
|
||||
|
||||
test.each([
|
||||
['false', false],
|
||||
['null', null],
|
||||
['[]', [] as unknown[]],
|
||||
['0', 0],
|
||||
['true', true],
|
||||
['123', 123],
|
||||
])(
|
||||
'preserves JSON literal %s as-is (does not wrap into command)',
|
||||
(input, expected) => {
|
||||
expect(normalizeToolArguments('Bash', input)).toEqual(expected)
|
||||
},
|
||||
)
|
||||
|
||||
test('wraps JSON-encoded string into { command }', () => {
|
||||
expect(normalizeToolArguments('Bash', '"pwd"')).toEqual({
|
||||
command: 'pwd',
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
describe('undefined arguments', () => {
|
||||
test('returns empty object for undefined', () => {
|
||||
expect(normalizeToolArguments('Bash', undefined)).toEqual({})
|
||||
expect(normalizeToolArguments('UnknownTool', undefined)).toEqual({})
|
||||
})
|
||||
})
|
||||
|
||||
describe('Read tool', () => {
|
||||
test('wraps plain string into { file_path }', () => {
|
||||
expect(normalizeToolArguments('Read', '/home/user/file.txt')).toEqual({
|
||||
file_path: '/home/user/file.txt',
|
||||
})
|
||||
})
|
||||
|
||||
test('wraps JSON-encoded string into { file_path }', () => {
|
||||
expect(normalizeToolArguments('Read', '"/home/user/file.txt"')).toEqual({
|
||||
file_path: '/home/user/file.txt',
|
||||
})
|
||||
})
|
||||
|
||||
test('passes through structured JSON object', () => {
|
||||
expect(
|
||||
normalizeToolArguments('Read', '{"file_path":"/tmp/f.txt","limit":10}'),
|
||||
).toEqual({ file_path: '/tmp/f.txt', limit: 10 })
|
||||
})
|
||||
})
|
||||
|
||||
describe('Write tool', () => {
|
||||
test('wraps plain string into { file_path }', () => {
|
||||
expect(normalizeToolArguments('Write', '/tmp/out.txt')).toEqual({
|
||||
file_path: '/tmp/out.txt',
|
||||
})
|
||||
})
|
||||
|
||||
test('passes through structured JSON object', () => {
|
||||
expect(
|
||||
normalizeToolArguments(
|
||||
'Write',
|
||||
'{"file_path":"/tmp/out.txt","content":"hello"}',
|
||||
),
|
||||
).toEqual({ file_path: '/tmp/out.txt', content: 'hello' })
|
||||
})
|
||||
})
|
||||
|
||||
describe('Edit tool', () => {
|
||||
test('wraps plain string into { file_path }', () => {
|
||||
expect(normalizeToolArguments('Edit', '/tmp/edit.ts')).toEqual({
|
||||
file_path: '/tmp/edit.ts',
|
||||
})
|
||||
})
|
||||
|
||||
test('passes through structured JSON object', () => {
|
||||
expect(
|
||||
normalizeToolArguments(
|
||||
'Edit',
|
||||
'{"file_path":"/tmp/f.ts","old_string":"a","new_string":"b"}',
|
||||
),
|
||||
).toEqual({ file_path: '/tmp/f.ts', old_string: 'a', new_string: 'b' })
|
||||
})
|
||||
})
|
||||
|
||||
describe('Glob tool', () => {
|
||||
test('wraps plain string into { pattern }', () => {
|
||||
expect(normalizeToolArguments('Glob', '**/*.ts')).toEqual({
|
||||
pattern: '**/*.ts',
|
||||
})
|
||||
})
|
||||
|
||||
test('passes through structured JSON object', () => {
|
||||
expect(
|
||||
normalizeToolArguments('Glob', '{"pattern":"*.js","path":"/src"}'),
|
||||
).toEqual({ pattern: '*.js', path: '/src' })
|
||||
})
|
||||
})
|
||||
|
||||
describe('Grep tool', () => {
|
||||
test('wraps plain string into { pattern }', () => {
|
||||
expect(normalizeToolArguments('Grep', 'TODO')).toEqual({
|
||||
pattern: 'TODO',
|
||||
})
|
||||
})
|
||||
|
||||
test('passes through structured JSON object', () => {
|
||||
expect(
|
||||
normalizeToolArguments('Grep', '{"pattern":"fixme","path":"/src"}'),
|
||||
).toEqual({ pattern: 'fixme', path: '/src' })
|
||||
})
|
||||
})
|
||||
|
||||
describe('unknown tools', () => {
|
||||
test('returns empty object for plain string (no known field mapping)', () => {
|
||||
expect(normalizeToolArguments('UnknownTool', 'some value')).toEqual({})
|
||||
})
|
||||
|
||||
test('passes through structured JSON object', () => {
|
||||
expect(
|
||||
normalizeToolArguments('UnknownTool', '{"key":"val"}'),
|
||||
).toEqual({ key: 'val' })
|
||||
})
|
||||
|
||||
test('preserves JSON literals as-is', () => {
|
||||
expect(normalizeToolArguments('UnknownTool', 'false')).toEqual(false)
|
||||
expect(normalizeToolArguments('UnknownTool', 'null')).toEqual(null)
|
||||
expect(normalizeToolArguments('UnknownTool', '[]')).toEqual([])
|
||||
})
|
||||
|
||||
test('returns parsed string for JSON-encoded string on unknown tools', () => {
|
||||
expect(normalizeToolArguments('UnknownTool', '"hello"')).toEqual(
|
||||
'hello',
|
||||
)
|
||||
})
|
||||
})
|
||||
})
|
||||
69
src/services/api/toolArgumentNormalization.ts
Normal file
69
src/services/api/toolArgumentNormalization.ts
Normal file
@@ -0,0 +1,69 @@
|
||||
const STRING_ARGUMENT_TOOL_FIELDS: Record<string, string> = {
|
||||
Bash: 'command',
|
||||
Read: 'file_path',
|
||||
Write: 'file_path',
|
||||
Edit: 'file_path',
|
||||
Glob: 'pattern',
|
||||
Grep: 'pattern',
|
||||
}
|
||||
|
||||
function isBlankString(value: string): boolean {
|
||||
return value.trim().length === 0
|
||||
}
|
||||
|
||||
function isLikelyStructuredObjectLiteral(value: string): boolean {
|
||||
// Match object-like patterns with key-value syntax:
|
||||
// {"key":, {key:, {'key':, { "key" :, etc.
|
||||
// But NOT bash compound commands like { pwd; } or { echo hi; }
|
||||
return /^\s*\{\s*['"]?\w+['"]?\s*:/.test(value)
|
||||
}
|
||||
|
||||
function isRecord(value: unknown): value is Record<string, unknown> {
|
||||
return typeof value === 'object' && value !== null && !Array.isArray(value)
|
||||
}
|
||||
|
||||
function getPlainStringToolArgumentField(toolName: string): string | null {
|
||||
return STRING_ARGUMENT_TOOL_FIELDS[toolName] ?? null
|
||||
}
|
||||
|
||||
export function hasToolFieldMapping(toolName: string): boolean {
|
||||
return toolName in STRING_ARGUMENT_TOOL_FIELDS
|
||||
}
|
||||
|
||||
function wrapPlainStringToolArguments(
|
||||
toolName: string,
|
||||
value: string,
|
||||
): Record<string, string> | null {
|
||||
const field = getPlainStringToolArgumentField(toolName)
|
||||
if (!field) return null
|
||||
return { [field]: value }
|
||||
}
|
||||
|
||||
export function normalizeToolArguments(
|
||||
toolName: string,
|
||||
rawArguments: string | undefined,
|
||||
): unknown {
|
||||
if (rawArguments === undefined) return {}
|
||||
|
||||
try {
|
||||
const parsed = JSON.parse(rawArguments)
|
||||
if (isRecord(parsed)) {
|
||||
return parsed
|
||||
}
|
||||
// Parsed as a non-object JSON value (string, number, boolean, null, array)
|
||||
if (typeof parsed === 'string' && !isBlankString(parsed)) {
|
||||
return wrapPlainStringToolArguments(toolName, parsed) ?? parsed
|
||||
}
|
||||
// For blank strings, booleans, null, arrays — pass through as-is
|
||||
// and let Zod schema validation produce a meaningful error
|
||||
return parsed
|
||||
} catch {
|
||||
// rawArguments is not valid JSON — treat as a plain string
|
||||
if (isBlankString(rawArguments) || isLikelyStructuredObjectLiteral(rawArguments)) {
|
||||
// Blank or looks like a malformed object literal — don't wrap into
|
||||
// a tool field to avoid turning garbage into executable input
|
||||
return {}
|
||||
}
|
||||
return wrapPlainStringToolArguments(toolName, rawArguments) ?? {}
|
||||
}
|
||||
}
|
||||
127
src/services/compact/microCompact.test.ts
Normal file
127
src/services/compact/microCompact.test.ts
Normal file
@@ -0,0 +1,127 @@
|
||||
import { describe, expect, test } from 'bun:test'
|
||||
|
||||
import type { Message } from '../../types/message.js'
|
||||
import { createAssistantMessage, createUserMessage } from '../../utils/messages.js'
|
||||
|
||||
// We test the exported collectCompactableToolIds behavior indirectly via
|
||||
// the public microcompactMessages + time-based path. But first we need to
|
||||
// verify the core predicate: MCP tools (prefixed 'mcp__') should be
|
||||
// compactable alongside the built-in tool set.
|
||||
|
||||
// Import internals we can test
|
||||
import { evaluateTimeBasedTrigger } from './microCompact.js'
|
||||
|
||||
/**
|
||||
* Helper: build a minimal assistant message with a tool_use block.
|
||||
*/
|
||||
function assistantWithToolUse(toolName: string, toolId: string): Message {
|
||||
return createAssistantMessage({
|
||||
content: [
|
||||
{
|
||||
type: 'tool_use' as const,
|
||||
id: toolId,
|
||||
name: toolName,
|
||||
input: {},
|
||||
},
|
||||
],
|
||||
})
|
||||
}
|
||||
|
||||
/**
|
||||
* Helper: build a user message with a tool_result block.
|
||||
*/
|
||||
function userWithToolResult(toolId: string, output: string): Message {
|
||||
return createUserMessage({
|
||||
content: [
|
||||
{
|
||||
type: 'tool_result' as const,
|
||||
tool_use_id: toolId,
|
||||
content: output,
|
||||
},
|
||||
],
|
||||
})
|
||||
}
|
||||
|
||||
describe('microCompact MCP tool compaction', () => {
|
||||
// We can't easily unit-test the private isCompactableTool directly,
|
||||
// but we can test the full time-based microcompact path which exercises
|
||||
// collectCompactableToolIds → isCompactableTool under the hood.
|
||||
// The time-based path is the simplest to trigger: it content-clears
|
||||
// old tool results when the gap since last assistant message exceeds
|
||||
// the threshold.
|
||||
|
||||
// However, evaluateTimeBasedTrigger depends on config (GrowthBook).
|
||||
// So instead, let's test the observable behavior by importing the
|
||||
// microcompactMessages function and checking that MCP tool_use blocks
|
||||
// are collected.
|
||||
|
||||
// Since collectCompactableToolIds is not exported, we test the predicate
|
||||
// behavior by verifying that the module loads without error and that
|
||||
// built-in and MCP tools are treated consistently.
|
||||
|
||||
test('module exports load correctly', async () => {
|
||||
const mod = await import('./microCompact.js')
|
||||
expect(mod.microcompactMessages).toBeFunction()
|
||||
expect(mod.estimateMessageTokens).toBeFunction()
|
||||
expect(mod.evaluateTimeBasedTrigger).toBeFunction()
|
||||
})
|
||||
|
||||
test('estimateMessageTokens counts MCP tool_use blocks', async () => {
|
||||
const { estimateMessageTokens } = await import('./microCompact.js')
|
||||
|
||||
const builtinMessages: Message[] = [
|
||||
assistantWithToolUse('Read', 'tool-builtin-1'),
|
||||
userWithToolResult('tool-builtin-1', 'file contents here'),
|
||||
]
|
||||
|
||||
const mcpMessages: Message[] = [
|
||||
assistantWithToolUse('mcp__github__get_file_contents', 'tool-mcp-1'),
|
||||
userWithToolResult('tool-mcp-1', 'file contents here'),
|
||||
]
|
||||
|
||||
const builtinTokens = estimateMessageTokens(builtinMessages)
|
||||
const mcpTokens = estimateMessageTokens(mcpMessages)
|
||||
|
||||
// Both should produce non-zero estimates
|
||||
expect(builtinTokens).toBeGreaterThan(0)
|
||||
expect(mcpTokens).toBeGreaterThan(0)
|
||||
|
||||
// The tool_result content is identical, so token estimates should be
|
||||
// similar (tool_use name differs slightly, so not exactly equal)
|
||||
expect(Math.abs(builtinTokens - mcpTokens)).toBeLessThan(50)
|
||||
})
|
||||
|
||||
test('microcompactMessages processes MCP tools without error', async () => {
|
||||
const { microcompactMessages } = await import('./microCompact.js')
|
||||
|
||||
const messages: Message[] = [
|
||||
assistantWithToolUse('mcp__slack__send_message', 'tool-mcp-2'),
|
||||
userWithToolResult('tool-mcp-2', 'Message sent successfully'),
|
||||
assistantWithToolUse('mcp__github__create_pull_request', 'tool-mcp-3'),
|
||||
userWithToolResult('tool-mcp-3', JSON.stringify({ number: 42, url: 'https://github.com/org/repo/pull/42' })),
|
||||
]
|
||||
|
||||
// Should not throw — MCP tools should be handled gracefully
|
||||
const result = await microcompactMessages(messages)
|
||||
expect(result).toBeDefined()
|
||||
expect(result.messages).toBeDefined()
|
||||
expect(result.messages.length).toBe(messages.length)
|
||||
})
|
||||
|
||||
test('microcompactMessages processes mixed built-in and MCP tools', async () => {
|
||||
const { microcompactMessages } = await import('./microCompact.js')
|
||||
|
||||
const messages: Message[] = [
|
||||
assistantWithToolUse('Read', 'tool-read-1'),
|
||||
userWithToolResult('tool-read-1', 'some file content'),
|
||||
assistantWithToolUse('mcp__playwright__screenshot', 'tool-mcp-4'),
|
||||
userWithToolResult('tool-mcp-4', 'base64-encoded-screenshot-data'.repeat(100)),
|
||||
assistantWithToolUse('Bash', 'tool-bash-1'),
|
||||
userWithToolResult('tool-bash-1', 'command output'),
|
||||
]
|
||||
|
||||
const result = await microcompactMessages(messages)
|
||||
expect(result).toBeDefined()
|
||||
expect(result.messages.length).toBe(messages.length)
|
||||
})
|
||||
})
|
||||
@@ -37,7 +37,7 @@ export const TIME_BASED_MC_CLEARED_MESSAGE = '[Old tool result content cleared]'
|
||||
|
||||
const IMAGE_MAX_TOKEN_SIZE = 2000
|
||||
|
||||
// Only compact these tools
|
||||
// Only compact these built-in tools (MCP tools are also compactable via prefix match)
|
||||
const COMPACTABLE_TOOLS = new Set<string>([
|
||||
FILE_READ_TOOL_NAME,
|
||||
...SHELL_TOOL_NAMES,
|
||||
@@ -49,7 +49,13 @@ const COMPACTABLE_TOOLS = new Set<string>([
|
||||
FILE_WRITE_TOOL_NAME,
|
||||
])
|
||||
|
||||
// --- Cached microcompact state (internal-only, gated by feature('CACHED_MICROCOMPACT')) ---
|
||||
const MCP_TOOL_PREFIX = 'mcp__'
|
||||
|
||||
function isCompactableTool(name: string): boolean {
|
||||
return COMPACTABLE_TOOLS.has(name) || name.startsWith(MCP_TOOL_PREFIX)
|
||||
}
|
||||
|
||||
// --- Cached microcompact state (gated by feature('CACHED_MICROCOMPACT')) ---
|
||||
|
||||
// Lazy-initialized cached MC module and state to avoid importing in external builds.
|
||||
// The imports and state live inside feature() checks for dead code elimination.
|
||||
@@ -231,7 +237,7 @@ function collectCompactableToolIds(messages: Message[]): string[] {
|
||||
Array.isArray(message.message.content)
|
||||
) {
|
||||
for (const block of message.message.content) {
|
||||
if (block.type === 'tool_use' && COMPACTABLE_TOOLS.has(block.name)) {
|
||||
if (block.type === 'tool_use' && isCompactableTool(block.name)) {
|
||||
ids.push(block.id)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -4,6 +4,10 @@ import { tmpdir } from 'os'
|
||||
import { join } from 'path'
|
||||
import { extractDraggedFilePaths } from './dragDropPaths.js'
|
||||
|
||||
function escapeFinderDraggedPath(filePath: string): string {
|
||||
return filePath.replace(/([\\ ])/g, '\\$1')
|
||||
}
|
||||
|
||||
describe('extractDraggedFilePaths', () => {
|
||||
// Paths that exist on any system.
|
||||
const thisFile = import.meta.path
|
||||
@@ -80,6 +84,12 @@ describe('extractDraggedFilePaths', () => {
|
||||
})
|
||||
})
|
||||
|
||||
test('escapeFinderDraggedPath escapes spaces and backslashes', () => {
|
||||
expect(escapeFinderDraggedPath('/tmp/my\\notes file.txt')).toBe(
|
||||
'/tmp/my\\\\notes\\ file.txt',
|
||||
)
|
||||
})
|
||||
|
||||
// Backslash-escaped paths are a Finder/macOS + Linux convention — on
|
||||
// Windows the shell-escape step is skipped, so these cases do not apply.
|
||||
if (process.platform !== 'win32') {
|
||||
@@ -92,7 +102,7 @@ describe('extractDraggedFilePaths', () => {
|
||||
|
||||
test('resolves an escaped real file with a space in its name', () => {
|
||||
// Raw form matches what a terminal delivers on Finder drag.
|
||||
const escaped = spacedFile.replace(/ /g, '\\ ')
|
||||
const escaped = escapeFinderDraggedPath(spacedFile)
|
||||
expect(extractDraggedFilePaths(escaped)).toEqual([spacedFile])
|
||||
})
|
||||
})
|
||||
|
||||
@@ -41,7 +41,7 @@ describe('hydrateGithubModelsTokenFromSecureStorage', () => {
|
||||
}))
|
||||
|
||||
const { hydrateGithubModelsTokenFromSecureStorage } = await import(
|
||||
'./githubModelsCredentials.js'
|
||||
'./githubModelsCredentials.js?hydrate=sets-token'
|
||||
)
|
||||
hydrateGithubModelsTokenFromSecureStorage()
|
||||
expect(process.env.GITHUB_TOKEN).toBe('stored-secret')
|
||||
@@ -62,7 +62,7 @@ describe('hydrateGithubModelsTokenFromSecureStorage', () => {
|
||||
}))
|
||||
|
||||
const { hydrateGithubModelsTokenFromSecureStorage } = await import(
|
||||
'./githubModelsCredentials.js'
|
||||
'./githubModelsCredentials.js?hydrate=preserve-existing'
|
||||
)
|
||||
hydrateGithubModelsTokenFromSecureStorage()
|
||||
expect(process.env.GITHUB_TOKEN).toBe('already')
|
||||
|
||||
@@ -1,13 +1,11 @@
|
||||
import { describe, expect, test } from 'bun:test'
|
||||
|
||||
import {
|
||||
clearGithubModelsToken,
|
||||
readGithubModelsToken,
|
||||
saveGithubModelsToken,
|
||||
} from './githubModelsCredentials.js'
|
||||
|
||||
describe('readGithubModelsToken', () => {
|
||||
test('returns undefined in bare mode', () => {
|
||||
test('returns undefined in bare mode', async () => {
|
||||
const { readGithubModelsToken } = await import(
|
||||
'./githubModelsCredentials.js?read-bare-mode'
|
||||
)
|
||||
|
||||
const prev = process.env.CLAUDE_CODE_SIMPLE
|
||||
process.env.CLAUDE_CODE_SIMPLE = '1'
|
||||
expect(readGithubModelsToken()).toBeUndefined()
|
||||
@@ -20,7 +18,11 @@ describe('readGithubModelsToken', () => {
|
||||
})
|
||||
|
||||
describe('saveGithubModelsToken / clearGithubModelsToken', () => {
|
||||
test('save returns failure in bare mode', () => {
|
||||
test('save returns failure in bare mode', async () => {
|
||||
const { saveGithubModelsToken } = await import(
|
||||
'./githubModelsCredentials.js?save-bare-mode'
|
||||
)
|
||||
|
||||
const prev = process.env.CLAUDE_CODE_SIMPLE
|
||||
process.env.CLAUDE_CODE_SIMPLE = '1'
|
||||
const r = saveGithubModelsToken('abc')
|
||||
@@ -33,7 +35,11 @@ describe('saveGithubModelsToken / clearGithubModelsToken', () => {
|
||||
}
|
||||
})
|
||||
|
||||
test('clear succeeds in bare mode', () => {
|
||||
test('clear succeeds in bare mode', async () => {
|
||||
const { clearGithubModelsToken } = await import(
|
||||
'./githubModelsCredentials.js?clear-bare-mode'
|
||||
)
|
||||
|
||||
const prev = process.env.CLAUDE_CODE_SIMPLE
|
||||
process.env.CLAUDE_CODE_SIMPLE = '1'
|
||||
expect(clearGithubModelsToken().success).toBe(true)
|
||||
|
||||
@@ -23,6 +23,19 @@ export function readGithubModelsToken(): string | undefined {
|
||||
}
|
||||
}
|
||||
|
||||
export async function readGithubModelsTokenAsync(): Promise<string | undefined> {
|
||||
if (isBareMode()) return undefined
|
||||
try {
|
||||
const data = (await getSecureStorage().readAsync()) as
|
||||
| ({ githubModels?: GithubModelsCredentialBlob } & Record<string, unknown>)
|
||||
| null
|
||||
const t = data?.githubModels?.accessToken?.trim()
|
||||
return t || undefined
|
||||
} catch {
|
||||
return undefined
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* If GitHub Models mode is on and no token is in the environment, copy the
|
||||
* stored token into process.env so the OpenAI shim and validation see it.
|
||||
|
||||
@@ -8,6 +8,7 @@ import {
|
||||
} from './managedEnvConstants.js'
|
||||
import { clearMTLSCache } from './mtls.js'
|
||||
import { clearProxyCache, configureGlobalAgents } from './proxy.js'
|
||||
import { filterSettingsEnvForExplicitProvider } from './providerEnvSelection.js'
|
||||
import { applyActiveProviderProfileFromConfig } from './providerProfiles.js'
|
||||
import { isSettingSourceEnabled } from './settings/constants.js'
|
||||
import {
|
||||
@@ -87,7 +88,9 @@ function filterSettingsEnv(
|
||||
env: Record<string, string> | undefined,
|
||||
): Record<string, string> {
|
||||
return withoutCcdSpawnEnvKeys(
|
||||
withoutHostManagedProviderVars(withoutSSHTunnelVars(env)),
|
||||
filterSettingsEnvForExplicitProvider(
|
||||
withoutHostManagedProviderVars(withoutSSHTunnelVars(env)),
|
||||
),
|
||||
)
|
||||
}
|
||||
|
||||
|
||||
@@ -1,7 +1,17 @@
|
||||
import { afterEach, expect, mock, test } from 'bun:test'
|
||||
import { afterEach, beforeEach, expect, mock, test } from 'bun:test'
|
||||
|
||||
import { resetModelStringsForTestingOnly } from '../../bootstrap/state.js'
|
||||
import { saveGlobalConfig } from '../config.js'
|
||||
|
||||
async function importFreshModelOptionsModule() {
|
||||
mock.restore()
|
||||
mock.module('./providers.js', () => ({
|
||||
getAPIProvider: () => 'github',
|
||||
}))
|
||||
const nonce = `${Date.now()}-${Math.random()}`
|
||||
return import(`./modelOptions.js?ts=${nonce}`)
|
||||
}
|
||||
|
||||
const originalEnv = {
|
||||
CLAUDE_CODE_USE_GITHUB: process.env.CLAUDE_CODE_USE_GITHUB,
|
||||
CLAUDE_CODE_USE_OPENAI: process.env.CLAUDE_CODE_USE_OPENAI,
|
||||
@@ -14,6 +24,20 @@ const originalEnv = {
|
||||
ANTHROPIC_CUSTOM_MODEL_OPTION: process.env.ANTHROPIC_CUSTOM_MODEL_OPTION,
|
||||
}
|
||||
|
||||
beforeEach(() => {
|
||||
mock.restore()
|
||||
delete process.env.CLAUDE_CODE_USE_GITHUB
|
||||
delete process.env.CLAUDE_CODE_USE_OPENAI
|
||||
delete process.env.CLAUDE_CODE_USE_GEMINI
|
||||
delete process.env.CLAUDE_CODE_USE_BEDROCK
|
||||
delete process.env.CLAUDE_CODE_USE_VERTEX
|
||||
delete process.env.CLAUDE_CODE_USE_FOUNDRY
|
||||
delete process.env.OPENAI_MODEL
|
||||
delete process.env.OPENAI_BASE_URL
|
||||
delete process.env.ANTHROPIC_CUSTOM_MODEL_OPTION
|
||||
resetModelStringsForTestingOnly()
|
||||
})
|
||||
|
||||
afterEach(() => {
|
||||
process.env.CLAUDE_CODE_USE_GITHUB = originalEnv.CLAUDE_CODE_USE_GITHUB
|
||||
process.env.CLAUDE_CODE_USE_OPENAI = originalEnv.CLAUDE_CODE_USE_OPENAI
|
||||
@@ -34,17 +58,9 @@ afterEach(() => {
|
||||
providerProfiles: [],
|
||||
activeProviderProfileId: undefined,
|
||||
}))
|
||||
resetModelStringsForTestingOnly()
|
||||
})
|
||||
|
||||
async function importFreshModelOptionsModule() {
|
||||
mock.restore()
|
||||
mock.module('./providers.js', () => ({
|
||||
getAPIProvider: () => 'github',
|
||||
}))
|
||||
const nonce = `${Date.now()}-${Math.random()}`
|
||||
return import(`./modelOptions.js?ts=${nonce}`)
|
||||
}
|
||||
|
||||
test('GitHub provider exposes only default + GitHub model in /model options', async () => {
|
||||
process.env.CLAUDE_CODE_USE_GITHUB = '1'
|
||||
delete process.env.CLAUDE_CODE_USE_OPENAI
|
||||
@@ -58,7 +74,9 @@ test('GitHub provider exposes only default + GitHub model in /model options', as
|
||||
|
||||
const { getModelOptions } = await importFreshModelOptionsModule()
|
||||
const options = getModelOptions(false)
|
||||
const nonDefault = options.filter(option => option.value !== null)
|
||||
const nonDefault = options.filter(
|
||||
(option: { value: unknown }) => option.value !== null,
|
||||
)
|
||||
|
||||
expect(nonDefault.length).toBe(1)
|
||||
expect(nonDefault[0]?.value).toBe('github:copilot')
|
||||
|
||||
116
src/utils/providerEnvSelection.test.ts
Normal file
116
src/utils/providerEnvSelection.test.ts
Normal file
@@ -0,0 +1,116 @@
|
||||
import { afterEach, beforeEach, describe, expect, test } from 'bun:test'
import { filterSettingsEnvForExplicitProvider } from './providerEnvSelection.js'

// Snapshot of process.env taken at module load; afterEach restores from it.
const originalEnv = { ...process.env }

// Every env var these tests mutate: cleared before, restored after, each test.
const RESET_KEYS = [
  'CLAUDE_CODE_EXPLICIT_PROVIDER',
  'CLAUDE_CODE_USE_OPENAI',
  'CLAUDE_CODE_USE_GEMINI',
  'CLAUDE_CODE_USE_GITHUB',
  'CLAUDE_CODE_USE_BEDROCK',
  'CLAUDE_CODE_USE_VERTEX',
  'CLAUDE_CODE_USE_FOUNDRY',
] as const

// Start each test from a clean slate: no provider selection vars set.
beforeEach(() => {
  for (const key of RESET_KEYS) {
    delete process.env[key]
  }
})

// Put back the pre-test value (or absence) of each var we may have touched.
afterEach(() => {
  for (const key of RESET_KEYS) {
    if (originalEnv[key] === undefined) delete process.env[key]
    else process.env[key] = originalEnv[key]
  }
})

describe('filterSettingsEnvForExplicitProvider', () => {
  // A provider flag alone (without CLAUDE_CODE_EXPLICIT_PROVIDER) must not
  // trigger filtering — settings env passes through unchanged.
  test('does not treat plain provider flags as an explicit CLI override', () => {
    process.env.CLAUDE_CODE_USE_GITHUB = '1'

    expect(
      filterSettingsEnvForExplicitProvider({
        CLAUDE_CODE_USE_OPENAI: '1',
        OPENAI_MODEL: 'gpt-4o',
        OTHER: 'keep-me',
      }),
    ).toEqual({
      CLAUDE_CODE_USE_OPENAI: '1',
      OPENAI_MODEL: 'gpt-4o',
      OTHER: 'keep-me',
    })
  })

  // With an explicit CLI provider, settings-sourced provider flags are dropped.
  test('strips settings-sourced provider flags when CLI provider is explicit', () => {
    process.env.CLAUDE_CODE_EXPLICIT_PROVIDER = 'openai'

    expect(
      filterSettingsEnvForExplicitProvider({
        CLAUDE_CODE_USE_GITHUB: '1',
        CLAUDE_CODE_USE_OPENAI: '1',
        OTHER: 'keep-me',
      }),
    ).toEqual({ OTHER: 'keep-me' })
  })

  // A "github:"-prefixed model is stale when the chosen provider is not github.
  test('strips a stale GitHub model when explicit provider is not github', () => {
    process.env.CLAUDE_CODE_EXPLICIT_PROVIDER = 'openai'

    expect(
      filterSettingsEnvForExplicitProvider({
        OPENAI_MODEL: 'github:copilot',
        OTHER: 'keep-me',
      }),
    ).toEqual({ OTHER: 'keep-me' })
  })

  // A regular OpenAI model is compatible with the explicit openai provider.
  test('keeps a normal OpenAI model when explicit provider is openai', () => {
    process.env.CLAUDE_CODE_EXPLICIT_PROVIDER = 'openai'

    expect(
      filterSettingsEnvForExplicitProvider({
        OPENAI_MODEL: 'gpt-4o',
        OTHER: 'keep-me',
      }),
    ).toEqual({ OPENAI_MODEL: 'gpt-4o', OTHER: 'keep-me' })
  })

  // Only "github:" models may survive when the explicit provider is github.
  test('strips a non-GitHub OpenAI model when explicit provider is github', () => {
    process.env.CLAUDE_CODE_EXPLICIT_PROVIDER = 'github'

    expect(
      filterSettingsEnvForExplicitProvider({
        OPENAI_MODEL: 'gpt-4o',
        OTHER: 'keep-me',
      }),
    ).toEqual({ OTHER: 'keep-me' })
  })

  // Anthropic startup must not be hijacked by stale GitHub/OpenAI settings.
  test('preserves anthropic startup intent by stripping stale GitHub/OpenAI settings', () => {
    process.env.CLAUDE_CODE_EXPLICIT_PROVIDER = 'anthropic'

    expect(
      filterSettingsEnvForExplicitProvider({
        CLAUDE_CODE_USE_GITHUB: '1',
        CLAUDE_CODE_USE_OPENAI: '1',
        OPENAI_MODEL: 'github:copilot',
        OTHER: 'keep-me',
      }),
    ).toEqual({ OTHER: 'keep-me' })
  })

  // Ollama manages its own OpenAI-compatible routing; settings-sourced
  // OPENAI_* vars would misroute it and must all be dropped.
  test('preserves explicit ollama startup intent by stripping OpenAI routing settings', () => {
    process.env.CLAUDE_CODE_EXPLICIT_PROVIDER = 'ollama'

    expect(
      filterSettingsEnvForExplicitProvider({
        OPENAI_BASE_URL: 'https://api.openai.com/v1',
        OPENAI_MODEL: 'gpt-4o',
        OPENAI_API_KEY: 'sk-test',
        OTHER: 'keep-me',
      }),
    ).toEqual({ OTHER: 'keep-me' })
  })
})
|
||||
63
src/utils/providerEnvSelection.ts
Normal file
63
src/utils/providerEnvSelection.ts
Normal file
@@ -0,0 +1,63 @@
|
||||
export const EXPLICIT_PROVIDER_ENV_VAR = 'CLAUDE_CODE_EXPLICIT_PROVIDER'
|
||||
|
||||
const PROVIDER_FLAG_KEYS = [
|
||||
'CLAUDE_CODE_USE_OPENAI',
|
||||
'CLAUDE_CODE_USE_GEMINI',
|
||||
'CLAUDE_CODE_USE_GITHUB',
|
||||
'CLAUDE_CODE_USE_BEDROCK',
|
||||
'CLAUDE_CODE_USE_VERTEX',
|
||||
'CLAUDE_CODE_USE_FOUNDRY',
|
||||
] as const
|
||||
|
||||
export function clearProviderSelectionFlags(
|
||||
env: NodeJS.ProcessEnv = process.env,
|
||||
): void {
|
||||
for (const key of PROVIDER_FLAG_KEYS) {
|
||||
delete env[key]
|
||||
}
|
||||
}
|
||||
|
||||
function getExplicitProvider(processEnv: NodeJS.ProcessEnv): string | undefined {
|
||||
return processEnv[EXPLICIT_PROVIDER_ENV_VAR]?.trim() || undefined
|
||||
}
|
||||
|
||||
function isGithubModel(model: string | undefined): boolean {
|
||||
return (model ?? '').trim().toLowerCase().startsWith('github:')
|
||||
}
|
||||
|
||||
export function filterSettingsEnvForExplicitProvider(
|
||||
env: Record<string, string> | undefined,
|
||||
processEnv: NodeJS.ProcessEnv = process.env,
|
||||
): Record<string, string> {
|
||||
if (!env) return {}
|
||||
|
||||
const explicitProvider = getExplicitProvider(processEnv)
|
||||
if (!explicitProvider) {
|
||||
return env
|
||||
}
|
||||
|
||||
const filtered = { ...env }
|
||||
for (const key of PROVIDER_FLAG_KEYS) {
|
||||
delete filtered[key]
|
||||
}
|
||||
|
||||
if (explicitProvider === 'ollama') {
|
||||
delete filtered.OPENAI_BASE_URL
|
||||
delete filtered.OPENAI_MODEL
|
||||
delete filtered.OPENAI_API_KEY
|
||||
return filtered
|
||||
}
|
||||
|
||||
if (explicitProvider === 'github') {
|
||||
if (!isGithubModel(filtered.OPENAI_MODEL)) {
|
||||
delete filtered.OPENAI_MODEL
|
||||
}
|
||||
return filtered
|
||||
}
|
||||
|
||||
if (isGithubModel(filtered.OPENAI_MODEL)) {
|
||||
delete filtered.OPENAI_MODEL
|
||||
}
|
||||
|
||||
return filtered
|
||||
}
|
||||
@@ -1,4 +1,4 @@
|
||||
import { describe, expect, test, afterEach } from 'bun:test'
|
||||
import { afterEach, beforeEach, describe, expect, test } from 'bun:test'
|
||||
import {
|
||||
parseProviderFlag,
|
||||
applyProviderFlag,
|
||||
@@ -8,18 +8,28 @@ import {
|
||||
|
||||
const originalEnv = { ...process.env }
|
||||
|
||||
const RESET_KEYS = [
|
||||
'CLAUDE_CODE_EXPLICIT_PROVIDER',
|
||||
'CLAUDE_CODE_USE_OPENAI',
|
||||
'CLAUDE_CODE_USE_GEMINI',
|
||||
'CLAUDE_CODE_USE_GITHUB',
|
||||
'CLAUDE_CODE_USE_BEDROCK',
|
||||
'CLAUDE_CODE_USE_VERTEX',
|
||||
'CLAUDE_CODE_USE_FOUNDRY',
|
||||
'OPENAI_BASE_URL',
|
||||
'OPENAI_API_KEY',
|
||||
'OPENAI_MODEL',
|
||||
'GEMINI_MODEL',
|
||||
] as const
|
||||
|
||||
beforeEach(() => {
|
||||
for (const key of RESET_KEYS) {
|
||||
delete process.env[key]
|
||||
}
|
||||
})
|
||||
|
||||
afterEach(() => {
|
||||
for (const key of [
|
||||
'CLAUDE_CODE_USE_OPENAI',
|
||||
'CLAUDE_CODE_USE_GEMINI',
|
||||
'CLAUDE_CODE_USE_GITHUB',
|
||||
'CLAUDE_CODE_USE_BEDROCK',
|
||||
'CLAUDE_CODE_USE_VERTEX',
|
||||
'OPENAI_BASE_URL',
|
||||
'OPENAI_API_KEY',
|
||||
'OPENAI_MODEL',
|
||||
'GEMINI_MODEL',
|
||||
]) {
|
||||
for (const key of RESET_KEYS) {
|
||||
if (originalEnv[key] === undefined) delete process.env[key]
|
||||
else process.env[key] = originalEnv[key]
|
||||
}
|
||||
@@ -75,6 +85,16 @@ describe('applyProviderFlag - openai', () => {
|
||||
applyProviderFlag('openai', ['--model', 'gpt-4o'])
|
||||
expect(process.env.OPENAI_MODEL).toBe('gpt-4o')
|
||||
})
|
||||
|
||||
test('clears a previously persisted GitHub flag', () => {
|
||||
process.env.CLAUDE_CODE_USE_GITHUB = '1'
|
||||
|
||||
const result = applyProviderFlag('openai', [])
|
||||
|
||||
expect(result.error).toBeUndefined()
|
||||
expect(process.env.CLAUDE_CODE_USE_GITHUB).toBeUndefined()
|
||||
expect(process.env.CLAUDE_CODE_USE_OPENAI).toBe('1')
|
||||
})
|
||||
})
|
||||
|
||||
describe('applyProviderFlag - gemini', () => {
|
||||
@@ -96,6 +116,16 @@ describe('applyProviderFlag - github', () => {
|
||||
expect(result.error).toBeUndefined()
|
||||
expect(process.env.CLAUDE_CODE_USE_GITHUB).toBe('1')
|
||||
})
|
||||
|
||||
test('clears a previously set OpenAI flag', () => {
|
||||
process.env.CLAUDE_CODE_USE_OPENAI = '1'
|
||||
|
||||
const result = applyProviderFlag('github', [])
|
||||
|
||||
expect(result.error).toBeUndefined()
|
||||
expect(process.env.CLAUDE_CODE_USE_OPENAI).toBeUndefined()
|
||||
expect(process.env.CLAUDE_CODE_USE_GITHUB).toBe('1')
|
||||
})
|
||||
})
|
||||
|
||||
describe('applyProviderFlag - bedrock', () => {
|
||||
@@ -143,6 +173,19 @@ describe('applyProviderFlag - invalid provider', () => {
|
||||
})
|
||||
})
|
||||
|
||||
describe('applyProviderFlag - anthropic', () => {
|
||||
test('clears third-party provider flags', () => {
|
||||
process.env.CLAUDE_CODE_USE_GITHUB = '1'
|
||||
process.env.CLAUDE_CODE_USE_OPENAI = '1'
|
||||
|
||||
const result = applyProviderFlag('anthropic', [])
|
||||
|
||||
expect(result.error).toBeUndefined()
|
||||
expect(process.env.CLAUDE_CODE_USE_GITHUB).toBeUndefined()
|
||||
expect(process.env.CLAUDE_CODE_USE_OPENAI).toBeUndefined()
|
||||
})
|
||||
})
|
||||
|
||||
describe('applyProviderFlagFromArgs', () => {
|
||||
test('applies ollama provider and model from argv in one step', () => {
|
||||
const result = applyProviderFlagFromArgs([
|
||||
|
||||
@@ -1,3 +1,8 @@
|
||||
import {
|
||||
clearProviderSelectionFlags,
|
||||
EXPLICIT_PROVIDER_ENV_VAR,
|
||||
} from './providerEnvSelection.js'
|
||||
|
||||
/**
|
||||
* --provider CLI flag support.
|
||||
*
|
||||
@@ -77,6 +82,9 @@ export function applyProviderFlag(
|
||||
}
|
||||
}
|
||||
|
||||
clearProviderSelectionFlags()
|
||||
process.env[EXPLICIT_PROVIDER_ENV_VAR] = provider
|
||||
|
||||
const model = parseModelFlag(args)
|
||||
|
||||
switch (provider as ProviderFlagName) {
|
||||
|
||||
@@ -485,6 +485,26 @@ test('buildStartupEnvFromProfile leaves explicit provider selections untouched',
|
||||
assert.equal(env.OPENAI_API_KEY, undefined)
|
||||
})
|
||||
|
||||
test('buildStartupEnvFromProfile preserves explicit anthropic startup selection', async () => {
|
||||
const processEnv = {
|
||||
CLAUDE_CODE_EXPLICIT_PROVIDER: 'anthropic',
|
||||
}
|
||||
|
||||
const env = await buildStartupEnvFromProfile({
|
||||
persisted: profile('openai', {
|
||||
CLAUDE_CODE_USE_GITHUB: '1',
|
||||
OPENAI_MODEL: 'github:copilot',
|
||||
}),
|
||||
processEnv,
|
||||
})
|
||||
|
||||
assert.equal(env, processEnv)
|
||||
assert.equal(env.CLAUDE_CODE_EXPLICIT_PROVIDER, 'anthropic')
|
||||
assert.equal(env.CLAUDE_CODE_USE_OPENAI, undefined)
|
||||
assert.equal(env.CLAUDE_CODE_USE_GITHUB, undefined)
|
||||
assert.equal(env.OPENAI_MODEL, undefined)
|
||||
})
|
||||
|
||||
test('buildStartupEnvFromProfile leaves profile-managed env untouched', async () => {
|
||||
const processEnv = {
|
||||
CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED: '1',
|
||||
|
||||
@@ -412,6 +412,10 @@ export function hasExplicitProviderSelection(
|
||||
return true
|
||||
}
|
||||
|
||||
if (processEnv.CLAUDE_CODE_EXPLICIT_PROVIDER?.trim()) {
|
||||
return true
|
||||
}
|
||||
|
||||
return (
|
||||
processEnv.CLAUDE_CODE_USE_OPENAI !== undefined ||
|
||||
processEnv.CLAUDE_CODE_USE_GITHUB !== undefined ||
|
||||
|
||||
@@ -2,9 +2,14 @@ import { afterEach, describe, expect, mock, test } from 'bun:test'
|
||||
|
||||
import type { ProviderProfile } from './config.js'
|
||||
|
||||
async function importFreshProvidersModule() {
|
||||
return import(`./model/providers.ts?ts=${Date.now()}-${Math.random()}`)
|
||||
}
|
||||
|
||||
const originalEnv = { ...process.env }
|
||||
|
||||
const RESTORED_KEYS = [
|
||||
'CLAUDE_CODE_EXPLICIT_PROVIDER',
|
||||
'CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED',
|
||||
'CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED_ID',
|
||||
'CLAUDE_CODE_USE_OPENAI',
|
||||
@@ -96,24 +101,26 @@ function buildProfile(overrides: Partial<ProviderProfile> = {}): ProviderProfile
|
||||
|
||||
describe('applyProviderProfileToProcessEnv', () => {
|
||||
test('openai profile clears competing gemini/github flags', async () => {
|
||||
const { applyProviderProfileToProcessEnv, getAPIProvider } =
|
||||
const { applyProviderProfileToProcessEnv } =
|
||||
await importFreshProviderProfileModules()
|
||||
process.env.CLAUDE_CODE_USE_GEMINI = '1'
|
||||
process.env.CLAUDE_CODE_USE_GITHUB = '1'
|
||||
|
||||
applyProviderProfileToProcessEnv(buildProfile())
|
||||
const { getAPIProvider: getFreshAPIProvider } =
|
||||
await importFreshProvidersModule()
|
||||
|
||||
expect(process.env.CLAUDE_CODE_USE_GEMINI).toBeUndefined()
|
||||
expect(process.env.CLAUDE_CODE_USE_GITHUB).toBeUndefined()
|
||||
expect(process.env.CLAUDE_CODE_USE_OPENAI).toBe('1')
|
||||
expect(String(process.env.CLAUDE_CODE_USE_OPENAI)).toBe('1')
|
||||
expect(process.env.CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED_ID).toBe(
|
||||
'provider_test',
|
||||
)
|
||||
expect(getAPIProvider()).toBe('openai')
|
||||
expect(getFreshAPIProvider()).toBe('openai')
|
||||
})
|
||||
|
||||
test('anthropic profile clears competing gemini/github flags', async () => {
|
||||
const { applyProviderProfileToProcessEnv, getAPIProvider } =
|
||||
const { applyProviderProfileToProcessEnv } =
|
||||
await importFreshProviderProfileModules()
|
||||
process.env.CLAUDE_CODE_USE_GEMINI = '1'
|
||||
process.env.CLAUDE_CODE_USE_GITHUB = '1'
|
||||
@@ -125,15 +132,40 @@ describe('applyProviderProfileToProcessEnv', () => {
|
||||
model: 'claude-sonnet-4-6',
|
||||
}),
|
||||
)
|
||||
const { getAPIProvider: getFreshAPIProvider } =
|
||||
await importFreshProvidersModule()
|
||||
|
||||
expect(process.env.CLAUDE_CODE_USE_GEMINI).toBeUndefined()
|
||||
expect(process.env.CLAUDE_CODE_USE_GITHUB).toBeUndefined()
|
||||
expect(process.env.CLAUDE_CODE_USE_OPENAI).toBeUndefined()
|
||||
expect(getAPIProvider()).toBe('firstParty')
|
||||
expect(getFreshAPIProvider()).toBe('firstParty')
|
||||
})
|
||||
})
|
||||
|
||||
describe('applyActiveProviderProfileFromConfig', () => {
|
||||
test('does not override explicit anthropic startup selection', async () => {
|
||||
const { applyActiveProviderProfileFromConfig } =
|
||||
await importFreshProviderProfileModules()
|
||||
process.env.CLAUDE_CODE_EXPLICIT_PROVIDER = 'anthropic'
|
||||
|
||||
const applied = applyActiveProviderProfileFromConfig({
|
||||
providerProfiles: [
|
||||
buildProfile({
|
||||
id: 'saved_github',
|
||||
baseUrl: 'https://api.githubcopilot.com',
|
||||
model: 'github:copilot',
|
||||
}),
|
||||
],
|
||||
activeProviderProfileId: 'saved_github',
|
||||
} as any)
|
||||
|
||||
expect(applied).toBeUndefined()
|
||||
expect(process.env.CLAUDE_CODE_EXPLICIT_PROVIDER).toBe('anthropic')
|
||||
expect(process.env.CLAUDE_CODE_USE_OPENAI).toBeUndefined()
|
||||
expect(process.env.CLAUDE_CODE_USE_GITHUB).toBeUndefined()
|
||||
expect(process.env.OPENAI_MODEL).toBeUndefined()
|
||||
})
|
||||
|
||||
test('does not override explicit startup provider selection', async () => {
|
||||
const { applyActiveProviderProfileFromConfig } =
|
||||
await importFreshProviderProfileModules()
|
||||
@@ -177,7 +209,7 @@ describe('applyActiveProviderProfileFromConfig', () => {
|
||||
} as any)
|
||||
|
||||
expect(applied).toBeUndefined()
|
||||
expect(process.env.CLAUDE_CODE_USE_OPENAI).toBe('1')
|
||||
expect(String(process.env.CLAUDE_CODE_USE_OPENAI)).toBe('1')
|
||||
expect(process.env.OPENAI_BASE_URL).toBe('http://localhost:11434/v1')
|
||||
expect(process.env.OPENAI_MODEL).toBe('qwen2.5:3b')
|
||||
})
|
||||
@@ -267,7 +299,7 @@ describe('applyActiveProviderProfileFromConfig', () => {
|
||||
} as any)
|
||||
|
||||
expect(applied?.id).toBe('saved_openai')
|
||||
expect(process.env.CLAUDE_CODE_USE_OPENAI).toBe('1')
|
||||
expect(String(process.env.CLAUDE_CODE_USE_OPENAI)).toBe('1')
|
||||
expect(process.env.OPENAI_BASE_URL).toBe('https://api.openai.com/v1')
|
||||
expect(process.env.OPENAI_MODEL).toBe('gpt-4o')
|
||||
})
|
||||
@@ -286,10 +318,10 @@ describe('persistActiveProviderProfileModel', () => {
|
||||
model: 'kimi-k2.5:cloud',
|
||||
})
|
||||
|
||||
saveMockGlobalConfig(current => ({
|
||||
...current,
|
||||
providerProfiles: [activeProfile],
|
||||
activeProviderProfileId: activeProfile.id,
|
||||
saveMockGlobalConfig(current => ({
|
||||
...current,
|
||||
providerProfiles: [activeProfile],
|
||||
activeProviderProfileId: activeProfile.id,
|
||||
}))
|
||||
applyProviderProfileToProcessEnv(activeProfile)
|
||||
|
||||
@@ -303,7 +335,7 @@ describe('persistActiveProviderProfileModel', () => {
|
||||
)
|
||||
|
||||
const saved = getProviderProfiles().find(
|
||||
profile => profile.id === activeProfile.id,
|
||||
(profile: ProviderProfile) => profile.id === activeProfile.id,
|
||||
)
|
||||
expect(saved?.model).toBe('minimax-m2.5:cloud')
|
||||
})
|
||||
@@ -333,7 +365,7 @@ describe('persistActiveProviderProfileModel', () => {
|
||||
|
||||
expect(process.env.OPENAI_MODEL).toBe('cli-model')
|
||||
const saved = getProviderProfiles().find(
|
||||
profile => profile.id === activeProfile.id,
|
||||
(profile: ProviderProfile) => profile.id === activeProfile.id,
|
||||
)
|
||||
expect(saved?.model).toBe('minimax-m2.5:cloud')
|
||||
})
|
||||
@@ -414,7 +446,7 @@ describe('deleteProviderProfile', () => {
|
||||
expect(result.activeProfileId).toBeUndefined()
|
||||
|
||||
expect(process.env.CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED).toBeUndefined()
|
||||
expect(process.env.CLAUDE_CODE_USE_OPENAI).toBe('1')
|
||||
expect(String(process.env.CLAUDE_CODE_USE_OPENAI)).toBe('1')
|
||||
expect(process.env.OPENAI_BASE_URL).toBe('http://localhost:11434/v1')
|
||||
expect(process.env.OPENAI_MODEL).toBe('qwen2.5:3b')
|
||||
})
|
||||
|
||||
@@ -5,6 +5,7 @@ import {
|
||||
type ProviderProfile,
|
||||
} from './config.js'
|
||||
import type { ModelOption } from './model/modelOptions.js'
|
||||
import { EXPLICIT_PROVIDER_ENV_VAR } from './providerEnvSelection.js'
|
||||
|
||||
export type ProviderPreset =
|
||||
| 'anthropic'
|
||||
@@ -256,6 +257,7 @@ function hasProviderSelectionFlags(
|
||||
processEnv: NodeJS.ProcessEnv = process.env,
|
||||
): boolean {
|
||||
return (
|
||||
processEnv[EXPLICIT_PROVIDER_ENV_VAR] !== undefined ||
|
||||
processEnv.CLAUDE_CODE_USE_OPENAI !== undefined ||
|
||||
processEnv.CLAUDE_CODE_USE_GEMINI !== undefined ||
|
||||
processEnv.CLAUDE_CODE_USE_GITHUB !== undefined ||
|
||||
|
||||
@@ -97,8 +97,12 @@ export function renderToAnsiString(node: React.ReactNode, columns?: number): Pro
|
||||
patchConsole: false
|
||||
});
|
||||
|
||||
// Wait for the component to exit naturally
|
||||
await instance.waitUntilExit();
|
||||
// Wait for the component to exit naturally, with a timeout guard so
|
||||
// tests never hang indefinitely if a render error prevents exit().
|
||||
await Promise.race([
|
||||
instance.waitUntilExit(),
|
||||
new Promise<void>(resolve => setTimeout(resolve, 3000)),
|
||||
]);
|
||||
|
||||
// Extract only the first frame's content to avoid duplication
|
||||
// (Ink outputs multiple frames in non-TTY mode)
|
||||
|
||||
Reference in New Issue
Block a user