Compare commits
8 Commits
fix/repl-a
...
fix/383-ba
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
3a25d71004 | ||
|
|
50efbe5614 | ||
|
|
b20d878b76 | ||
|
|
f2fc454baf | ||
|
|
10f17d38ea | ||
|
|
889c472ddb | ||
|
|
0ad7746b7a | ||
|
|
91df124064 |
@@ -52,11 +52,7 @@ async function renderFinalFrame(node: React.ReactNode): Promise<string> {
|
|||||||
patchConsole: false,
|
patchConsole: false,
|
||||||
})
|
})
|
||||||
|
|
||||||
// Timeout guard: if render throws before exit effect fires, don't hang
|
await instance.waitUntilExit()
|
||||||
await Promise.race([
|
|
||||||
instance.waitUntilExit(),
|
|
||||||
new Promise<void>(resolve => setTimeout(resolve, 3000)),
|
|
||||||
])
|
|
||||||
return stripAnsi(extractLastFrame(getOutput()))
|
return stripAnsi(extractLastFrame(getOutput()))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -1,305 +0,0 @@
|
|||||||
import { PassThrough } from 'node:stream'
|
|
||||||
|
|
||||||
import { afterEach, expect, mock, test } from 'bun:test'
|
|
||||||
import React from 'react'
|
|
||||||
import stripAnsi from 'strip-ansi'
|
|
||||||
|
|
||||||
import { createRoot } from '../ink.js'
|
|
||||||
import { AppStateProvider } from '../state/AppState.js'
|
|
||||||
|
|
||||||
const SYNC_START = '\x1B[?2026h'
|
|
||||||
const SYNC_END = '\x1B[?2026l'
|
|
||||||
|
|
||||||
const ORIGINAL_ENV = {
|
|
||||||
CLAUDE_CODE_USE_GITHUB: process.env.CLAUDE_CODE_USE_GITHUB,
|
|
||||||
GITHUB_TOKEN: process.env.GITHUB_TOKEN,
|
|
||||||
GH_TOKEN: process.env.GH_TOKEN,
|
|
||||||
}
|
|
||||||
|
|
||||||
function extractLastFrame(output: string): string {
|
|
||||||
let lastFrame: string | null = null
|
|
||||||
let cursor = 0
|
|
||||||
|
|
||||||
while (cursor < output.length) {
|
|
||||||
const start = output.indexOf(SYNC_START, cursor)
|
|
||||||
if (start === -1) {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
const contentStart = start + SYNC_START.length
|
|
||||||
const end = output.indexOf(SYNC_END, contentStart)
|
|
||||||
if (end === -1) {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
const frame = output.slice(contentStart, end)
|
|
||||||
if (frame.trim().length > 0) {
|
|
||||||
lastFrame = frame
|
|
||||||
}
|
|
||||||
cursor = end + SYNC_END.length
|
|
||||||
}
|
|
||||||
|
|
||||||
return lastFrame ?? output
|
|
||||||
}
|
|
||||||
|
|
||||||
function createTestStreams(): {
|
|
||||||
stdout: PassThrough
|
|
||||||
stdin: PassThrough & {
|
|
||||||
isTTY: boolean
|
|
||||||
setRawMode: (mode: boolean) => void
|
|
||||||
ref: () => void
|
|
||||||
unref: () => void
|
|
||||||
}
|
|
||||||
getOutput: () => string
|
|
||||||
} {
|
|
||||||
let output = ''
|
|
||||||
const stdout = new PassThrough()
|
|
||||||
const stdin = new PassThrough() as PassThrough & {
|
|
||||||
isTTY: boolean
|
|
||||||
setRawMode: (mode: boolean) => void
|
|
||||||
ref: () => void
|
|
||||||
unref: () => void
|
|
||||||
}
|
|
||||||
|
|
||||||
stdin.isTTY = true
|
|
||||||
stdin.setRawMode = () => {}
|
|
||||||
stdin.ref = () => {}
|
|
||||||
stdin.unref = () => {}
|
|
||||||
;(stdout as unknown as { columns: number }).columns = 120
|
|
||||||
stdout.on('data', chunk => {
|
|
||||||
output += chunk.toString()
|
|
||||||
})
|
|
||||||
|
|
||||||
return {
|
|
||||||
stdout,
|
|
||||||
stdin,
|
|
||||||
getOutput: () => output,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
async function waitForCondition(
|
|
||||||
predicate: () => boolean,
|
|
||||||
options?: { timeoutMs?: number; intervalMs?: number },
|
|
||||||
): Promise<void> {
|
|
||||||
const timeoutMs = options?.timeoutMs ?? 2000
|
|
||||||
const intervalMs = options?.intervalMs ?? 10
|
|
||||||
const startedAt = Date.now()
|
|
||||||
|
|
||||||
while (Date.now() - startedAt < timeoutMs) {
|
|
||||||
if (predicate()) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
await Bun.sleep(intervalMs)
|
|
||||||
}
|
|
||||||
|
|
||||||
throw new Error('Timed out waiting for ProviderManager test condition')
|
|
||||||
}
|
|
||||||
|
|
||||||
function createDeferred<T>(): {
|
|
||||||
promise: Promise<T>
|
|
||||||
resolve: (value: T) => void
|
|
||||||
} {
|
|
||||||
let resolve!: (value: T) => void
|
|
||||||
const promise = new Promise<T>(r => {
|
|
||||||
resolve = r
|
|
||||||
})
|
|
||||||
return { promise, resolve }
|
|
||||||
}
|
|
||||||
|
|
||||||
function mockProviderProfilesModule(): void {
|
|
||||||
mock.module('../utils/providerProfiles.js', () => ({
|
|
||||||
addProviderProfile: () => null,
|
|
||||||
applyActiveProviderProfileFromConfig: () => {},
|
|
||||||
deleteProviderProfile: () => ({ removed: false, activeProfileId: null }),
|
|
||||||
getActiveProviderProfile: () => null,
|
|
||||||
getProviderPresetDefaults: () => ({
|
|
||||||
provider: 'openai',
|
|
||||||
name: 'Mock provider',
|
|
||||||
baseUrl: 'http://localhost:11434/v1',
|
|
||||||
model: 'mock-model',
|
|
||||||
apiKey: '',
|
|
||||||
}),
|
|
||||||
getProviderProfiles: () => [],
|
|
||||||
setActiveProviderProfile: () => null,
|
|
||||||
updateProviderProfile: () => null,
|
|
||||||
}))
|
|
||||||
}
|
|
||||||
|
|
||||||
function mockProviderManagerDependencies(
|
|
||||||
syncRead: () => string | undefined,
|
|
||||||
asyncRead: () => Promise<string | undefined>,
|
|
||||||
): void {
|
|
||||||
mockProviderProfilesModule()
|
|
||||||
|
|
||||||
mock.module('../utils/githubModelsCredentials.js', () => ({
|
|
||||||
clearGithubModelsToken: () => ({ success: true }),
|
|
||||||
GITHUB_MODELS_HYDRATED_ENV_MARKER: 'CLAUDE_CODE_GITHUB_TOKEN_HYDRATED',
|
|
||||||
hydrateGithubModelsTokenFromSecureStorage: () => {},
|
|
||||||
readGithubModelsToken: syncRead,
|
|
||||||
readGithubModelsTokenAsync: asyncRead,
|
|
||||||
}))
|
|
||||||
|
|
||||||
mock.module('../utils/settings/settings.js', () => ({
|
|
||||||
updateSettingsForSource: () => ({ error: null }),
|
|
||||||
}))
|
|
||||||
}
|
|
||||||
|
|
||||||
async function waitForFrameOutput(
|
|
||||||
getOutput: () => string,
|
|
||||||
predicate: (output: string) => boolean,
|
|
||||||
timeoutMs = 2500,
|
|
||||||
): Promise<string> {
|
|
||||||
let output = ''
|
|
||||||
|
|
||||||
await waitForCondition(() => {
|
|
||||||
output = stripAnsi(extractLastFrame(getOutput()))
|
|
||||||
return predicate(output)
|
|
||||||
}, { timeoutMs })
|
|
||||||
|
|
||||||
return output
|
|
||||||
}
|
|
||||||
|
|
||||||
async function mountProviderManager(
|
|
||||||
ProviderManager: React.ComponentType<{
|
|
||||||
mode: 'first-run' | 'manage'
|
|
||||||
onDone: () => void
|
|
||||||
}>,
|
|
||||||
): Promise<{
|
|
||||||
getOutput: () => string
|
|
||||||
dispose: () => Promise<void>
|
|
||||||
}> {
|
|
||||||
const { stdout, stdin, getOutput } = createTestStreams()
|
|
||||||
const root = await createRoot({
|
|
||||||
stdout: stdout as unknown as NodeJS.WriteStream,
|
|
||||||
stdin: stdin as unknown as NodeJS.ReadStream,
|
|
||||||
patchConsole: false,
|
|
||||||
})
|
|
||||||
|
|
||||||
root.render(
|
|
||||||
<AppStateProvider>
|
|
||||||
<ProviderManager
|
|
||||||
mode="manage"
|
|
||||||
onDone={() => {}}
|
|
||||||
/>
|
|
||||||
</AppStateProvider>,
|
|
||||||
)
|
|
||||||
|
|
||||||
return {
|
|
||||||
getOutput,
|
|
||||||
dispose: async () => {
|
|
||||||
root.unmount()
|
|
||||||
stdin.end()
|
|
||||||
stdout.end()
|
|
||||||
await Bun.sleep(0)
|
|
||||||
},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
async function renderProviderManagerFrame(
|
|
||||||
ProviderManager: React.ComponentType<{
|
|
||||||
mode: 'first-run' | 'manage'
|
|
||||||
onDone: () => void
|
|
||||||
}>,
|
|
||||||
options?: {
|
|
||||||
waitForOutput?: (output: string) => boolean
|
|
||||||
timeoutMs?: number
|
|
||||||
},
|
|
||||||
): Promise<string> {
|
|
||||||
const mounted = await mountProviderManager(ProviderManager)
|
|
||||||
const output = await waitForFrameOutput(
|
|
||||||
mounted.getOutput,
|
|
||||||
frame => {
|
|
||||||
if (!options?.waitForOutput) {
|
|
||||||
return frame.includes('Provider manager')
|
|
||||||
}
|
|
||||||
return options.waitForOutput(frame)
|
|
||||||
},
|
|
||||||
options?.timeoutMs ?? 2500,
|
|
||||||
)
|
|
||||||
|
|
||||||
await mounted.dispose()
|
|
||||||
return output
|
|
||||||
}
|
|
||||||
|
|
||||||
afterEach(() => {
|
|
||||||
mock.restore()
|
|
||||||
|
|
||||||
for (const [key, value] of Object.entries(ORIGINAL_ENV)) {
|
|
||||||
if (value === undefined) {
|
|
||||||
delete process.env[key as keyof typeof ORIGINAL_ENV]
|
|
||||||
} else {
|
|
||||||
process.env[key as keyof typeof ORIGINAL_ENV] = value
|
|
||||||
}
|
|
||||||
}
|
|
||||||
})
|
|
||||||
|
|
||||||
test('ProviderManager resolves GitHub virtual provider from async storage without sync reads in render flow', async () => {
|
|
||||||
delete process.env.CLAUDE_CODE_USE_GITHUB
|
|
||||||
delete process.env.GITHUB_TOKEN
|
|
||||||
delete process.env.GH_TOKEN
|
|
||||||
|
|
||||||
const syncRead = mock(() => {
|
|
||||||
throw new Error('sync credential read should not run in ProviderManager render flow')
|
|
||||||
})
|
|
||||||
const asyncRead = mock(async () => 'stored-token')
|
|
||||||
|
|
||||||
mockProviderManagerDependencies(syncRead, asyncRead)
|
|
||||||
|
|
||||||
const nonce = `${Date.now()}-${Math.random()}`
|
|
||||||
const { ProviderManager } = await import(`./ProviderManager.js?ts=${nonce}`)
|
|
||||||
const output = await renderProviderManagerFrame(ProviderManager, {
|
|
||||||
waitForOutput: frame =>
|
|
||||||
frame.includes('Provider manager') &&
|
|
||||||
frame.includes('GitHub Models') &&
|
|
||||||
frame.includes('token stored'),
|
|
||||||
})
|
|
||||||
|
|
||||||
expect(output).toContain('Provider manager')
|
|
||||||
expect(output).toContain('GitHub Models')
|
|
||||||
expect(output).toContain('token stored')
|
|
||||||
expect(output).not.toContain('No provider profiles configured yet.')
|
|
||||||
|
|
||||||
expect(syncRead).not.toHaveBeenCalled()
|
|
||||||
expect(asyncRead).toHaveBeenCalled()
|
|
||||||
})
|
|
||||||
|
|
||||||
test('ProviderManager avoids first-frame false negative while stored-token lookup is pending', async () => {
|
|
||||||
delete process.env.CLAUDE_CODE_USE_GITHUB
|
|
||||||
delete process.env.GITHUB_TOKEN
|
|
||||||
delete process.env.GH_TOKEN
|
|
||||||
|
|
||||||
const syncRead = mock(() => {
|
|
||||||
throw new Error('sync credential read should not run in ProviderManager render flow')
|
|
||||||
})
|
|
||||||
const deferredStoredToken = createDeferred<string | undefined>()
|
|
||||||
const asyncRead = mock(async () => deferredStoredToken.promise)
|
|
||||||
|
|
||||||
mockProviderManagerDependencies(syncRead, asyncRead)
|
|
||||||
|
|
||||||
const nonce = `${Date.now()}-${Math.random()}`
|
|
||||||
const { ProviderManager } = await import(`./ProviderManager.js?ts=${nonce}`)
|
|
||||||
const mounted = await mountProviderManager(ProviderManager)
|
|
||||||
|
|
||||||
const firstFrame = await waitForFrameOutput(
|
|
||||||
mounted.getOutput,
|
|
||||||
frame => frame.includes('Provider manager'),
|
|
||||||
)
|
|
||||||
|
|
||||||
expect(firstFrame).toContain('Checking GitHub Models credentials...')
|
|
||||||
expect(firstFrame).not.toContain('No provider profiles configured yet.')
|
|
||||||
|
|
||||||
deferredStoredToken.resolve('stored-token')
|
|
||||||
|
|
||||||
const resolvedFrame = await waitForFrameOutput(
|
|
||||||
mounted.getOutput,
|
|
||||||
frame => frame.includes('GitHub Models') && frame.includes('token stored'),
|
|
||||||
)
|
|
||||||
|
|
||||||
expect(resolvedFrame).toContain('GitHub Models')
|
|
||||||
expect(resolvedFrame).toContain('token stored')
|
|
||||||
|
|
||||||
await mounted.dispose()
|
|
||||||
|
|
||||||
expect(syncRead).not.toHaveBeenCalled()
|
|
||||||
expect(asyncRead).toHaveBeenCalled()
|
|
||||||
})
|
|
||||||
@@ -20,7 +20,6 @@ import {
|
|||||||
GITHUB_MODELS_HYDRATED_ENV_MARKER,
|
GITHUB_MODELS_HYDRATED_ENV_MARKER,
|
||||||
hydrateGithubModelsTokenFromSecureStorage,
|
hydrateGithubModelsTokenFromSecureStorage,
|
||||||
readGithubModelsToken,
|
readGithubModelsToken,
|
||||||
readGithubModelsTokenAsync,
|
|
||||||
} from '../utils/githubModelsCredentials.js'
|
} from '../utils/githubModelsCredentials.js'
|
||||||
import { isEnvTruthy } from '../utils/envUtils.js'
|
import { isEnvTruthy } from '../utils/envUtils.js'
|
||||||
import { updateSettingsForSource } from '../utils/settings/settings.js'
|
import { updateSettingsForSource } from '../utils/settings/settings.js'
|
||||||
@@ -119,38 +118,25 @@ function profileSummary(profile: ProviderProfile, isActive: boolean): string {
|
|||||||
return `${providerKind} · ${profile.baseUrl} · ${profile.model} · ${keyInfo}${activeSuffix}`
|
return `${providerKind} · ${profile.baseUrl} · ${profile.model} · ${keyInfo}${activeSuffix}`
|
||||||
}
|
}
|
||||||
|
|
||||||
function getGithubCredentialSourceFromEnv(
|
function getGithubCredentialSource(
|
||||||
processEnv: NodeJS.ProcessEnv = process.env,
|
processEnv: NodeJS.ProcessEnv = process.env,
|
||||||
): GithubCredentialSource {
|
): GithubCredentialSource {
|
||||||
|
if (readGithubModelsToken()?.trim()) {
|
||||||
|
return 'stored'
|
||||||
|
}
|
||||||
if (processEnv.GITHUB_TOKEN?.trim() || processEnv.GH_TOKEN?.trim()) {
|
if (processEnv.GITHUB_TOKEN?.trim() || processEnv.GH_TOKEN?.trim()) {
|
||||||
return 'env'
|
return 'env'
|
||||||
}
|
}
|
||||||
return 'none'
|
return 'none'
|
||||||
}
|
}
|
||||||
|
|
||||||
async function resolveGithubCredentialSource(
|
|
||||||
processEnv: NodeJS.ProcessEnv = process.env,
|
|
||||||
): Promise<GithubCredentialSource> {
|
|
||||||
const envSource = getGithubCredentialSourceFromEnv(processEnv)
|
|
||||||
if (envSource !== 'none') {
|
|
||||||
return envSource
|
|
||||||
}
|
|
||||||
|
|
||||||
if (await readGithubModelsTokenAsync()) {
|
|
||||||
return 'stored'
|
|
||||||
}
|
|
||||||
|
|
||||||
return 'none'
|
|
||||||
}
|
|
||||||
|
|
||||||
function isGithubProviderAvailable(
|
function isGithubProviderAvailable(
|
||||||
credentialSource: GithubCredentialSource,
|
|
||||||
processEnv: NodeJS.ProcessEnv = process.env,
|
processEnv: NodeJS.ProcessEnv = process.env,
|
||||||
): boolean {
|
): boolean {
|
||||||
if (isEnvTruthy(processEnv.CLAUDE_CODE_USE_GITHUB)) {
|
if (isEnvTruthy(processEnv.CLAUDE_CODE_USE_GITHUB)) {
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
return credentialSource !== 'none'
|
return getGithubCredentialSource(processEnv) !== 'none'
|
||||||
}
|
}
|
||||||
|
|
||||||
function getGithubProviderModel(
|
function getGithubProviderModel(
|
||||||
@@ -178,24 +164,19 @@ function getGithubProviderSummary(
|
|||||||
}
|
}
|
||||||
|
|
||||||
export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
|
export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
|
||||||
const initialGithubCredentialSource = getGithubCredentialSourceFromEnv()
|
|
||||||
const initialIsGithubActive = isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB)
|
|
||||||
const initialHasGithubCredential = initialGithubCredentialSource !== 'none'
|
|
||||||
|
|
||||||
const [profiles, setProfiles] = React.useState(() => getProviderProfiles())
|
const [profiles, setProfiles] = React.useState(() => getProviderProfiles())
|
||||||
const [activeProfileId, setActiveProfileId] = React.useState(
|
const [activeProfileId, setActiveProfileId] = React.useState(
|
||||||
() => getActiveProviderProfile()?.id,
|
() => getActiveProviderProfile()?.id,
|
||||||
)
|
)
|
||||||
const [githubProviderAvailable, setGithubProviderAvailable] = React.useState(
|
const [githubProviderAvailable, setGithubProviderAvailable] = React.useState(() =>
|
||||||
() => isGithubProviderAvailable(initialGithubCredentialSource),
|
isGithubProviderAvailable(),
|
||||||
)
|
)
|
||||||
const [githubCredentialSource, setGithubCredentialSource] = React.useState<GithubCredentialSource>(
|
const [githubCredentialSource, setGithubCredentialSource] = React.useState<GithubCredentialSource>(
|
||||||
() => initialGithubCredentialSource,
|
() => getGithubCredentialSource(),
|
||||||
|
)
|
||||||
|
const [isGithubActive, setIsGithubActive] = React.useState(() =>
|
||||||
|
isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB),
|
||||||
)
|
)
|
||||||
const [isGithubActive, setIsGithubActive] = React.useState(() => initialIsGithubActive)
|
|
||||||
const [isGithubCredentialSourceResolved, setIsGithubCredentialSourceResolved] =
|
|
||||||
React.useState(() => initialHasGithubCredential || initialIsGithubActive)
|
|
||||||
const githubRefreshEpochRef = React.useRef(0)
|
|
||||||
const [screen, setScreen] = React.useState<Screen>(
|
const [screen, setScreen] = React.useState<Screen>(
|
||||||
mode === 'first-run' ? 'select-preset' : 'menu',
|
mode === 'first-run' ? 'select-preset' : 'menu',
|
||||||
)
|
)
|
||||||
@@ -215,48 +196,13 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
|
|||||||
const currentStepKey = currentStep.key
|
const currentStepKey = currentStep.key
|
||||||
const currentValue = draft[currentStepKey]
|
const currentValue = draft[currentStepKey]
|
||||||
|
|
||||||
const refreshGithubProviderState = React.useCallback((): void => {
|
|
||||||
const envCredentialSource = getGithubCredentialSourceFromEnv()
|
|
||||||
const githubActive = isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB)
|
|
||||||
const canResolveFromEnv = githubActive || envCredentialSource !== 'none'
|
|
||||||
|
|
||||||
if (canResolveFromEnv) {
|
|
||||||
githubRefreshEpochRef.current += 1
|
|
||||||
setGithubCredentialSource(envCredentialSource)
|
|
||||||
setGithubProviderAvailable(isGithubProviderAvailable(envCredentialSource))
|
|
||||||
setIsGithubActive(githubActive)
|
|
||||||
setIsGithubCredentialSourceResolved(true)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
setIsGithubCredentialSourceResolved(false)
|
|
||||||
const refreshEpoch = ++githubRefreshEpochRef.current
|
|
||||||
void (async () => {
|
|
||||||
const credentialSource = await resolveGithubCredentialSource()
|
|
||||||
if (refreshEpoch !== githubRefreshEpochRef.current) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
setGithubCredentialSource(credentialSource)
|
|
||||||
setGithubProviderAvailable(isGithubProviderAvailable(credentialSource))
|
|
||||||
setIsGithubActive(isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB))
|
|
||||||
setIsGithubCredentialSourceResolved(true)
|
|
||||||
})()
|
|
||||||
}, [])
|
|
||||||
|
|
||||||
React.useEffect(() => {
|
|
||||||
refreshGithubProviderState()
|
|
||||||
|
|
||||||
return () => {
|
|
||||||
githubRefreshEpochRef.current += 1
|
|
||||||
}
|
|
||||||
}, [refreshGithubProviderState])
|
|
||||||
|
|
||||||
function refreshProfiles(): void {
|
function refreshProfiles(): void {
|
||||||
const nextProfiles = getProviderProfiles()
|
const nextProfiles = getProviderProfiles()
|
||||||
setProfiles(nextProfiles)
|
setProfiles(nextProfiles)
|
||||||
setActiveProfileId(getActiveProviderProfile()?.id)
|
setActiveProfileId(getActiveProviderProfile()?.id)
|
||||||
refreshGithubProviderState()
|
setGithubProviderAvailable(isGithubProviderAvailable())
|
||||||
|
setGithubCredentialSource(getGithubCredentialSource())
|
||||||
|
setIsGithubActive(isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB))
|
||||||
}
|
}
|
||||||
|
|
||||||
function clearStartupProviderOverrideFromUserSettings(): string | null {
|
function clearStartupProviderOverrideFromUserSettings(): string | null {
|
||||||
@@ -694,11 +640,7 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
|
|||||||
{statusMessage && <Text>{statusMessage}</Text>}
|
{statusMessage && <Text>{statusMessage}</Text>}
|
||||||
<Box flexDirection="column">
|
<Box flexDirection="column">
|
||||||
{profiles.length === 0 && !githubProviderAvailable ? (
|
{profiles.length === 0 && !githubProviderAvailable ? (
|
||||||
isGithubCredentialSourceResolved ? (
|
<Text dimColor>No provider profiles configured yet.</Text>
|
||||||
<Text dimColor>No provider profiles configured yet.</Text>
|
|
||||||
) : (
|
|
||||||
<Text dimColor>Checking GitHub Models credentials...</Text>
|
|
||||||
)
|
|
||||||
) : (
|
) : (
|
||||||
<>
|
<>
|
||||||
{profiles.map(profile => (
|
{profiles.map(profile => (
|
||||||
|
|||||||
@@ -8,34 +8,6 @@ import {
|
|||||||
validateProviderEnvOrExit,
|
validateProviderEnvOrExit,
|
||||||
} from '../utils/providerValidation.js'
|
} from '../utils/providerValidation.js'
|
||||||
|
|
||||||
// OpenClaude: polyfill globalThis.File for Node < 20.
|
|
||||||
// undici v7 references `File` at module evaluation time (webidl type
|
|
||||||
// assertions). Node 18 lacks the global, causing a ReferenceError inside
|
|
||||||
// the bundled __commonJS require chain which deadlocks the process when a
|
|
||||||
// proxy is configured (configureGlobalAgents → require_undici).
|
|
||||||
// eslint-disable-next-line custom-rules/no-top-level-side-effects
|
|
||||||
if (typeof globalThis.File === 'undefined') {
|
|
||||||
try {
|
|
||||||
// Node 18.13+ exposes File in node:buffer but not as a global.
|
|
||||||
// eslint-disable-next-line @typescript-eslint/no-require-imports
|
|
||||||
const { File: NodeFile } = require('node:buffer')
|
|
||||||
// @ts-expect-error -- polyfilling missing global
|
|
||||||
globalThis.File = NodeFile
|
|
||||||
} catch {
|
|
||||||
// Absolute fallback: stub so `MakeTypeAssertion(File)` doesn't throw.
|
|
||||||
// @ts-expect-error -- minimal polyfill
|
|
||||||
globalThis.File = class File extends Blob {
|
|
||||||
name: string
|
|
||||||
lastModified: number
|
|
||||||
constructor(parts: BlobPart[], name: string, opts?: FilePropertyBag) {
|
|
||||||
super(parts, opts)
|
|
||||||
this.name = name
|
|
||||||
this.lastModified = opts?.lastModified ?? Date.now()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// OpenClaude: disable experimental API betas by default.
|
// OpenClaude: disable experimental API betas by default.
|
||||||
// Tool search (defer_loading), global cache scope, and context management
|
// Tool search (defer_loading), global cache scope, and context management
|
||||||
// require internal API support not available to external accounts → 500.
|
// require internal API support not available to external accounts → 500.
|
||||||
|
|||||||
@@ -40,7 +40,7 @@ export class GrpcServer {
|
|||||||
grpc.ServerCredentials.createInsecure(),
|
grpc.ServerCredentials.createInsecure(),
|
||||||
(error, boundPort) => {
|
(error, boundPort) => {
|
||||||
if (error) {
|
if (error) {
|
||||||
console.error('Failed to start gRPC server')
|
console.error('Failed to start gRPC server', error)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
console.log(`gRPC Server running at ${host}:${boundPort}`)
|
console.log(`gRPC Server running at ${host}:${boundPort}`)
|
||||||
@@ -225,7 +225,7 @@ export class GrpcServer {
|
|||||||
call.end()
|
call.end()
|
||||||
}
|
}
|
||||||
} catch (err: any) {
|
} catch (err: any) {
|
||||||
console.error('Error processing stream')
|
console.error("Error processing stream:", err)
|
||||||
call.write({
|
call.write({
|
||||||
error: {
|
error: {
|
||||||
message: err.message || "Internal server error",
|
message: err.message || "Internal server error",
|
||||||
|
|||||||
@@ -366,12 +366,14 @@ const reconciler = createReconciler<
|
|||||||
createTextInstance(
|
createTextInstance(
|
||||||
text: string,
|
text: string,
|
||||||
_root: DOMElement,
|
_root: DOMElement,
|
||||||
_hostContext: HostContext,
|
hostContext: HostContext,
|
||||||
): TextNode {
|
): TextNode {
|
||||||
// react-compiler memoization can reuse cached <Text> elements without
|
if (!hostContext.isInsideText) {
|
||||||
// re-traversing getChildHostContext, so hostContext.isInsideText may be
|
throw new Error(
|
||||||
// stale. Always create the text node — Ink will render it correctly
|
`Text string "${text}" must be rendered inside <Text> component`,
|
||||||
// regardless of the context tracking state.
|
)
|
||||||
|
}
|
||||||
|
|
||||||
return createTextNode(text)
|
return createTextNode(text)
|
||||||
},
|
},
|
||||||
resetTextContent() {},
|
resetTextContent() {},
|
||||||
|
|||||||
@@ -201,117 +201,6 @@ describe('Codex request translation', () => {
|
|||||||
])
|
])
|
||||||
})
|
})
|
||||||
|
|
||||||
test('preserves Grep tool pattern field in Codex strict schemas', () => {
|
|
||||||
const tools = convertToolsToResponsesTools([
|
|
||||||
{
|
|
||||||
name: 'Grep',
|
|
||||||
description: 'Search file contents',
|
|
||||||
input_schema: {
|
|
||||||
type: 'object',
|
|
||||||
properties: {
|
|
||||||
pattern: { type: 'string', description: 'Search pattern' },
|
|
||||||
path: { type: 'string' },
|
|
||||||
},
|
|
||||||
required: ['pattern'],
|
|
||||||
additionalProperties: false,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
])
|
|
||||||
|
|
||||||
expect(tools).toEqual([
|
|
||||||
{
|
|
||||||
type: 'function',
|
|
||||||
name: 'Grep',
|
|
||||||
description: 'Search file contents',
|
|
||||||
parameters: {
|
|
||||||
type: 'object',
|
|
||||||
properties: {
|
|
||||||
pattern: { type: 'string', description: 'Search pattern' },
|
|
||||||
path: { type: 'string' },
|
|
||||||
},
|
|
||||||
required: ['pattern', 'path'],
|
|
||||||
additionalProperties: false,
|
|
||||||
},
|
|
||||||
strict: true,
|
|
||||||
},
|
|
||||||
])
|
|
||||||
})
|
|
||||||
|
|
||||||
test('preserves Glob tool pattern field in Codex strict schemas', () => {
|
|
||||||
const tools = convertToolsToResponsesTools([
|
|
||||||
{
|
|
||||||
name: 'Glob',
|
|
||||||
description: 'Find files by pattern',
|
|
||||||
input_schema: {
|
|
||||||
type: 'object',
|
|
||||||
properties: {
|
|
||||||
pattern: { type: 'string', description: 'Glob pattern' },
|
|
||||||
path: { type: 'string' },
|
|
||||||
},
|
|
||||||
required: ['pattern'],
|
|
||||||
additionalProperties: false,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
])
|
|
||||||
|
|
||||||
expect(tools).toEqual([
|
|
||||||
{
|
|
||||||
type: 'function',
|
|
||||||
name: 'Glob',
|
|
||||||
description: 'Find files by pattern',
|
|
||||||
parameters: {
|
|
||||||
type: 'object',
|
|
||||||
properties: {
|
|
||||||
pattern: { type: 'string', description: 'Glob pattern' },
|
|
||||||
path: { type: 'string' },
|
|
||||||
},
|
|
||||||
required: ['pattern', 'path'],
|
|
||||||
additionalProperties: false,
|
|
||||||
},
|
|
||||||
strict: true,
|
|
||||||
},
|
|
||||||
])
|
|
||||||
})
|
|
||||||
|
|
||||||
test('strips validator pattern keyword but keeps string field named pattern in Codex schemas', () => {
|
|
||||||
const tools = convertToolsToResponsesTools([
|
|
||||||
{
|
|
||||||
name: 'RegexProbe',
|
|
||||||
description: 'Probe regex schema handling',
|
|
||||||
input_schema: {
|
|
||||||
type: 'object',
|
|
||||||
properties: {
|
|
||||||
pattern: {
|
|
||||||
type: 'string',
|
|
||||||
pattern: '^[a-z]+$',
|
|
||||||
},
|
|
||||||
},
|
|
||||||
required: ['pattern'],
|
|
||||||
additionalProperties: false,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
])
|
|
||||||
|
|
||||||
expect(tools).toEqual([
|
|
||||||
{
|
|
||||||
type: 'function',
|
|
||||||
name: 'RegexProbe',
|
|
||||||
description: 'Probe regex schema handling',
|
|
||||||
parameters: {
|
|
||||||
type: 'object',
|
|
||||||
properties: {
|
|
||||||
pattern: {
|
|
||||||
type: 'string',
|
|
||||||
},
|
|
||||||
},
|
|
||||||
required: ['pattern'],
|
|
||||||
additionalProperties: false,
|
|
||||||
},
|
|
||||||
strict: true,
|
|
||||||
},
|
|
||||||
])
|
|
||||||
})
|
|
||||||
|
|
||||||
test('removes unsupported uri format from strict Responses schemas', () => {
|
test('removes unsupported uri format from strict Responses schemas', () => {
|
||||||
const tools = convertToolsToResponsesTools([
|
const tools = convertToolsToResponsesTools([
|
||||||
{
|
{
|
||||||
|
|||||||
@@ -261,125 +261,6 @@ test('preserves Gemini tool call extra_content in follow-up requests', async ()
|
|||||||
})
|
})
|
||||||
})
|
})
|
||||||
|
|
||||||
test('preserves Grep tool pattern field in OpenAI-compatible schemas', async () => {
|
|
||||||
let requestBody: Record<string, unknown> | undefined
|
|
||||||
|
|
||||||
globalThis.fetch = (async (_input, init) => {
|
|
||||||
requestBody = JSON.parse(String(init?.body))
|
|
||||||
|
|
||||||
return new Response(
|
|
||||||
JSON.stringify({
|
|
||||||
id: 'chatcmpl-grep-schema',
|
|
||||||
model: 'qwen/qwen3.6-plus',
|
|
||||||
choices: [
|
|
||||||
{
|
|
||||||
message: {
|
|
||||||
role: 'assistant',
|
|
||||||
content: 'done',
|
|
||||||
},
|
|
||||||
finish_reason: 'stop',
|
|
||||||
},
|
|
||||||
],
|
|
||||||
usage: {
|
|
||||||
prompt_tokens: 12,
|
|
||||||
completion_tokens: 4,
|
|
||||||
total_tokens: 16,
|
|
||||||
},
|
|
||||||
}),
|
|
||||||
{
|
|
||||||
headers: {
|
|
||||||
'Content-Type': 'application/json',
|
|
||||||
},
|
|
||||||
},
|
|
||||||
)
|
|
||||||
}) as FetchType
|
|
||||||
|
|
||||||
const client = createOpenAIShimClient({}) as OpenAIShimClient
|
|
||||||
|
|
||||||
await client.beta.messages.create({
|
|
||||||
model: 'qwen/qwen3.6-plus',
|
|
||||||
system: 'test system',
|
|
||||||
messages: [{ role: 'user', content: 'Use Grep' }],
|
|
||||||
tools: [
|
|
||||||
{
|
|
||||||
name: 'Grep',
|
|
||||||
description: 'Search file contents',
|
|
||||||
input_schema: {
|
|
||||||
type: 'object',
|
|
||||||
properties: {
|
|
||||||
pattern: { type: 'string', description: 'Search pattern' },
|
|
||||||
path: { type: 'string' },
|
|
||||||
},
|
|
||||||
required: ['pattern'],
|
|
||||||
additionalProperties: false,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
],
|
|
||||||
max_tokens: 64,
|
|
||||||
stream: false,
|
|
||||||
})
|
|
||||||
|
|
||||||
const tools = requestBody?.tools as Array<Record<string, unknown>> | undefined
|
|
||||||
const grepTool = tools?.find(tool => (tool.function as Record<string, unknown>)?.name === 'Grep') as
|
|
||||||
| { function?: { parameters?: { properties?: Record<string, unknown>; required?: string[] } } }
|
|
||||||
| undefined
|
|
||||||
|
|
||||||
expect(Object.keys(grepTool?.function?.parameters?.properties ?? {})).toContain('pattern')
|
|
||||||
expect(grepTool?.function?.parameters?.required).toContain('pattern')
|
|
||||||
})
|
|
||||||
|
|
||||||
test('does not infer Gemini mode from OPENAI_BASE_URL path substrings', async () => {
|
|
||||||
let capturedAuthorization: string | null = null
|
|
||||||
|
|
||||||
process.env.OPENAI_BASE_URL =
|
|
||||||
'https://evil.example/generativelanguage.googleapis.com/v1beta/openai'
|
|
||||||
delete process.env.OPENAI_API_KEY
|
|
||||||
process.env.GEMINI_API_KEY = 'gemini-secret'
|
|
||||||
|
|
||||||
globalThis.fetch = (async (_input, init) => {
|
|
||||||
const headers = init?.headers as Record<string, string> | undefined
|
|
||||||
capturedAuthorization =
|
|
||||||
headers?.Authorization ?? headers?.authorization ?? null
|
|
||||||
|
|
||||||
return new Response(
|
|
||||||
JSON.stringify({
|
|
||||||
id: 'chatcmpl-1',
|
|
||||||
model: 'fake-model',
|
|
||||||
choices: [
|
|
||||||
{
|
|
||||||
message: {
|
|
||||||
role: 'assistant',
|
|
||||||
content: 'ok',
|
|
||||||
},
|
|
||||||
finish_reason: 'stop',
|
|
||||||
},
|
|
||||||
],
|
|
||||||
usage: {
|
|
||||||
prompt_tokens: 12,
|
|
||||||
completion_tokens: 4,
|
|
||||||
total_tokens: 16,
|
|
||||||
},
|
|
||||||
}),
|
|
||||||
{
|
|
||||||
headers: {
|
|
||||||
'Content-Type': 'application/json',
|
|
||||||
},
|
|
||||||
},
|
|
||||||
)
|
|
||||||
}) as FetchType
|
|
||||||
|
|
||||||
const client = createOpenAIShimClient({}) as OpenAIShimClient
|
|
||||||
|
|
||||||
await client.beta.messages.create({
|
|
||||||
model: 'fake-model',
|
|
||||||
messages: [{ role: 'user', content: 'hello' }],
|
|
||||||
max_tokens: 64,
|
|
||||||
stream: false,
|
|
||||||
})
|
|
||||||
|
|
||||||
expect(capturedAuthorization).toBeNull()
|
|
||||||
})
|
|
||||||
|
|
||||||
test('preserves image tool results as placeholders in follow-up requests', async () => {
|
test('preserves image tool results as placeholders in follow-up requests', async () => {
|
||||||
let requestBody: Record<string, unknown> | undefined
|
let requestBody: Record<string, unknown> | undefined
|
||||||
|
|
||||||
@@ -1888,237 +1769,3 @@ test('coalesces consecutive assistant messages preserving tool_calls (issue #202
|
|||||||
expect(assistantMsgs?.length).toBe(1) // two assistant turns merged into one
|
expect(assistantMsgs?.length).toBe(1) // two assistant turns merged into one
|
||||||
expect(assistantMsgs?.[0]?.tool_calls?.length).toBeGreaterThan(0)
|
expect(assistantMsgs?.[0]?.tool_calls?.length).toBeGreaterThan(0)
|
||||||
})
|
})
|
||||||
|
|
||||||
test('non-streaming: reasoning_content emitted as thinking block, used as text when content is null', async () => {
|
|
||||||
globalThis.fetch = (async (_input, _init) => {
|
|
||||||
return new Response(
|
|
||||||
JSON.stringify({
|
|
||||||
id: 'chatcmpl-1',
|
|
||||||
model: 'glm-5',
|
|
||||||
choices: [
|
|
||||||
{
|
|
||||||
message: {
|
|
||||||
role: 'assistant',
|
|
||||||
content: null,
|
|
||||||
reasoning_content: 'Let me think about this step by step.',
|
|
||||||
},
|
|
||||||
finish_reason: 'stop',
|
|
||||||
},
|
|
||||||
],
|
|
||||||
usage: {
|
|
||||||
prompt_tokens: 10,
|
|
||||||
completion_tokens: 20,
|
|
||||||
total_tokens: 30,
|
|
||||||
},
|
|
||||||
}),
|
|
||||||
{
|
|
||||||
headers: {
|
|
||||||
'Content-Type': 'application/json',
|
|
||||||
},
|
|
||||||
},
|
|
||||||
)
|
|
||||||
}) as FetchType
|
|
||||||
|
|
||||||
const client = createOpenAIShimClient({}) as OpenAIShimClient
|
|
||||||
|
|
||||||
const result = (await client.beta.messages.create({
|
|
||||||
model: 'glm-5',
|
|
||||||
system: 'test system',
|
|
||||||
messages: [{ role: 'user', content: 'hello' }],
|
|
||||||
max_tokens: 64,
|
|
||||||
stream: false,
|
|
||||||
})) as { content: Array<Record<string, unknown>> }
|
|
||||||
|
|
||||||
expect(result.content).toEqual([
|
|
||||||
{ type: 'thinking', thinking: 'Let me think about this step by step.' },
|
|
||||||
{ type: 'text', text: 'Let me think about this step by step.' },
|
|
||||||
])
|
|
||||||
})
|
|
||||||
|
|
||||||
test('non-streaming: empty string content does not fall through to reasoning_content as text', async () => {
|
|
||||||
globalThis.fetch = (async (_input, _init) => {
|
|
||||||
return new Response(
|
|
||||||
JSON.stringify({
|
|
||||||
id: 'chatcmpl-1',
|
|
||||||
model: 'glm-5',
|
|
||||||
choices: [
|
|
||||||
{
|
|
||||||
message: {
|
|
||||||
role: 'assistant',
|
|
||||||
content: '',
|
|
||||||
reasoning_content: 'Chain of thought here.',
|
|
||||||
},
|
|
||||||
finish_reason: 'stop',
|
|
||||||
},
|
|
||||||
],
|
|
||||||
usage: {
|
|
||||||
prompt_tokens: 10,
|
|
||||||
completion_tokens: 20,
|
|
||||||
total_tokens: 30,
|
|
||||||
},
|
|
||||||
}),
|
|
||||||
{
|
|
||||||
headers: {
|
|
||||||
'Content-Type': 'application/json',
|
|
||||||
},
|
|
||||||
},
|
|
||||||
)
|
|
||||||
}) as FetchType
|
|
||||||
|
|
||||||
const client = createOpenAIShimClient({}) as OpenAIShimClient
|
|
||||||
|
|
||||||
const result = (await client.beta.messages.create({
|
|
||||||
model: 'glm-5',
|
|
||||||
system: 'test system',
|
|
||||||
messages: [{ role: 'user', content: 'hello' }],
|
|
||||||
max_tokens: 64,
|
|
||||||
stream: false,
|
|
||||||
})) as { content: Array<Record<string, unknown>> }
|
|
||||||
|
|
||||||
// reasoning_content should be a thinking block, and also used as text
|
|
||||||
// since content is empty string (treated as absent)
|
|
||||||
expect(result.content).toEqual([
|
|
||||||
{ type: 'thinking', thinking: 'Chain of thought here.' },
|
|
||||||
{ type: 'text', text: 'Chain of thought here.' },
|
|
||||||
])
|
|
||||||
})
|
|
||||||
|
|
||||||
test('non-streaming: real content takes precedence over reasoning_content', async () => {
|
|
||||||
globalThis.fetch = (async (_input, _init) => {
|
|
||||||
return new Response(
|
|
||||||
JSON.stringify({
|
|
||||||
id: 'chatcmpl-1',
|
|
||||||
model: 'glm-5',
|
|
||||||
choices: [
|
|
||||||
{
|
|
||||||
message: {
|
|
||||||
role: 'assistant',
|
|
||||||
content: 'The answer is 42.',
|
|
||||||
reasoning_content: 'I need to calculate this.',
|
|
||||||
},
|
|
||||||
finish_reason: 'stop',
|
|
||||||
},
|
|
||||||
],
|
|
||||||
usage: {
|
|
||||||
prompt_tokens: 10,
|
|
||||||
completion_tokens: 20,
|
|
||||||
total_tokens: 30,
|
|
||||||
},
|
|
||||||
}),
|
|
||||||
{
|
|
||||||
headers: {
|
|
||||||
'Content-Type': 'application/json',
|
|
||||||
},
|
|
||||||
},
|
|
||||||
)
|
|
||||||
}) as FetchType
|
|
||||||
|
|
||||||
const client = createOpenAIShimClient({}) as OpenAIShimClient
|
|
||||||
|
|
||||||
const result = (await client.beta.messages.create({
|
|
||||||
model: 'glm-5',
|
|
||||||
system: 'test system',
|
|
||||||
messages: [{ role: 'user', content: 'hello' }],
|
|
||||||
max_tokens: 64,
|
|
||||||
stream: false,
|
|
||||||
})) as { content: Array<Record<string, unknown>> }
|
|
||||||
|
|
||||||
expect(result.content).toEqual([
|
|
||||||
{ type: 'thinking', thinking: 'I need to calculate this.' },
|
|
||||||
{ type: 'text', text: 'The answer is 42.' },
|
|
||||||
])
|
|
||||||
})
|
|
||||||
|
|
||||||
test('streaming: thinking block closed before tool call', async () => {
|
|
||||||
globalThis.fetch = (async (_input, _init) => {
|
|
||||||
const chunks = makeStreamChunks([
|
|
||||||
{
|
|
||||||
id: 'chatcmpl-1',
|
|
||||||
object: 'chat.completion.chunk',
|
|
||||||
model: 'glm-5',
|
|
||||||
choices: [
|
|
||||||
{
|
|
||||||
index: 0,
|
|
||||||
delta: { role: 'assistant', reasoning_content: 'Thinking...' },
|
|
||||||
finish_reason: null,
|
|
||||||
},
|
|
||||||
],
|
|
||||||
},
|
|
||||||
{
|
|
||||||
id: 'chatcmpl-1',
|
|
||||||
object: 'chat.completion.chunk',
|
|
||||||
model: 'glm-5',
|
|
||||||
choices: [
|
|
||||||
{
|
|
||||||
index: 0,
|
|
||||||
delta: {
|
|
||||||
tool_calls: [
|
|
||||||
{
|
|
||||||
index: 0,
|
|
||||||
id: 'call-1',
|
|
||||||
type: 'function',
|
|
||||||
function: {
|
|
||||||
name: 'Bash',
|
|
||||||
arguments: '{"command":"ls"}',
|
|
||||||
},
|
|
||||||
},
|
|
||||||
],
|
|
||||||
},
|
|
||||||
finish_reason: null,
|
|
||||||
},
|
|
||||||
],
|
|
||||||
},
|
|
||||||
{
|
|
||||||
id: 'chatcmpl-1',
|
|
||||||
object: 'chat.completion.chunk',
|
|
||||||
model: 'glm-5',
|
|
||||||
choices: [
|
|
||||||
{
|
|
||||||
index: 0,
|
|
||||||
delta: {},
|
|
||||||
finish_reason: 'tool_calls',
|
|
||||||
},
|
|
||||||
],
|
|
||||||
},
|
|
||||||
])
|
|
||||||
|
|
||||||
return makeSseResponse(chunks)
|
|
||||||
}) as FetchType
|
|
||||||
|
|
||||||
const client = createOpenAIShimClient({}) as OpenAIShimClient
|
|
||||||
|
|
||||||
const result = await client.beta.messages
|
|
||||||
.create({
|
|
||||||
model: 'glm-5',
|
|
||||||
system: 'test system',
|
|
||||||
messages: [{ role: 'user', content: 'Run ls' }],
|
|
||||||
max_tokens: 64,
|
|
||||||
stream: true,
|
|
||||||
})
|
|
||||||
.withResponse()
|
|
||||||
|
|
||||||
const events: Array<Record<string, unknown>> = []
|
|
||||||
for await (const event of result.data) {
|
|
||||||
events.push(event)
|
|
||||||
}
|
|
||||||
|
|
||||||
const types = events.map(e => e.type)
|
|
||||||
|
|
||||||
// Verify thinking block is started, then closed, then tool call starts
|
|
||||||
const thinkingStartIdx = types.indexOf('content_block_start')
|
|
||||||
const firstStopIdx = types.indexOf('content_block_stop')
|
|
||||||
const toolStartIdx = types.indexOf(
|
|
||||||
'content_block_start',
|
|
||||||
thinkingStartIdx + 1,
|
|
||||||
)
|
|
||||||
|
|
||||||
expect(thinkingStartIdx).toBeGreaterThanOrEqual(0)
|
|
||||||
expect(firstStopIdx).toBeGreaterThan(thinkingStartIdx)
|
|
||||||
expect(toolStartIdx).toBeGreaterThan(firstStopIdx)
|
|
||||||
|
|
||||||
// Verify thinking block start content
|
|
||||||
const thinkingStart = events[thinkingStartIdx] as {
|
|
||||||
content_block?: Record<string, unknown>
|
|
||||||
}
|
|
||||||
expect(thinkingStart?.content_block?.type).toBe('thinking')
|
|
||||||
})
|
|
||||||
|
|||||||
@@ -60,22 +60,11 @@ const GITHUB_API_VERSION = '2022-11-28'
|
|||||||
const GITHUB_429_MAX_RETRIES = 3
|
const GITHUB_429_MAX_RETRIES = 3
|
||||||
const GITHUB_429_BASE_DELAY_SEC = 1
|
const GITHUB_429_BASE_DELAY_SEC = 1
|
||||||
const GITHUB_429_MAX_DELAY_SEC = 32
|
const GITHUB_429_MAX_DELAY_SEC = 32
|
||||||
const GEMINI_API_HOST = 'generativelanguage.googleapis.com'
|
|
||||||
|
|
||||||
function isGithubModelsMode(): boolean {
|
function isGithubModelsMode(): boolean {
|
||||||
return isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB)
|
return isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB)
|
||||||
}
|
}
|
||||||
|
|
||||||
function hasGeminiApiHost(baseUrl: string | undefined): boolean {
|
|
||||||
if (!baseUrl) return false
|
|
||||||
|
|
||||||
try {
|
|
||||||
return new URL(baseUrl).hostname.toLowerCase() === GEMINI_API_HOST
|
|
||||||
} catch {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
function formatRetryAfterHint(response: Response): string {
|
function formatRetryAfterHint(response: Response): string {
|
||||||
const ra = response.headers.get('retry-after')
|
const ra = response.headers.get('retry-after')
|
||||||
return ra ? ` (Retry-After: ${ra})` : ''
|
return ra ? ` (Retry-After: ${ra})` : ''
|
||||||
@@ -195,12 +184,10 @@ function convertContentBlocks(
|
|||||||
// handled separately
|
// handled separately
|
||||||
break
|
break
|
||||||
case 'thinking':
|
case 'thinking':
|
||||||
case 'redacted_thinking':
|
// Append thinking as text with a marker for models that support reasoning
|
||||||
// Strip thinking blocks for OpenAI-compatible providers.
|
if (block.thinking) {
|
||||||
// These are Anthropic-specific content types that 3P providers
|
parts.push({ type: 'text', text: `<thinking>${block.thinking}</thinking>` })
|
||||||
// don't understand. Serializing them as <thinking> text corrupts
|
}
|
||||||
// multi-turn context: the model sees the tags as part of its
|
|
||||||
// previous reply and may mimic or misattribute them.
|
|
||||||
break
|
break
|
||||||
default:
|
default:
|
||||||
if (block.text) {
|
if (block.text) {
|
||||||
@@ -214,13 +201,6 @@ function convertContentBlocks(
|
|||||||
return parts
|
return parts
|
||||||
}
|
}
|
||||||
|
|
||||||
function isGeminiMode(): boolean {
|
|
||||||
return (
|
|
||||||
isEnvTruthy(process.env.CLAUDE_CODE_USE_GEMINI) ||
|
|
||||||
hasGeminiApiHost(process.env.OPENAI_BASE_URL)
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
function convertMessages(
|
function convertMessages(
|
||||||
messages: Array<{ role: string; message?: { role?: string; content?: unknown }; content?: unknown }>,
|
messages: Array<{ role: string; message?: { role?: string; content?: unknown }; content?: unknown }>,
|
||||||
system: unknown,
|
system: unknown,
|
||||||
@@ -272,7 +252,6 @@ function convertMessages(
|
|||||||
// Check for tool_use blocks
|
// Check for tool_use blocks
|
||||||
if (Array.isArray(content)) {
|
if (Array.isArray(content)) {
|
||||||
const toolUses = content.filter((b: { type?: string }) => b.type === 'tool_use')
|
const toolUses = content.filter((b: { type?: string }) => b.type === 'tool_use')
|
||||||
const thinkingBlock = content.find((b: { type?: string }) => b.type === 'thinking')
|
|
||||||
const textContent = content.filter(
|
const textContent = content.filter(
|
||||||
(b: { type?: string }) => b.type !== 'tool_use' && b.type !== 'thinking',
|
(b: { type?: string }) => b.type !== 'tool_use' && b.type !== 'thinking',
|
||||||
)
|
)
|
||||||
@@ -292,46 +271,18 @@ function convertMessages(
|
|||||||
name?: string
|
name?: string
|
||||||
input?: unknown
|
input?: unknown
|
||||||
extra_content?: Record<string, unknown>
|
extra_content?: Record<string, unknown>
|
||||||
signature?: string
|
}) => ({
|
||||||
}, index) => {
|
id: tu.id ?? `call_${crypto.randomUUID().replace(/-/g, '')}`,
|
||||||
const toolCall: NonNullable<OpenAIMessage['tool_calls']>[number] = {
|
type: 'function' as const,
|
||||||
id: tu.id ?? `call_${crypto.randomUUID().replace(/-/g, '')}`,
|
function: {
|
||||||
type: 'function' as const,
|
name: tu.name ?? 'unknown',
|
||||||
function: {
|
arguments:
|
||||||
name: tu.name ?? 'unknown',
|
typeof tu.input === 'string'
|
||||||
arguments:
|
? tu.input
|
||||||
typeof tu.input === 'string'
|
: JSON.stringify(tu.input ?? {}),
|
||||||
? tu.input
|
},
|
||||||
: JSON.stringify(tu.input ?? {}),
|
...(tu.extra_content ? { extra_content: tu.extra_content } : {}),
|
||||||
},
|
}),
|
||||||
}
|
|
||||||
|
|
||||||
// Preserve existing extra_content if present
|
|
||||||
if (tu.extra_content) {
|
|
||||||
toolCall.extra_content = { ...tu.extra_content }
|
|
||||||
}
|
|
||||||
|
|
||||||
// Handle Gemini thought_signature
|
|
||||||
if (isGeminiMode()) {
|
|
||||||
// If the model provided a signature in the tool_use block itself (e.g. from a previous Turn/Step)
|
|
||||||
// Use thinkingBlock.signature for ALL tool calls in the same assistant turn if available.
|
|
||||||
// The API requires the same signature on every replayed function call part in a parallel set.
|
|
||||||
const signature = tu.signature ?? (thinkingBlock as any)?.signature
|
|
||||||
|
|
||||||
// Merge into existing google-specific metadata if present
|
|
||||||
const existingGoogle = (toolCall.extra_content?.google as Record<string, unknown>) ?? {}
|
|
||||||
|
|
||||||
toolCall.extra_content = {
|
|
||||||
...toolCall.extra_content,
|
|
||||||
google: {
|
|
||||||
...existingGoogle,
|
|
||||||
thought_signature: signature ?? "skip_thought_signature_validator"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return toolCall
|
|
||||||
},
|
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -450,7 +401,7 @@ function normalizeSchemaForOpenAI(
|
|||||||
function convertTools(
|
function convertTools(
|
||||||
tools: Array<{ name: string; description?: string; input_schema?: Record<string, unknown> }>,
|
tools: Array<{ name: string; description?: string; input_schema?: Record<string, unknown> }>,
|
||||||
): OpenAITool[] {
|
): OpenAITool[] {
|
||||||
const isGemini = isGeminiMode()
|
const isGemini = isEnvTruthy(process.env.CLAUDE_CODE_USE_GEMINI)
|
||||||
|
|
||||||
return tools
|
return tools
|
||||||
.filter(t => t.name !== 'ToolSearchTool') // Not relevant for OpenAI
|
.filter(t => t.name !== 'ToolSearchTool') // Not relevant for OpenAI
|
||||||
@@ -492,7 +443,6 @@ interface OpenAIStreamChunk {
|
|||||||
delta: {
|
delta: {
|
||||||
role?: string
|
role?: string
|
||||||
content?: string | null
|
content?: string | null
|
||||||
reasoning_content?: string | null
|
|
||||||
tool_calls?: Array<{
|
tool_calls?: Array<{
|
||||||
index: number
|
index: number
|
||||||
id?: string
|
id?: string
|
||||||
@@ -575,8 +525,6 @@ async function* openaiStreamToAnthropic(
|
|||||||
}
|
}
|
||||||
>()
|
>()
|
||||||
let hasEmittedContentStart = false
|
let hasEmittedContentStart = false
|
||||||
let hasEmittedThinkingStart = false
|
|
||||||
let hasClosedThinking = false
|
|
||||||
let lastStopReason: 'tool_use' | 'max_tokens' | 'end_turn' | null = null
|
let lastStopReason: 'tool_use' | 'max_tokens' | 'end_turn' | null = null
|
||||||
let hasEmittedFinalUsage = false
|
let hasEmittedFinalUsage = false
|
||||||
let hasProcessedFinishReason = false
|
let hasProcessedFinishReason = false
|
||||||
@@ -633,34 +581,9 @@ async function* openaiStreamToAnthropic(
|
|||||||
for (const choice of chunk.choices ?? []) {
|
for (const choice of chunk.choices ?? []) {
|
||||||
const delta = choice.delta
|
const delta = choice.delta
|
||||||
|
|
||||||
// Reasoning models (e.g. GLM-5, DeepSeek) may stream chain-of-thought
|
|
||||||
// in `reasoning_content` before the actual reply appears in `content`.
|
|
||||||
// Emit reasoning as a thinking block and content as a text block.
|
|
||||||
if (delta.reasoning_content != null && delta.reasoning_content !== '') {
|
|
||||||
if (!hasEmittedThinkingStart) {
|
|
||||||
yield {
|
|
||||||
type: 'content_block_start',
|
|
||||||
index: contentBlockIndex,
|
|
||||||
content_block: { type: 'thinking', thinking: '' },
|
|
||||||
}
|
|
||||||
hasEmittedThinkingStart = true
|
|
||||||
}
|
|
||||||
yield {
|
|
||||||
type: 'content_block_delta',
|
|
||||||
index: contentBlockIndex,
|
|
||||||
delta: { type: 'thinking_delta', thinking: delta.reasoning_content },
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Text content — use != null to distinguish absent field from empty string,
|
// Text content — use != null to distinguish absent field from empty string,
|
||||||
// some providers send "" as first delta to signal streaming start
|
// some providers send "" as first delta to signal streaming start
|
||||||
if (delta.content != null && delta.content !== '') {
|
if (delta.content != null) {
|
||||||
// Close thinking block if transitioning from reasoning to content
|
|
||||||
if (hasEmittedThinkingStart && !hasClosedThinking) {
|
|
||||||
yield { type: 'content_block_stop', index: contentBlockIndex }
|
|
||||||
contentBlockIndex++
|
|
||||||
hasClosedThinking = true
|
|
||||||
}
|
|
||||||
if (!hasEmittedContentStart) {
|
if (!hasEmittedContentStart) {
|
||||||
yield {
|
yield {
|
||||||
type: 'content_block_start',
|
type: 'content_block_start',
|
||||||
@@ -680,12 +603,7 @@ async function* openaiStreamToAnthropic(
|
|||||||
if (delta.tool_calls) {
|
if (delta.tool_calls) {
|
||||||
for (const tc of delta.tool_calls) {
|
for (const tc of delta.tool_calls) {
|
||||||
if (tc.id && tc.function?.name) {
|
if (tc.id && tc.function?.name) {
|
||||||
// New tool call starting — close any open thinking block first
|
// New tool call starting
|
||||||
if (hasEmittedThinkingStart && !hasClosedThinking) {
|
|
||||||
yield { type: 'content_block_stop', index: contentBlockIndex }
|
|
||||||
contentBlockIndex++
|
|
||||||
hasClosedThinking = true
|
|
||||||
}
|
|
||||||
if (hasEmittedContentStart) {
|
if (hasEmittedContentStart) {
|
||||||
yield {
|
yield {
|
||||||
type: 'content_block_stop',
|
type: 'content_block_stop',
|
||||||
@@ -715,13 +633,6 @@ async function* openaiStreamToAnthropic(
|
|||||||
name: tc.function.name,
|
name: tc.function.name,
|
||||||
input: {},
|
input: {},
|
||||||
...(tc.extra_content ? { extra_content: tc.extra_content } : {}),
|
...(tc.extra_content ? { extra_content: tc.extra_content } : {}),
|
||||||
// Extract Gemini signature from extra_content
|
|
||||||
...((tc.extra_content?.google as any)?.thought_signature
|
|
||||||
? {
|
|
||||||
signature: (tc.extra_content.google as any)
|
|
||||||
.thought_signature,
|
|
||||||
}
|
|
||||||
: {}),
|
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
contentBlockIndex++
|
contentBlockIndex++
|
||||||
@@ -767,12 +678,6 @@ async function* openaiStreamToAnthropic(
|
|||||||
if (choice.finish_reason && !hasProcessedFinishReason) {
|
if (choice.finish_reason && !hasProcessedFinishReason) {
|
||||||
hasProcessedFinishReason = true
|
hasProcessedFinishReason = true
|
||||||
|
|
||||||
// Close any open thinking block that wasn't closed by content transition
|
|
||||||
if (hasEmittedThinkingStart && !hasClosedThinking) {
|
|
||||||
yield { type: 'content_block_stop', index: contentBlockIndex }
|
|
||||||
contentBlockIndex++
|
|
||||||
hasClosedThinking = true
|
|
||||||
}
|
|
||||||
// Close any open content blocks
|
// Close any open content blocks
|
||||||
if (hasEmittedContentStart) {
|
if (hasEmittedContentStart) {
|
||||||
yield {
|
yield {
|
||||||
@@ -1098,7 +1003,7 @@ class OpenAIShimMessages {
|
|||||||
...(options?.headers ?? {}),
|
...(options?.headers ?? {}),
|
||||||
}
|
}
|
||||||
|
|
||||||
const isGemini = isGeminiMode()
|
const isGemini = isEnvTruthy(process.env.CLAUDE_CODE_USE_GEMINI)
|
||||||
const apiKey =
|
const apiKey =
|
||||||
this.providerOverride?.apiKey ?? process.env.OPENAI_API_KEY ?? ''
|
this.providerOverride?.apiKey ?? process.env.OPENAI_API_KEY ?? ''
|
||||||
// Detect Azure endpoints by hostname (not raw URL) to prevent bypass via
|
// Detect Azure endpoints by hostname (not raw URL) to prevent bypass via
|
||||||
@@ -1211,7 +1116,6 @@ class OpenAIShimMessages {
|
|||||||
| string
|
| string
|
||||||
| null
|
| null
|
||||||
| Array<{ type?: string; text?: string }>
|
| Array<{ type?: string; text?: string }>
|
||||||
reasoning_content?: string | null
|
|
||||||
tool_calls?: Array<{
|
tool_calls?: Array<{
|
||||||
id: string
|
id: string
|
||||||
function: { name: string; arguments: string }
|
function: { name: string; arguments: string }
|
||||||
@@ -1233,17 +1137,7 @@ class OpenAIShimMessages {
|
|||||||
const choice = data.choices?.[0]
|
const choice = data.choices?.[0]
|
||||||
const content: Array<Record<string, unknown>> = []
|
const content: Array<Record<string, unknown>> = []
|
||||||
|
|
||||||
// Some reasoning models (e.g. GLM-5) put their reply in reasoning_content
|
const rawContent = choice?.message?.content
|
||||||
// while content stays null — emit reasoning as a thinking block, then
|
|
||||||
// fall back to it for visible text if content is empty.
|
|
||||||
const reasoningText = choice?.message?.reasoning_content
|
|
||||||
if (typeof reasoningText === 'string' && reasoningText) {
|
|
||||||
content.push({ type: 'thinking', thinking: reasoningText })
|
|
||||||
}
|
|
||||||
const rawContent =
|
|
||||||
choice?.message?.content !== '' && choice?.message?.content != null
|
|
||||||
? choice?.message?.content
|
|
||||||
: choice?.message?.reasoning_content
|
|
||||||
if (typeof rawContent === 'string' && rawContent) {
|
if (typeof rawContent === 'string' && rawContent) {
|
||||||
content.push({ type: 'text', text: rawContent })
|
content.push({ type: 'text', text: rawContent })
|
||||||
} else if (Array.isArray(rawContent) && rawContent.length > 0) {
|
} else if (Array.isArray(rawContent) && rawContent.length > 0) {
|
||||||
@@ -1276,10 +1170,6 @@ class OpenAIShimMessages {
|
|||||||
name: tc.function.name,
|
name: tc.function.name,
|
||||||
input,
|
input,
|
||||||
...(tc.extra_content ? { extra_content: tc.extra_content } : {}),
|
...(tc.extra_content ? { extra_content: tc.extra_content } : {}),
|
||||||
// Extract Gemini signature from extra_content
|
|
||||||
...((tc.extra_content?.google as any)?.thought_signature
|
|
||||||
? { signature: (tc.extra_content.google as any).thought_signature }
|
|
||||||
: {}),
|
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||

@@ -1,127 +0,0 @@
-import { describe, expect, test } from 'bun:test'
-
-import type { Message } from '../../types/message.js'
-import { createAssistantMessage, createUserMessage } from '../../utils/messages.js'
-
-// We test the exported collectCompactableToolIds behavior indirectly via
-// the public microcompactMessages + time-based path. But first we need to
-// verify the core predicate: MCP tools (prefixed 'mcp__') should be
-// compactable alongside the built-in tool set.
-
-// Import internals we can test
-import { evaluateTimeBasedTrigger } from './microCompact.js'
-
-/**
- * Helper: build a minimal assistant message with a tool_use block.
- */
-function assistantWithToolUse(toolName: string, toolId: string): Message {
-  return createAssistantMessage({
-    content: [
-      {
-        type: 'tool_use' as const,
-        id: toolId,
-        name: toolName,
-        input: {},
-      },
-    ],
-  })
-}
-
-/**
- * Helper: build a user message with a tool_result block.
- */
-function userWithToolResult(toolId: string, output: string): Message {
-  return createUserMessage({
-    content: [
-      {
-        type: 'tool_result' as const,
-        tool_use_id: toolId,
-        content: output,
-      },
-    ],
-  })
-}
-
-describe('microCompact MCP tool compaction', () => {
-  // We can't easily unit-test the private isCompactableTool directly,
-  // but we can test the full time-based microcompact path which exercises
-  // collectCompactableToolIds → isCompactableTool under the hood.
-  // The time-based path is the simplest to trigger: it content-clears
-  // old tool results when the gap since last assistant message exceeds
-  // the threshold.
-
-  // However, evaluateTimeBasedTrigger depends on config (GrowthBook).
-  // So instead, let's test the observable behavior by importing the
-  // microcompactMessages function and checking that MCP tool_use blocks
-  // are collected.
-
-  // Since collectCompactableToolIds is not exported, we test the predicate
-  // behavior by verifying that the module loads without error and that
-  // built-in and MCP tools are treated consistently.
-
-  test('module exports load correctly', async () => {
-    const mod = await import('./microCompact.js')
-    expect(mod.microcompactMessages).toBeFunction()
-    expect(mod.estimateMessageTokens).toBeFunction()
-    expect(mod.evaluateTimeBasedTrigger).toBeFunction()
-  })
-
-  test('estimateMessageTokens counts MCP tool_use blocks', async () => {
-    const { estimateMessageTokens } = await import('./microCompact.js')
-
-    const builtinMessages: Message[] = [
-      assistantWithToolUse('Read', 'tool-builtin-1'),
-      userWithToolResult('tool-builtin-1', 'file contents here'),
-    ]
-
-    const mcpMessages: Message[] = [
-      assistantWithToolUse('mcp__github__get_file_contents', 'tool-mcp-1'),
-      userWithToolResult('tool-mcp-1', 'file contents here'),
-    ]
-
-    const builtinTokens = estimateMessageTokens(builtinMessages)
-    const mcpTokens = estimateMessageTokens(mcpMessages)
-
-    // Both should produce non-zero estimates
-    expect(builtinTokens).toBeGreaterThan(0)
-    expect(mcpTokens).toBeGreaterThan(0)
-
-    // The tool_result content is identical, so token estimates should be
-    // similar (tool_use name differs slightly, so not exactly equal)
-    expect(Math.abs(builtinTokens - mcpTokens)).toBeLessThan(50)
-  })
-
-  test('microcompactMessages processes MCP tools without error', async () => {
-    const { microcompactMessages } = await import('./microCompact.js')
-
-    const messages: Message[] = [
-      assistantWithToolUse('mcp__slack__send_message', 'tool-mcp-2'),
-      userWithToolResult('tool-mcp-2', 'Message sent successfully'),
-      assistantWithToolUse('mcp__github__create_pull_request', 'tool-mcp-3'),
-      userWithToolResult('tool-mcp-3', JSON.stringify({ number: 42, url: 'https://github.com/org/repo/pull/42' })),
-    ]
-
-    // Should not throw — MCP tools should be handled gracefully
-    const result = await microcompactMessages(messages)
-    expect(result).toBeDefined()
-    expect(result.messages).toBeDefined()
-    expect(result.messages.length).toBe(messages.length)
-  })
-
-  test('microcompactMessages processes mixed built-in and MCP tools', async () => {
-    const { microcompactMessages } = await import('./microCompact.js')
-
-    const messages: Message[] = [
-      assistantWithToolUse('Read', 'tool-read-1'),
-      userWithToolResult('tool-read-1', 'some file content'),
-      assistantWithToolUse('mcp__playwright__screenshot', 'tool-mcp-4'),
-      userWithToolResult('tool-mcp-4', 'base64-encoded-screenshot-data'.repeat(100)),
-      assistantWithToolUse('Bash', 'tool-bash-1'),
-      userWithToolResult('tool-bash-1', 'command output'),
-    ]
-
-    const result = await microcompactMessages(messages)
-    expect(result).toBeDefined()
-    expect(result.messages.length).toBe(messages.length)
-  })
-})

@@ -37,7 +37,7 @@ export const TIME_BASED_MC_CLEARED_MESSAGE = '[Old tool result content cleared]'

 const IMAGE_MAX_TOKEN_SIZE = 2000

-// Only compact these built-in tools (MCP tools are also compactable via prefix match)
+// Only compact these tools
 const COMPACTABLE_TOOLS = new Set<string>([
   FILE_READ_TOOL_NAME,
   ...SHELL_TOOL_NAMES,
@@ -49,13 +49,7 @@ const COMPACTABLE_TOOLS = new Set<string>([
   FILE_WRITE_TOOL_NAME,
 ])

-const MCP_TOOL_PREFIX = 'mcp__'
+// --- Cached microcompact state (internal-only, gated by feature('CACHED_MICROCOMPACT')) ---

-function isCompactableTool(name: string): boolean {
-  return COMPACTABLE_TOOLS.has(name) || name.startsWith(MCP_TOOL_PREFIX)
-}
-
-// --- Cached microcompact state (gated by feature('CACHED_MICROCOMPACT')) ---
-
 // Lazy-initialized cached MC module and state to avoid importing in external builds.
 // The imports and state live inside feature() checks for dead code elimination.
@@ -237,7 +231,7 @@ function collectCompactableToolIds(messages: Message[]): string[] {
       Array.isArray(message.message.content)
     ) {
       for (const block of message.message.content) {
-        if (block.type === 'tool_use' && isCompactableTool(block.name)) {
+        if (block.type === 'tool_use' && COMPACTABLE_TOOLS.has(block.name)) {
           ids.push(block.id)
         }
       }

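For reference, the predicate deleted above treated any MCP-served tool as compactable purely by name prefix; reconstructed verbatim from the removed lines:

    const MCP_TOOL_PREFIX = 'mcp__'

    function isCompactableTool(name: string): boolean {
      // Built-in tools are listed explicitly in COMPACTABLE_TOOLS; MCP tools
      // (e.g. 'mcp__github__get_file_contents') matched via their prefix.
      return COMPACTABLE_TOOLS.has(name) || name.startsWith(MCP_TOOL_PREFIX)
    }

With the predicate gone, collectCompactableToolIds is back to the exact-name COMPACTABLE_TOOLS check, so MCP tool results are no longer content-cleared.
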
@@ -72,23 +72,16 @@ export function getContextWindowForModel(
     return 1_000_000
   }

-  // OpenAI-compatible provider — use known context windows for the model.
-  // Unknown models get a conservative 8k default so auto-compact triggers
-  // before hitting a hard context_window_exceeded error (issue #248 finding 3).
-  const isOpenAIProvider =
+  // OpenAI-compatible provider — use known context windows for the model
+  if (
     isEnvTruthy(process.env.CLAUDE_CODE_USE_OPENAI) ||
     isEnvTruthy(process.env.CLAUDE_CODE_USE_GEMINI) ||
     isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB)
-  if (isOpenAIProvider) {
+  ) {
     const openaiWindow = getOpenAIContextWindow(model)
     if (openaiWindow !== undefined) {
       return openaiWindow
     }
-    console.error(
-      `[context] Warning: model "${model}" not in context window table — using conservative 8k default. ` +
-        'Add it to src/utils/model/openaiContextWindows.ts for accurate compaction.',
-    )
-    return 8_000
   }

   const cap = getModelCapability(model)

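The deleted branch gave unknown third-party models a deliberately small window; a sketch of the removed behavior (hypothetical wrapper name, logic taken from the removed lines):

    function contextWindowOrConservativeDefault(model: string): number {
      const known = getOpenAIContextWindow(model)
      if (known !== undefined) return known
      // Unknown model: warn and clamp to 8k so auto-compact triggers before
      // the provider returns a hard context_window_exceeded error.
      console.error(`[context] Warning: model "${model}" not in context window table`)
      return 8_000
    }

After this change, unlisted models fall through to getModelCapability(model) instead.
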
@@ -69,93 +69,3 @@ test('loadConversationForResume rejects oversized transcripts before resume hook
   )
   expect(hookSpy).not.toHaveBeenCalled()
 })
-
-test('deserializeMessagesWithInterruptDetection strips thinking blocks only for OpenAI-compatible providers', async () => {
-  const serializedMessages = [
-    user(id(10), 'hello'),
-    {
-      type: 'assistant',
-      uuid: id(11),
-      parentUuid: id(10),
-      timestamp: ts,
-      cwd: '/tmp',
-      sessionId,
-      version: 'test',
-      message: {
-        role: 'assistant',
-        content: [
-          { type: 'thinking', thinking: 'secret reasoning' },
-          { type: 'text', text: 'visible reply' },
-        ],
-      },
-    },
-    {
-      type: 'assistant',
-      uuid: id(12),
-      parentUuid: id(11),
-      timestamp: ts,
-      cwd: '/tmp',
-      sessionId,
-      version: 'test',
-      message: {
-        role: 'assistant',
-        content: [{ type: 'thinking', thinking: 'only hidden reasoning' }],
-      },
-    },
-    user(id(13), 'follow up'),
-  ]
-
-  mock.module('./model/providers.js', () => ({
-    getAPIProvider: () => 'openai',
-    isOpenAICompatibleProvider: (provider: string) =>
-      provider === 'openai' ||
-      provider === 'gemini' ||
-      provider === 'github' ||
-      provider === 'codex',
-  }))
-
-  const openaiModule = await import(`./conversationRecovery.ts?provider=openai-${Date.now()}`)
-  const thirdParty = openaiModule.deserializeMessagesWithInterruptDetection(serializedMessages as never[])
-  const thirdPartyAssistantMessages = thirdParty.messages.filter(
-    message => message.type === 'assistant',
-  )
-
-  expect(thirdPartyAssistantMessages).toHaveLength(2)
-  expect(thirdPartyAssistantMessages[0]?.message?.content).toEqual([
-    { type: 'text', text: 'visible reply' },
-  ])
-  expect(
-    JSON.stringify(thirdPartyAssistantMessages.map(message => message.message?.content)),
-  ).not.toContain('secret reasoning')
-  expect(
-    JSON.stringify(thirdPartyAssistantMessages.map(message => message.message?.content)),
-  ).not.toContain('only hidden reasoning')
-
-  mock.restore()
-  mock.module('./model/providers.js', () => ({
-    getAPIProvider: () => 'bedrock',
-    isOpenAICompatibleProvider: (provider: string) =>
-      provider === 'openai' ||
-      provider === 'gemini' ||
-      provider === 'github' ||
-      provider === 'codex',
-  }))
-
-  const bedrockModule = await import(`./conversationRecovery.ts?provider=bedrock-${Date.now()}`)
-  const anthropicCompatible = bedrockModule.deserializeMessagesWithInterruptDetection(serializedMessages as never[])
-  const anthropicAssistantMessages = anthropicCompatible.messages.filter(
-    message => message.type === 'assistant',
-  )
-
-  expect(anthropicAssistantMessages).toHaveLength(2)
-  expect(anthropicAssistantMessages[0]?.message?.content).toEqual([
-    { type: 'thinking', thinking: 'secret reasoning' },
-    { type: 'text', text: 'visible reply' },
-  ])
-  expect(
-    JSON.stringify(anthropicAssistantMessages.map(message => message.message?.content)),
-  ).toContain('secret reasoning')
-  expect(
-    JSON.stringify(anthropicAssistantMessages.map(message => message.message?.content)),
-  ).not.toContain('only hidden reasoning')
-})

@@ -13,7 +13,6 @@ const originalSimple = process.env.CLAUDE_CODE_SIMPLE
 const sessionId = '00000000-0000-4000-8000-000000001999'
 const ts = '2026-04-02T00:00:00.000Z'

-
 function id(n: number): string {
   return `00000000-0000-4000-8000-${String(n).padStart(12, '0')}`
 }
@@ -77,3 +76,4 @@ test('loadConversationForResume rejects oversized reconstructed transcripts', as
     'Reconstructed transcript is too large to resume safely',
   )
 })
+

@@ -24,7 +24,6 @@ import {
   type FileHistorySnapshot,
 } from './fileHistory.js'
 import { logError } from './log.js'
-import { getAPIProvider } from './model/providers.js'
 import {
   createAssistantMessage,
   createUserMessage,
@@ -178,25 +177,6 @@ export type DeserializeResult = {
   turnInterruptionState: TurnInterruptionState
 }

-/**
- * Remove thinking/redacted_thinking content blocks from assistant messages.
- * Messages that become empty after stripping are removed entirely.
- */
-function stripThinkingBlocks(messages: NormalizedMessage[]): NormalizedMessage[] {
-  return messages.reduce<NormalizedMessage[]>((acc, msg) => {
-    if (msg.type !== 'assistant' || !Array.isArray(msg.message?.content)) {
-      acc.push(msg)
-      return acc
-    }
-    const filtered = msg.message.content.filter(
-      (block: { type?: string }) => block.type !== 'thinking' && block.type !== 'redacted_thinking',
-    )
-    if (filtered.length === 0) return acc
-    acc.push({ ...msg, message: { ...msg.message, content: filtered } })
-    return acc
-  }, [])
-}
-
 /**
  * Deserializes messages from a log file into the format expected by the REPL.
  * Filters unresolved tool uses, orphaned thinking messages, and appends a
@@ -247,19 +227,10 @@ export function deserializeMessagesWithInterruptDetection(
     filteredToolUses,
   ) as NormalizedMessage[]

-  // Strip thinking/redacted_thinking content blocks from assistant messages
-  // when resuming against a 3P provider. These Anthropic-specific blocks cause
-  // 400 errors or context corruption on OpenAI-compatible providers (issue #248 finding 5).
-  const provider = getAPIProvider()
-  const isThirdPartyProvider = provider !== 'firstParty' && provider !== 'bedrock' && provider !== 'vertex' && provider !== 'foundry'
-  const thinkingStripped = isThirdPartyProvider
-    ? stripThinkingBlocks(filteredThinking)
-    : filteredThinking
-
   // Filter out assistant messages with only whitespace text content.
   // This can happen when model outputs "\n\n" before thinking, user cancels mid-stream.
   const filteredMessages = filterWhitespaceOnlyAssistantMessages(
-    thinkingStripped,
+    filteredThinking,
   ) as NormalizedMessage[]

   const internalState = detectTurnInterruption(filteredMessages)

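Taken together with the stripThinkingBlocks removal above, this reverts the resume path to passing filteredThinking straight through for every provider. A minimal sketch of what the stripping did, using the message shape from the deleted test:

    const assistant = {
      type: 'assistant',
      message: {
        role: 'assistant',
        content: [
          { type: 'thinking', thinking: 'secret reasoning' },
          { type: 'text', text: 'visible reply' },
        ],
      },
    }
    // stripThinkingBlocks([assistant]) kept only the text block; a message
    // whose content became empty after filtering was dropped entirely.
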
@@ -4,10 +4,6 @@ import { tmpdir } from 'os'
 import { join } from 'path'
 import { extractDraggedFilePaths } from './dragDropPaths.js'

-function escapeFinderDraggedPath(filePath: string): string {
-  return filePath.replace(/([\\ ])/g, '\\$1')
-}
-
 describe('extractDraggedFilePaths', () => {
   // Paths that exist on any system.
   const thisFile = import.meta.path
@@ -84,12 +80,6 @@ describe('extractDraggedFilePaths', () => {
     })
   })

-  test('escapeFinderDraggedPath escapes spaces and backslashes', () => {
-    expect(escapeFinderDraggedPath('/tmp/my\\notes file.txt')).toBe(
-      '/tmp/my\\\\notes\\ file.txt',
-    )
-  })
-
   // Backslash-escaped paths are a Finder/macOS + Linux convention — on
   // Windows the shell-escape step is skipped, so these cases do not apply.
   if (process.platform !== 'win32') {
@@ -102,7 +92,7 @@ describe('extractDraggedFilePaths', () => {

     test('resolves an escaped real file with a space in its name', () => {
       // Raw form matches what a terminal delivers on Finder drag.
-      const escaped = escapeFinderDraggedPath(spacedFile)
+      const escaped = spacedFile.replace(/ /g, '\\ ')
       expect(extractDraggedFilePaths(escaped)).toEqual([spacedFile])
     })
   })

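The deleted helper is small enough to restate in full (verbatim from the removed lines); note the inlined replace(/ /g, '\\ ') now covers only spaces, not backslashes:

    function escapeFinderDraggedPath(filePath: string): string {
      // '/tmp/my\notes file.txt' becomes '/tmp/my\\notes\ file.txt',
      // i.e. the raw form a terminal receives on a Finder drag.
      return filePath.replace(/([\\ ])/g, '\\$1')
    }
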
@@ -41,7 +41,7 @@ describe('hydrateGithubModelsTokenFromSecureStorage', () => {
   }))

   const { hydrateGithubModelsTokenFromSecureStorage } = await import(
-    './githubModelsCredentials.js?hydrate=sets-token'
+    './githubModelsCredentials.js'
   )
   hydrateGithubModelsTokenFromSecureStorage()
   expect(process.env.GITHUB_TOKEN).toBe('stored-secret')
@@ -62,7 +62,7 @@ describe('hydrateGithubModelsTokenFromSecureStorage', () => {
   }))

   const { hydrateGithubModelsTokenFromSecureStorage } = await import(
-    './githubModelsCredentials.js?hydrate=preserve-existing'
+    './githubModelsCredentials.js'
   )
   hydrateGithubModelsTokenFromSecureStorage()
   expect(process.env.GITHUB_TOKEN).toBe('already')

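The ?hydrate=... query strings being dropped here are a module-cache-busting idiom: a dynamic import() with a distinct query resolves to a fresh module instance, so module-level state cannot leak between tests. A minimal sketch of the idiom (hypothetical module path):

    // Two different query strings yield two independent module evaluations.
    const first = await import('./someModule.js?case=a')
    const second = await import('./someModule.js?case=b')

Reverting to a bare './githubModelsCredentials.js' import means every test now shares one cached module instance.
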
@@ -1,11 +1,13 @@
 import { describe, expect, test } from 'bun:test'

-describe('readGithubModelsToken', () => {
-  test('returns undefined in bare mode', async () => {
-    const { readGithubModelsToken } = await import(
-      './githubModelsCredentials.js?read-bare-mode'
-    )
+import {
+  clearGithubModelsToken,
+  readGithubModelsToken,
+  saveGithubModelsToken,
+} from './githubModelsCredentials.js'

+describe('readGithubModelsToken', () => {
+  test('returns undefined in bare mode', () => {
     const prev = process.env.CLAUDE_CODE_SIMPLE
     process.env.CLAUDE_CODE_SIMPLE = '1'
     expect(readGithubModelsToken()).toBeUndefined()
@@ -18,11 +20,7 @@ describe('readGithubModelsToken', () => {
 })

 describe('saveGithubModelsToken / clearGithubModelsToken', () => {
-  test('save returns failure in bare mode', async () => {
-    const { saveGithubModelsToken } = await import(
-      './githubModelsCredentials.js?save-bare-mode'
-    )
-
+  test('save returns failure in bare mode', () => {
     const prev = process.env.CLAUDE_CODE_SIMPLE
     process.env.CLAUDE_CODE_SIMPLE = '1'
     const r = saveGithubModelsToken('abc')
@@ -35,11 +33,7 @@ describe('saveGithubModelsToken / clearGithubModelsToken', () => {
     }
   })

-  test('clear succeeds in bare mode', async () => {
-    const { clearGithubModelsToken } = await import(
-      './githubModelsCredentials.js?clear-bare-mode'
-    )
-
+  test('clear succeeds in bare mode', () => {
     const prev = process.env.CLAUDE_CODE_SIMPLE
     process.env.CLAUDE_CODE_SIMPLE = '1'
     expect(clearGithubModelsToken().success).toBe(true)

@@ -23,19 +23,6 @@ export function readGithubModelsToken(): string | undefined {
   }
 }

-export async function readGithubModelsTokenAsync(): Promise<string | undefined> {
-  if (isBareMode()) return undefined
-  try {
-    const data = (await getSecureStorage().readAsync()) as
-      | ({ githubModels?: GithubModelsCredentialBlob } & Record<string, unknown>)
-      | null
-    const t = data?.githubModels?.accessToken?.trim()
-    return t || undefined
-  } catch {
-    return undefined
-  }
-}
-
 /**
  * If GitHub Models mode is on and no token is in the environment, copy the
  * stored token into process.env so the OpenAI shim and validation see it.

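A caller of the removed async variant would have awaited the secure-storage read instead of blocking on it; an illustrative sketch (usage assumed, not shown in the diff):

    // Prefer the token held in secure storage, else whatever the
    // environment already provides.
    const token = (await readGithubModelsTokenAsync()) ?? process.env.GITHUB_TOKEN
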
@@ -1,52 +1,11 @@
 import { expect, test } from 'bun:test'
-import path from 'path'

-import { resolveRipgrepConfig, wrapRipgrepUnavailableError } from './ripgrep.js'
+import { wrapRipgrepUnavailableError } from './ripgrep.ts'

-const MOCK_BUILTIN_PATH = path.normalize(
-  process.platform === 'win32'
-    ? `vendor/ripgrep/${process.arch}-win32/rg.exe`
-    : `vendor/ripgrep/${process.arch}-${process.platform}/rg`,
-)
-
-test('ripgrepCommand falls back to system rg when builtin binary is missing', () => {
-  const config = resolveRipgrepConfig({
-    userWantsSystemRipgrep: false,
-    bundledMode: false,
-    builtinCommand: MOCK_BUILTIN_PATH,
-    builtinExists: false,
-    systemExecutablePath: '/usr/bin/rg',
-    processExecPath: '/fake/bun',
-  })
-
-  expect(config).toMatchObject({
-    mode: 'system',
-    command: 'rg',
-    args: [],
-  })
-})
-
-test('ripgrepCommand keeps builtin mode when bundled binary exists', () => {
-  const config = resolveRipgrepConfig({
-    userWantsSystemRipgrep: false,
-    bundledMode: false,
-    builtinCommand: MOCK_BUILTIN_PATH,
-    builtinExists: true,
-    systemExecutablePath: '/usr/bin/rg',
-    processExecPath: '/fake/bun',
-  })
-
-  expect(config).toMatchObject({
-    mode: 'builtin',
-    command: MOCK_BUILTIN_PATH,
-    args: [],
-  })
-})
-
 test('wrapRipgrepUnavailableError explains missing packaged fallback', () => {
   const error = wrapRipgrepUnavailableError(
     { code: 'ENOENT', message: 'spawn rg ENOENT' },
-    { mode: 'builtin', command: 'C:\\fake\\vendor\\ripgrep\\rg.exe', args: [] },
+    { mode: 'builtin', command: 'C:\\fake\\vendor\\ripgrep\\rg.exe' },
     'win32',
   )

@@ -59,7 +18,7 @@ test('wrapRipgrepUnavailableError explains missing packaged fallback', () => {
 test('wrapRipgrepUnavailableError explains missing system ripgrep', () => {
   const error = wrapRipgrepUnavailableError(
     { code: 'ENOENT', message: 'spawn rg ENOENT' },
-    { mode: 'system', command: 'rg', args: [] },
+    { mode: 'system', command: 'rg' },
     'linux',
   )

@@ -1,6 +1,5 @@
 import type { ChildProcess, ExecFileException } from 'child_process'
 import { execFile, spawn } from 'child_process'
-import { existsSync } from 'fs'
 import memoize from 'lodash-es/memoize.js'
 import { homedir } from 'os'
 import * as path from 'path'
@@ -31,72 +30,40 @@ type RipgrepConfig = {

 type RipgrepErrorLike = Pick<NodeJS.ErrnoException, 'code' | 'message'>

-function isErrnoException(error: unknown): error is NodeJS.ErrnoException {
-  return error instanceof Error
-}
-
-type ResolveRipgrepConfigArgs = {
-  userWantsSystemRipgrep: boolean
-  bundledMode: boolean
-  builtinCommand: string
-  builtinExists: boolean
-  systemExecutablePath: string
-  processExecPath?: string
-}
-
-export function resolveRipgrepConfig({
-  userWantsSystemRipgrep,
-  bundledMode,
-  builtinCommand,
-  builtinExists,
-  systemExecutablePath,
-  processExecPath = process.execPath,
-}: ResolveRipgrepConfigArgs): RipgrepConfig {
-  if (userWantsSystemRipgrep && systemExecutablePath !== 'rg') {
-    // SECURITY: Use command name 'rg' instead of systemExecutablePath to prevent PATH hijacking
-    return { mode: 'system', command: 'rg', args: [] }
+const getRipgrepConfig = memoize((): RipgrepConfig => {
+  const userWantsSystemRipgrep = isEnvDefinedFalsy(
+    process.env.USE_BUILTIN_RIPGREP,
+  )
+
+  // Try system ripgrep if user wants it
+  if (userWantsSystemRipgrep) {
+    const { cmd: systemPath } = findExecutable('rg', [])
+    if (systemPath !== 'rg') {
+      // SECURITY: Use command name 'rg' instead of systemPath to prevent PATH hijacking
+      // If we used systemPath, a malicious ./rg.exe in current directory could be executed
+      // Using just 'rg' lets the OS resolve it safely with NoDefaultCurrentDirectoryInExePath protection
+      return { mode: 'system', command: 'rg', args: [] }
+    }
   }

-  if (bundledMode) {
+  // In bundled (native) mode, ripgrep is statically compiled into bun-internal
+  // and dispatches based on argv[0]. We spawn ourselves with argv0='rg'.
+  if (isInBundledMode()) {
     return {
       mode: 'embedded',
-      command: processExecPath,
+      command: process.execPath,
       args: ['--no-config'],
       argv0: 'rg',
     }
   }

-  if (builtinExists) {
-    return { mode: 'builtin', command: builtinCommand, args: [] }
-  }
-
-  if (systemExecutablePath !== 'rg') {
-    return { mode: 'system', command: 'rg', args: [] }
-  }
-
-  return { mode: 'builtin', command: builtinCommand, args: [] }
-}
-
-const getRipgrepConfig = memoize((): RipgrepConfig => {
-  const userWantsSystemRipgrep = isEnvDefinedFalsy(
-    process.env.USE_BUILTIN_RIPGREP,
-  )
-  const bundledMode = isInBundledMode()
   const rgRoot = path.resolve(__dirname, 'vendor', 'ripgrep')
-  const builtinCommand =
+  const command =
     process.platform === 'win32'
       ? path.resolve(rgRoot, `${process.arch}-win32`, 'rg.exe')
       : path.resolve(rgRoot, `${process.arch}-${process.platform}`, 'rg')
-  const builtinExists = existsSync(builtinCommand)
-  const { cmd: systemExecutablePath } = findExecutable('rg', [])

-  return resolveRipgrepConfig({
-    userWantsSystemRipgrep,
-    bundledMode,
-    builtinCommand,
-    builtinExists,
-    systemExecutablePath,
-  })
+  return { mode: 'builtin', command, args: [] }
 })

 export function ripgrepCommand(): {
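The SECURITY reasoning survives the rewrite: findExecutable resolves 'rg' only to confirm a system binary exists, and the spawn still uses the bare name so the OS performs the lookup. A minimal sketch of the pattern, assuming the imports already in this file:

    const { cmd } = findExecutable('rg', [])
    if (cmd !== 'rg') {
      // Spawn by name, not by resolved path: a malicious ./rg.exe in the
      // current directory must not win (NoDefaultCurrentDirectoryInExePath
      // keeps the OS lookup safe on Windows).
      spawn('rg', ['--version'])
    }
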
@@ -357,9 +324,7 @@ async function ripGrepFileCount(
       if (settled) return
       settled = true
       reject(
-        isErrnoException(err) && err.code === 'ENOENT'
-          ? wrapRipgrepUnavailableError(err)
-          : err,
+        err.code === 'ENOENT' ? wrapRipgrepUnavailableError(err) : err,
       )
     })
   })
@@ -423,9 +388,7 @@ export async function ripGrepStream(
       if (settled) return
       settled = true
       reject(
-        isErrnoException(err) && err.code === 'ENOENT'
-          ? wrapRipgrepUnavailableError(err)
-          : err,
+        err.code === 'ENOENT' ? wrapRipgrepUnavailableError(err) : err,
       )
     })
   })
@@ -473,9 +436,7 @@ export async function ripGrep(
         const CRITICAL_ERROR_CODES = ['ENOENT', 'EACCES', 'EPERM']
         if (CRITICAL_ERROR_CODES.includes(error.code as string)) {
           reject(
-            isErrnoException(error) && error.code === 'ENOENT'
-              ? wrapRipgrepUnavailableError(error)
-              : error,
+            error.code === 'ENOENT' ? wrapRipgrepUnavailableError(error) : error,
           )
           return
         }

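All three rejection handlers above lose the same guard; it narrowed an unknown error before touching .code (verbatim from the removed lines, with a typical call restated):

    function isErrnoException(error: unknown): error is NodeJS.ErrnoException {
      return error instanceof Error
    }

    // Usage inside a spawn 'error' handler, as in the old code:
    // reject(isErrnoException(err) && err.code === 'ENOENT'
    //   ? wrapRipgrepUnavailableError(err)
    //   : err)
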
@@ -1,68 +0,0 @@
-import { describe, expect, test } from 'bun:test'
-
-import { sanitizeSchemaForOpenAICompat } from './schemaSanitizer'
-
-describe('sanitizeSchemaForOpenAICompat', () => {
-  test('preserves Grep-like properties.pattern while keeping it required', () => {
-    const schema = {
-      type: 'object',
-      properties: {
-        pattern: {
-          type: 'string',
-          description: 'The regular expression pattern to search for in file contents',
-        },
-        path: { type: 'string' },
-        glob: { type: 'string' },
-      },
-      required: ['pattern'],
-    }
-
-    const sanitized = sanitizeSchemaForOpenAICompat(schema)
-    const properties = sanitized.properties as Record<string, unknown> | undefined
-
-    expect(Object.keys(properties ?? {})).toEqual(['pattern', 'path', 'glob'])
-    expect(properties?.pattern).toEqual({
-      type: 'string',
-      description: 'The regular expression pattern to search for in file contents',
-    })
-    expect(sanitized.required).toEqual(['pattern'])
-  })
-
-  test('preserves Glob-like properties.pattern while keeping it required', () => {
-    const schema = {
-      type: 'object',
-      properties: {
-        pattern: {
-          type: 'string',
-          description: 'The glob pattern to match files against',
-        },
-        path: { type: 'string' },
-      },
-      required: ['pattern'],
-    }
-
-    const sanitized = sanitizeSchemaForOpenAICompat(schema)
-    const properties = sanitized.properties as Record<string, unknown> | undefined
-
-    expect(Object.keys(properties ?? {})).toEqual(['pattern', 'path'])
-    expect(properties?.pattern).toEqual({
-      type: 'string',
-      description: 'The glob pattern to match files against',
-    })
-    expect(sanitized.required).toEqual(['pattern'])
-  })
-
-  test('strips JSON Schema validator pattern from string schemas', () => {
-    const schema = {
-      type: 'string',
-      pattern: '^[a-z]+$',
-      minLength: 1,
-    }
-
-    const sanitized = sanitizeSchemaForOpenAICompat(schema)
-
-    expect(sanitized).toEqual({
-      type: 'string',
-    })
-  })
-})

@@ -33,15 +33,6 @@ function stripSchemaKeywords(schema: unknown, keywords: Set<string>): unknown {

   const result: Record<string, unknown> = {}
   for (const [key, value] of Object.entries(schema)) {
-    if (key === 'properties' && isSchemaRecord(value)) {
-      const sanitizedProps: Record<string, unknown> = {}
-      for (const [propName, propSchema] of Object.entries(value)) {
-        sanitizedProps[propName] = stripSchemaKeywords(propSchema, keywords)
-      }
-      result[key] = sanitizedProps
-      continue
-    }
-
     if (keywords.has(key)) {
       continue
     }
@@ -224,13 +215,10 @@ export function sanitizeSchemaForOpenAICompat(
     }
   }

-  const properties = isSchemaRecord(record.properties)
-    ? record.properties
-    : undefined
-
-  if (Array.isArray(record.required) && properties) {
+  if (Array.isArray(record.required) && isSchemaRecord(record.properties)) {
     record.required = record.required.filter(
-      (value): value is string => typeof value === 'string' && value in properties,
+      (value): value is string =>
+        typeof value === 'string' && value in record.properties,
     )
   }

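The deleted recursion into properties is what kept the two meanings of "pattern" apart: as a property name it is a tool argument (Grep/Glob) and must survive, while as a string-schema keyword it is a JSON Schema validator that OpenAI-compatible endpoints reject. The deleted tests pinned down exactly this split:

    // Keyword position: stripped.
    sanitizeSchemaForOpenAICompat({ type: 'string', pattern: '^[a-z]+$' })
    // yields { type: 'string' }

    // Property-name position: preserved, and kept in required.
    sanitizeSchemaForOpenAICompat({
      type: 'object',
      properties: { pattern: { type: 'string' } },
      required: ['pattern'],
    })
    // yields the schema unchanged
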
@@ -97,12 +97,8 @@ export function renderToAnsiString(node: React.ReactNode, columns?: number): Pro
     patchConsole: false
   });

-  // Wait for the component to exit naturally, with a timeout guard so
-  // tests never hang indefinitely if a render error prevents exit().
-  await Promise.race([
-    instance.waitUntilExit(),
-    new Promise<void>(resolve => setTimeout(resolve, 3000)),
-  ]);
+  // Wait for the component to exit naturally
+  await instance.waitUntilExit();

   // Extract only the first frame's content to avoid duplication
   // (Ink outputs multiple frames in non-TTY mode)
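
The removed guard raced the natural exit against a timer so a render error that prevents exit() could not hang the suite; verbatim from the removed lines:

    await Promise.race([
      instance.waitUntilExit(),
      new Promise<void>(resolve => setTimeout(resolve, 3000)),
    ]);

Without it, a component that never calls exit() stalls renderToAnsiString indefinitely.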