Compare commits

7 Commits — v0.5.1 ... fix/securi

| Author | SHA1 | Date |
|---|---|---|
| | a06ea87545 | |
| | c0354e8699 | |
| | 4d4fb2880e | |
| | fdef4a1b4c | |
| | 4cb963e660 | |
| | b09972f223 | |
| | 336ddcc50d | |
@@ -1,3 +1,3 @@
 {
-  ".": "0.5.1"
+  ".": "0.5.2"
 }
@@ -1,5 +1,12 @@
 # Changelog

+## [0.5.2](https://github.com/Gitlawb/openclaude/compare/v0.5.1...v0.5.2) (2026-04-20)
+
+
+### Bug Fixes
+
+* **api:** replace phrase-based reasoning sanitizer with tag-based filter ([#779](https://github.com/Gitlawb/openclaude/issues/779)) ([336ddcc](https://github.com/Gitlawb/openclaude/commit/336ddcc50d59d79ebff50993f2673652aecb0d7d))
+
 ## [0.5.1](https://github.com/Gitlawb/openclaude/compare/v0.5.0...v0.5.1) (2026-04-20)
@@ -1,6 +1,6 @@
 {
   "name": "@gitlawb/openclaude",
-  "version": "0.5.1",
+  "version": "0.5.2",
   "description": "Claude Code opened to any LLM — OpenAI, Gemini, DeepSeek, Ollama, and 200+ models",
   "type": "module",
   "bin": {
@@ -20,6 +20,23 @@ describe('formatReachabilityFailureDetail', () => {
     )
   })

+  test('redacts credentials and sensitive query parameters in endpoint details', () => {
+    const detail = formatReachabilityFailureDetail(
+      'http://user:pass@localhost:11434/v1/models?token=abc123&mode=test',
+      502,
+      'bad gateway',
+      {
+        transport: 'chat_completions',
+        requestedModel: 'llama3.1:8b',
+        resolvedModel: 'llama3.1:8b',
+      },
+    )
+
+    expect(detail).toBe(
+      'Unexpected status 502 from http://redacted:redacted@localhost:11434/v1/models?token=redacted&mode=test. Body: bad gateway',
+    )
+  })
+
   test('adds alias/entitlement hint for codex model support 400s', () => {
     const detail = formatReachabilityFailureDetail(
       'https://chatgpt.com/backend-api/codex/responses',
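The expected string above pins down the redaction contract: embedded basic-auth credentials become `redacted:redacted`, sensitive query values become `redacted`, and benign parameters (`mode=test`) pass through. A minimal sketch of what a `redactUrlForDisplay` helper satisfying that test could look like — only `token` is confirmed sensitive by the test; the rest of the parameter list is an assumption, and the real `src/utils/urlRedaction.ts` may differ:

```ts
// Hypothetical sketch; parameter names beyond 'token' are illustrative.
const SENSITIVE_PARAMS = new Set(['token', 'key', 'api_key', 'apikey', 'secret', 'password'])

export function redactUrlForDisplay(rawUrl: string): string {
  let parsed: URL
  try {
    parsed = new URL(rawUrl)
  } catch {
    return rawUrl // not parseable as a URL — nothing we can safely rewrite
  }
  // Replace embedded basic-auth credentials wholesale.
  if (parsed.username || parsed.password) {
    parsed.username = 'redacted'
    parsed.password = 'redacted'
  }
  // Redact only known-sensitive query values; keep benign ones (e.g. mode=test).
  for (const name of parsed.searchParams.keys()) {
    if (SENSITIVE_PARAMS.has(name.toLowerCase())) {
      parsed.searchParams.set(name, 'redacted')
    }
  }
  return parsed.toString()
}
```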
@@ -7,6 +7,11 @@ import {
   resolveProviderRequest,
   isLocalProviderUrl as isProviderLocalUrl,
 } from '../src/services/api/providerConfig.js'
+import {
+  getLocalOpenAICompatibleProviderLabel,
+  probeOllamaGenerationReadiness,
+} from '../src/utils/providerDiscovery.js'
+import { redactUrlForDisplay } from '../src/utils/urlRedaction.js'

 type CheckResult = {
   ok: boolean
@@ -69,7 +74,7 @@ export function formatReachabilityFailureDetail(
   },
 ): string {
   const compactBody = responseBody.trim().replace(/\s+/g, ' ').slice(0, 240)
-  const base = `Unexpected status ${status} from ${endpoint}.`
+  const base = `Unexpected status ${status} from ${redactUrlForDisplay(endpoint)}.`
   const bodySuffix = compactBody ? ` Body: ${compactBody}` : ''

   if (request.transport !== 'codex_responses' || status !== 400) {
@@ -255,7 +260,7 @@ function checkOpenAIEnv(): CheckResult[] {
     results.push(pass('OPENAI_MODEL', process.env.OPENAI_MODEL))
   }

-  results.push(pass('OPENAI_BASE_URL', request.baseUrl))
+  results.push(pass('OPENAI_BASE_URL', redactUrlForDisplay(request.baseUrl)))

   if (request.transport === 'codex_responses') {
     const credentials = resolveCodexApiCredentials(process.env)
@@ -308,7 +313,7 @@ async function checkBaseUrlReachability(): Promise<CheckResult> {
     return pass('Provider reachability', 'Skipped (OpenAI-compatible mode disabled).')
   }

-  if (useGithub) {
+  if (useGithub && !useOpenAI) {
     return pass(
       'Provider reachability',
       'Skipped for GitHub Models (inference endpoint differs from OpenAI /models probe).',
@@ -326,6 +331,7 @@ async function checkBaseUrlReachability(): Promise<CheckResult> {
   const endpoint = request.transport === 'codex_responses'
     ? `${request.baseUrl}/responses`
     : `${request.baseUrl}/models`
+  const redactedEndpoint = redactUrlForDisplay(endpoint)

   const controller = new AbortController()
   const timeout = setTimeout(() => controller.abort(), 4000)
@@ -375,7 +381,10 @@ async function checkBaseUrlReachability(): Promise<CheckResult> {
     })

     if (response.status === 200 || response.status === 401 || response.status === 403) {
-      return pass('Provider reachability', `Reached ${endpoint} (status ${response.status}).`)
+      return pass(
+        'Provider reachability',
+        `Reached ${redactedEndpoint} (status ${response.status}).`,
+      )
     }

     const responseBody = await response.text().catch(() => '')
@@ -391,12 +400,100 @@ async function checkBaseUrlReachability(): Promise<CheckResult> {
     )
   } catch (error) {
     const message = error instanceof Error ? error.message : String(error)
-    return fail('Provider reachability', `Failed to reach ${endpoint}: ${message}`)
+    return fail(
+      'Provider reachability',
+      `Failed to reach ${redactedEndpoint}: ${message}`,
+    )
   } finally {
     clearTimeout(timeout)
   }
 }

+async function checkProviderGenerationReadiness(): Promise<CheckResult> {
+  const useGemini = isTruthy(process.env.CLAUDE_CODE_USE_GEMINI)
+  const useOpenAI = isTruthy(process.env.CLAUDE_CODE_USE_OPENAI)
+  const useGithub = isTruthy(process.env.CLAUDE_CODE_USE_GITHUB)
+  const useMistral = isTruthy(process.env.CLAUDE_CODE_USE_MISTRAL)
+
+  if (!useGemini && !useOpenAI && !useGithub && !useMistral) {
+    return pass('Provider generation readiness', 'Skipped (OpenAI-compatible mode disabled).')
+  }
+
+  if (useGithub && !useOpenAI) {
+    return pass(
+      'Provider generation readiness',
+      'Skipped for GitHub Models (runtime generation uses a different endpoint flow).',
+    )
+  }
+
+  if (useGemini || useMistral) {
+    return pass(
+      'Provider generation readiness',
+      'Skipped for managed provider mode.',
+    )
+  }
+
+  if (!useOpenAI) {
+    return pass('Provider generation readiness', 'Skipped (OpenAI-compatible mode disabled).')
+  }
+
+  const request = resolveProviderRequest({
+    model: process.env.OPENAI_MODEL,
+    baseUrl: process.env.OPENAI_BASE_URL,
+  })
+
+  if (request.transport === 'codex_responses') {
+    return pass(
+      'Provider generation readiness',
+      'Skipped for Codex responses (reachability probe already performs a lightweight generation request).',
+    )
+  }
+
+  if (!isLocalBaseUrl(request.baseUrl)) {
+    return pass('Provider generation readiness', 'Skipped for non-local provider URL.')
+  }
+
+  const localProviderLabel = getLocalOpenAICompatibleProviderLabel(request.baseUrl)
+  if (localProviderLabel !== 'Ollama') {
+    return pass(
+      'Provider generation readiness',
+      `Skipped for ${localProviderLabel} (no provider-specific generation probe).`,
+    )
+  }
+
+  const readiness = await probeOllamaGenerationReadiness({
+    baseUrl: request.baseUrl,
+    model: request.requestedModel,
+  })
+
+  if (readiness.state === 'ready') {
+    return pass(
+      'Provider generation readiness',
+      `Generated a test response with ${readiness.probeModel ?? request.requestedModel}.`,
+    )
+  }
+
+  if (readiness.state === 'unreachable') {
+    return fail(
+      'Provider generation readiness',
+      `Could not reach Ollama at ${redactUrlForDisplay(request.baseUrl)}.`,
+    )
+  }
+
+  if (readiness.state === 'no_models') {
+    return fail(
+      'Provider generation readiness',
+      'Ollama is reachable, but no installed models were found. Pull a model first (for example: ollama pull qwen2.5-coder:7b).',
+    )
+  }
+
+  const detailSuffix = readiness.detail ? ` Detail: ${readiness.detail}.` : ''
+  return fail(
+    'Provider generation readiness',
+    `Ollama is reachable, but generation failed for ${readiness.probeModel ?? request.requestedModel}.${detailSuffix}`,
+  )
+}
+
 function isAtomicChatUrl(baseUrl: string): boolean {
   try {
     const parsed = new URL(baseUrl)
@@ -567,6 +664,7 @@ async function main(): Promise<void> {
   results.push(checkBuildArtifacts())
   results.push(...checkOpenAIEnv())
   results.push(await checkBaseUrlReachability())
+  results.push(await checkProviderGenerationReadiness())
   results.push(checkOllamaProcessorMode())

   if (!options.json) {
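The new check layers a generation probe on top of the plain reachability probe, so the doctor catches installs where Ollama answers HTTP but cannot actually generate. A rough sketch of the probe contract the check relies on — the endpoint paths follow Ollama's public API (`/api/tags` lists installed models, `/api/generate` runs a completion), but the request shape below is an illustrative assumption, not the repo's actual `providerDiscovery` implementation:

```ts
type OllamaGenerationReadiness = {
  state: 'ready' | 'unreachable' | 'no_models' | 'generation_failed'
  models: Array<{ name: string }>
  probeModel?: string
  detail?: string
}

// Sketch only: error handling and model ranking are simplified.
async function probeOllamaGenerationReadiness(options?: {
  baseUrl?: string
  model?: string
}): Promise<OllamaGenerationReadiness> {
  const root = (options?.baseUrl ?? 'http://localhost:11434').replace(/\/v1\/?$/, '')

  let models: Array<{ name: string }>
  try {
    const res = await fetch(`${root}/api/tags`)
    models = ((await res.json()) as { models?: Array<{ name: string }> }).models ?? []
  } catch {
    return { state: 'unreachable', models: [] }
  }
  if (models.length === 0) return { state: 'no_models', models: [] }

  const probeModel = options?.model ?? models[0].name
  try {
    const res = await fetch(`${root}/api/generate`, {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      // num_predict: 1 keeps the probe cheap — we only need proof of generation.
      body: JSON.stringify({
        model: probeModel,
        prompt: 'ping',
        stream: false,
        options: { num_predict: 1 },
      }),
    })
    if (!res.ok) {
      return { state: 'generation_failed', models, probeModel, detail: `HTTP ${res.status}` }
    }
    return { state: 'ready', models, probeModel }
  } catch (error) {
    return { state: 'generation_failed', models, probeModel, detail: String(error) }
  }
}
```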
191  src/__tests__/security-hardening.test.ts  Normal file

@@ -0,0 +1,191 @@
+/**
+ * Security hardening regression tests.
+ *
+ * Covers:
+ * 1. MCP tool result Unicode sanitization
+ * 2. Sandbox settings source filtering (exclude projectSettings)
+ * 3. Plugin git clone/pull hooks disabled
+ * 4. ANTHROPIC_FOUNDRY_API_KEY removed from SAFE_ENV_VARS
+ * 5. WebFetch SSRF protection via ssrfGuardedLookup
+ */
+
+import { describe, test, expect } from 'bun:test'
+import { resolve } from 'path'
+
+const SRC = resolve(import.meta.dir, '..')
+const file = (relative: string) => Bun.file(resolve(SRC, relative))
+
+// ---------------------------------------------------------------------------
+// Fix 1: MCP tool result Unicode sanitization
+// ---------------------------------------------------------------------------
+describe('MCP tool result sanitization', () => {
+  test('transformResultContent sanitizes text content', async () => {
+    const content = await file('services/mcp/client.ts').text()
+    // Tool definitions are already sanitized (line ~1798)
+    expect(content).toContain('recursivelySanitizeUnicode(result.tools)')
+    // Tool results must also be sanitized
+    expect(content).toMatch(
+      /case 'text':[\s\S]*?recursivelySanitizeUnicode\(resultContent\.text\)/,
+    )
+  })
+
+  test('resource text content is also sanitized', async () => {
+    const content = await file('services/mcp/client.ts').text()
+    expect(content).toMatch(
+      /recursivelySanitizeUnicode\(\s*`\$\{prefix\}\$\{resource\.text\}`/,
+    )
+  })
+})
+
+// ---------------------------------------------------------------------------
+// Fix 2: Sandbox settings source filtering
+// ---------------------------------------------------------------------------
+describe('Sandbox settings trust boundary', () => {
+  test('getSandboxEnabledSetting does not use getSettings_DEPRECATED', async () => {
+    const content = await file('utils/sandbox/sandbox-adapter.ts').text()
+    // Extract the getSandboxEnabledSetting function body
+    const fnMatch = content.match(
+      /function getSandboxEnabledSetting\(\)[^{]*\{([\s\S]*?)\n\}/,
+    )
+    expect(fnMatch).not.toBeNull()
+    const fnBody = fnMatch![1]
+    // Must NOT use getSettings_DEPRECATED (reads all sources including project)
+    expect(fnBody).not.toContain('getSettings_DEPRECATED')
+    // Must use getSettingsForSource for individual trusted sources
+    expect(fnBody).toContain("getSettingsForSource('userSettings')")
+    expect(fnBody).toContain("getSettingsForSource('policySettings')")
+    // Must NOT read from projectSettings
+    expect(fnBody).not.toContain("'projectSettings'")
+  })
+})
+
+// ---------------------------------------------------------------------------
+// Fix 3: Plugin git hooks disabled
+// ---------------------------------------------------------------------------
+describe('Plugin git operations disable hooks', () => {
+  test('gitClone includes core.hooksPath=/dev/null', async () => {
+    const content = await file('utils/plugins/marketplaceManager.ts').text()
+    // The clone args must disable hooks
+    const cloneSection = content.slice(
+      content.indexOf('export async function gitClone('),
+      content.indexOf('export async function gitClone(') + 2000,
+    )
+    expect(cloneSection).toContain("'core.hooksPath=/dev/null'")
+  })
+
+  test('gitPull includes core.hooksPath=/dev/null', async () => {
+    const content = await file('utils/plugins/marketplaceManager.ts').text()
+    const pullSection = content.slice(
+      content.indexOf('export async function gitPull('),
+      content.indexOf('export async function gitPull(') + 2000,
+    )
+    expect(pullSection).toContain("'core.hooksPath=/dev/null'")
+  })
+
+  test('gitSubmoduleUpdate includes core.hooksPath=/dev/null', async () => {
+    const content = await file('utils/plugins/marketplaceManager.ts').text()
+    const subSection = content.slice(
+      content.indexOf('async function gitSubmoduleUpdate('),
+      content.indexOf('async function gitSubmoduleUpdate(') + 1000,
+    )
+    expect(subSection).toContain("'core.hooksPath=/dev/null'")
+  })
+})
+
+// ---------------------------------------------------------------------------
+// Fix 4: ANTHROPIC_FOUNDRY_API_KEY not in SAFE_ENV_VARS
+// ---------------------------------------------------------------------------
+describe('SAFE_ENV_VARS excludes credentials', () => {
+  test('ANTHROPIC_FOUNDRY_API_KEY is not in SAFE_ENV_VARS', async () => {
+    const content = await file('utils/managedEnvConstants.ts').text()
+    // Extract the SAFE_ENV_VARS set definition
+    const safeStart = content.indexOf('export const SAFE_ENV_VARS')
+    const safeEnd = content.indexOf('])', safeStart)
+    const safeSection = content.slice(safeStart, safeEnd)
+    expect(safeSection).not.toContain('ANTHROPIC_FOUNDRY_API_KEY')
+  })
+})
+
+// ---------------------------------------------------------------------------
+// Fix 5: WebFetch SSRF protection
+// ---------------------------------------------------------------------------
+describe('WebFetch SSRF guard', () => {
+  test('getWithPermittedRedirects uses ssrfGuardedLookup', async () => {
+    const content = await file('tools/WebFetchTool/utils.ts').text()
+    expect(content).toContain(
+      "import { ssrfGuardedLookup } from '../../utils/hooks/ssrfGuard.js'",
+    )
+    // The axios.get call in getWithPermittedRedirects must include lookup
+    const fnSection = content.slice(
+      content.indexOf('export async function getWithPermittedRedirects('),
+      content.indexOf('export async function getWithPermittedRedirects(') +
+        1000,
+    )
+    expect(fnSection).toContain('lookup: ssrfGuardedLookup')
+  })
+})
+
+// ---------------------------------------------------------------------------
+// Fix 6: Swarm permission file polling removed (security hardening)
+// ---------------------------------------------------------------------------
+describe('Swarm permission file polling removed', () => {
+  test('useSwarmPermissionPoller hook no longer exists', async () => {
+    const content = await file(
+      'hooks/useSwarmPermissionPoller.ts',
+    ).text()
+    // The file-based polling hook must not exist — it read from an
+    // unauthenticated resolved/ directory where any local process could
+    // forge approval files.
+    expect(content).not.toContain('function useSwarmPermissionPoller(')
+    // The file-based processResponse must not exist
+    expect(content).not.toContain('function processResponse(')
+  })
+
+  test('poller does not import from permissionSync', async () => {
+    const content = await file(
+      'hooks/useSwarmPermissionPoller.ts',
+    ).text()
+    // Must not import anything from permissionSync — all file-based
+    // functions have been removed from this module's dependencies
+    expect(content).not.toContain('permissionSync')
+  })
+
+  test('file-based permission functions are marked deprecated', async () => {
+    const content = await file(
+      'utils/swarm/permissionSync.ts',
+    ).text()
+    // All file-based functions must have @deprecated JSDoc
+    const deprecatedFns = [
+      'writePermissionRequest',
+      'readPendingPermissions',
+      'readResolvedPermission',
+      'resolvePermission',
+      'pollForResponse',
+      'removeWorkerResponse',
+    ]
+    for (const fn of deprecatedFns) {
+      // Find the function and check that @deprecated appears before it
+      const fnIndex = content.indexOf(`export async function ${fn}(`)
+      if (fnIndex === -1) continue // submitPermissionRequest is a const, not async function
+      const preceding = content.slice(Math.max(0, fnIndex - 500), fnIndex)
+      expect(preceding).toContain('@deprecated')
+    }
+  })
+
+  test('mailbox-based functions are NOT deprecated', async () => {
+    const content = await file(
+      'utils/swarm/permissionSync.ts',
+    ).text()
+    // These are the active path — must not be deprecated
+    const activeFns = [
+      'sendPermissionRequestViaMailbox',
+      'sendPermissionResponseViaMailbox',
+    ]
+    for (const fn of activeFns) {
+      const fnIndex = content.indexOf(`export async function ${fn}(`)
+      expect(fnIndex).not.toBe(-1)
+      const preceding = content.slice(Math.max(0, fnIndex - 300), fnIndex)
+      expect(preceding).not.toContain('@deprecated')
+    }
+  })
+})
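Fix 5 asserts that WebFetch passes a guarded DNS `lookup` into axios. As a sketch of the idea only — the real `utils/hooks/ssrfGuard.ts` is not shown in this diff, so treat the blocked ranges and wiring as assumptions: the wrapper resolves the hostname normally, then fails the request if the resolved address is loopback, private, or link-local, which also defeats DNS-rebinding tricks because the check runs on the address the connection will actually use.

```ts
import { lookup as dnsLookup } from 'node:dns'
import { isIPv4 } from 'node:net'

// Sketch: loopback, RFC 1918, link-local/cloud-metadata, and 0.0.0.0/8.
function isForbiddenAddress(address: string): boolean {
  if (!isIPv4(address)) return true // conservative: only vetted IPv4 passes
  const [a, b] = address.split('.').map(Number)
  return (
    a === 127 ||
    a === 10 ||
    (a === 172 && b >= 16 && b <= 31) ||
    (a === 192 && b === 168) ||
    (a === 169 && b === 254) ||
    a === 0
  )
}

// dns.lookup-compatible shape, so it can be handed to axios via
// `{ lookup: ssrfGuardedLookup }` — the wiring the test above asserts.
export function ssrfGuardedLookup(
  hostname: string,
  _options: unknown,
  callback: (err: NodeJS.ErrnoException | null, address: string, family: number) => void,
): void {
  dnsLookup(hostname, { family: 4 }, (err, address, family) => {
    if (err) return callback(err, '', 0)
    if (isForbiddenAddress(address)) {
      return callback(new Error(`blocked by SSRF guard: ${hostname} -> ${address}`), '', 0)
    }
    callback(null, address, family)
  })
}
```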
@@ -66,10 +66,44 @@ import {
 import {
   getOllamaChatBaseUrl,
   getLocalOpenAICompatibleProviderLabel,
-  hasLocalOllama,
-  listOllamaModels,
+  probeOllamaGenerationReadiness,
+  type OllamaGenerationReadiness,
 } from '../../utils/providerDiscovery.js'

+function describeOllamaReadinessIssue(
+  readiness: OllamaGenerationReadiness,
+  options?: {
+    baseUrl?: string
+    allowManualFallback?: boolean
+  },
+): string {
+  const endpoint = options?.baseUrl ?? 'http://localhost:11434'
+
+  if (readiness.state === 'unreachable') {
+    return `Could not reach Ollama at ${endpoint}. Start Ollama first, then run /provider again.`
+  }
+
+  if (readiness.state === 'no_models') {
+    const manualSuffix = options?.allowManualFallback
+      ? ', or enter details manually'
+      : ''
+    return `Ollama is running, but no installed models were found. Pull a chat model such as qwen2.5-coder:7b or llama3.1:8b first${manualSuffix}.`
+  }
+
+  if (readiness.state === 'generation_failed') {
+    const modelHint = readiness.probeModel ?? 'the selected model'
+    const detailSuffix = readiness.detail
+      ? ` Details: ${readiness.detail}.`
+      : ''
+    const manualSuffix = options?.allowManualFallback
+      ? ' You can also enter details manually.'
+      : ''
+    return `Ollama is reachable and models are installed, but a generation probe failed for ${modelHint}.${detailSuffix} Run "ollama run ${modelHint}" once and retry.${manualSuffix}`
+  }
+
+  return ''
+}
+
 type ProviderChoice = 'auto' | ProviderProfile | 'codex-oauth' | 'clear'

 type Step =
@@ -715,6 +749,7 @@ function AutoRecommendationStep({
   | {
       state: 'openai'
      defaultModel: string
+      reason: string
     }
   | {
       state: 'error'
@@ -728,19 +763,27 @@ function AutoRecommendationStep({
     void (async () => {
       const defaultModel = getGoalDefaultOpenAIModel(goal)
       try {
-        const ollamaAvailable = await hasLocalOllama()
-        if (!ollamaAvailable) {
+        const readiness = await probeOllamaGenerationReadiness()
+        if (readiness.state !== 'ready') {
           if (!cancelled) {
-            setStatus({ state: 'openai', defaultModel })
+            setStatus({
+              state: 'openai',
+              defaultModel,
+              reason: describeOllamaReadinessIssue(readiness),
+            })
           }
           return
         }

-        const models = await listOllamaModels()
-        const recommended = recommendOllamaModel(models, goal)
+        const recommended = recommendOllamaModel(readiness.models, goal)
         if (!recommended) {
           if (!cancelled) {
-            setStatus({ state: 'openai', defaultModel })
+            setStatus({
+              state: 'openai',
+              defaultModel,
+              reason:
+                'Ollama responded to a generation probe, but no recommended chat model matched this goal.',
+            })
           }
           return
         }
@@ -796,10 +839,10 @@ function AutoRecommendationStep({
     <Dialog title="Auto setup fallback" onCancel={onCancel}>
       <Box flexDirection="column" gap={1}>
         <Text>
-          No viable local Ollama chat model was detected. Auto setup can
-          continue into OpenAI-compatible setup with a default model of{' '}
+          Auto setup can continue into OpenAI-compatible setup with a default model of{' '}
           {status.defaultModel}.
         </Text>
+        <Text dimColor>{status.reason}</Text>
         <Select
           options={[
             { label: 'Continue to OpenAI-compatible setup', value: 'continue' },
@@ -883,32 +926,19 @@ function OllamaModelStep({
     let cancelled = false

     void (async () => {
-      const available = await hasLocalOllama()
-      if (!available) {
+      const readiness = await probeOllamaGenerationReadiness()
+      if (readiness.state !== 'ready') {
         if (!cancelled) {
           setStatus({
             state: 'unavailable',
-            message:
-              'Could not reach Ollama at http://localhost:11434. Start Ollama first, then run /provider again.',
+            message: describeOllamaReadinessIssue(readiness),
           })
         }
         return
       }

-      const models = await listOllamaModels()
-      if (models.length === 0) {
-        if (!cancelled) {
-          setStatus({
-            state: 'unavailable',
-            message:
-              'Ollama is running, but no installed models were found. Pull a chat model such as qwen2.5-coder:7b or llama3.1:8b first.',
-          })
-        }
-        return
-      }
-
-      const ranked = rankOllamaModels(models, 'balanced')
-      const recommended = recommendOllamaModel(models, 'balanced')
+      const ranked = rankOllamaModels(readiness.models, 'balanced')
+      const recommended = recommendOllamaModel(readiness.models, 'balanced')
       if (!cancelled) {
         setStatus({
           state: 'ready',
@@ -149,17 +149,21 @@ function mockProviderManagerDependencies(
   applySavedProfileToCurrentSession?: (...args: unknown[]) => Promise<string | null>
   clearCodexCredentials?: () => { success: boolean; warning?: string }
   getProviderProfiles?: () => unknown[]
   hasLocalOllama?: () => Promise<boolean>
-  listOllamaModels?: () => Promise<
-    Array<{
+  probeOllamaGenerationReadiness?: () => Promise<{
+    state: 'ready' | 'unreachable' | 'no_models' | 'generation_failed'
+    models: Array<
+      {
       name: string
       sizeBytes?: number | null
       family?: string | null
       families?: string[]
       parameterSize?: string | null
       quantizationLevel?: string | null
-    }>
+      }
+    >
+    probeModel?: string
+    detail?: string
+  }>
   codexSyncRead?: () => unknown
   codexAsyncRead?: () => Promise<unknown>
   updateProviderProfile?: (...args: unknown[]) => unknown
@@ -189,8 +193,12 @@ function mockProviderManagerDependencies(
   })

   mock.module('../utils/providerDiscovery.js', () => ({
     hasLocalOllama: options?.hasLocalOllama ?? (async () => false),
-    listOllamaModels: options?.listOllamaModels ?? (async () => []),
+    probeOllamaGenerationReadiness:
+      options?.probeOllamaGenerationReadiness ??
+      (async () => ({
+        state: 'unreachable' as const,
+        models: [],
+      })),
   }))

   mock.module('../utils/githubModelsCredentials.js', () => ({
@@ -455,8 +463,9 @@ test('ProviderManager first-run Ollama preset auto-detects installed models', as
     async () => undefined,
     {
       addProviderProfile,
       hasLocalOllama: async () => true,
-      listOllamaModels: async () => [
+      probeOllamaGenerationReadiness: async () => ({
+        state: 'ready',
+        models: [
         {
           name: 'gemma4:31b-cloud',
           family: 'gemma',
@@ -468,6 +477,8 @@ test('ProviderManager first-run Ollama preset auto-detects installed models', as
           parameterSize: '2.5b',
         },
       ],
+        probeModel: 'gemma4:31b-cloud',
+      }),
     },
   )
@@ -37,13 +37,14 @@ import {
   readGithubModelsTokenAsync,
 } from '../utils/githubModelsCredentials.js'
 import {
-  hasLocalOllama,
-  listOllamaModels,
+  probeOllamaGenerationReadiness,
+  type OllamaGenerationReadiness,
 } from '../utils/providerDiscovery.js'
 import {
   rankOllamaModels,
   recommendOllamaModel,
 } from '../utils/providerRecommendation.js'
+import { redactUrlForDisplay } from '../utils/urlRedaction.js'
 import { updateSettingsForSource } from '../utils/settings/settings.js'
 import {
   type OptionWithDescription,
@@ -52,7 +53,6 @@ import {
 import { Pane } from './design-system/Pane.js'
 import TextInput from './TextInput.js'
 import { useCodexOAuthFlow } from './useCodexOAuthFlow.js'
-import { useSetAppState } from '../state/AppState.js'

 export type ProviderManagerResult = {
   action: 'saved' | 'cancelled'
@@ -222,6 +222,29 @@ function getGithubProviderSummary(
   return `github-models · ${GITHUB_PROVIDER_DEFAULT_BASE_URL} · ${getGithubProviderModel(processEnv)} · ${credentialSummary}${activeSuffix}`
 }

+function describeOllamaSelectionIssue(
+  readiness: OllamaGenerationReadiness,
+  baseUrl: string,
+): string {
+  if (readiness.state === 'unreachable') {
+    return `Could not reach Ollama at ${redactUrlForDisplay(baseUrl)}. Start Ollama first, or enter the endpoint manually.`
+  }
+
+  if (readiness.state === 'no_models') {
+    return 'Ollama is running, but no installed models were found. Pull a chat model such as qwen2.5-coder:7b or llama3.1:8b first, or enter details manually.'
+  }
+
+  if (readiness.state === 'generation_failed') {
+    const modelHint = readiness.probeModel ?? 'the selected model'
+    const detailSuffix = readiness.detail
+      ? ` Details: ${readiness.detail}.`
+      : ''
+    return `Ollama is reachable and models are installed, but a generation probe failed for ${modelHint}.${detailSuffix} Run "ollama run ${modelHint}" once and retry, or enter details manually.`
+  }
+
+  return ''
+}
+
 function findCodexOAuthProfile(
   profiles: ProviderProfile[],
   profileId?: string,
@@ -450,32 +473,21 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
     setOllamaSelection({ state: 'loading' })

     void (async () => {
-      const available = await hasLocalOllama(draft.baseUrl)
-      if (!available) {
+      const readiness = await probeOllamaGenerationReadiness({
+        baseUrl: draft.baseUrl,
+      })
+      if (readiness.state !== 'ready') {
         if (!cancelled) {
           setOllamaSelection({
             state: 'unavailable',
-            message:
-              'Could not reach Ollama. Start Ollama first, or enter the endpoint manually.',
+            message: describeOllamaSelectionIssue(readiness, draft.baseUrl),
           })
         }
         return
       }

-      const models = await listOllamaModels(draft.baseUrl)
-      if (models.length === 0) {
-        if (!cancelled) {
-          setOllamaSelection({
-            state: 'unavailable',
-            message:
-              'Ollama is running, but no installed models were found. Pull a chat model such as qwen2.5-coder:7b or llama3.1:8b first, or enter details manually.',
-          })
-        }
-        return
-      }
-
-      const ranked = rankOllamaModels(models, 'balanced')
-      const recommended = recommendOllamaModel(models, 'balanced')
+      const ranked = rankOllamaModels(readiness.models, 'balanced')
+      const recommended = recommendOllamaModel(readiness.models, 'balanced')
       if (!cancelled) {
         setOllamaSelection({
           state: 'ready',
@@ -53,17 +53,20 @@ describe('getProjectMemoryPathForSelector', () => {
   })

   test('defaults to a new AGENTS.md in the current cwd when no project file is loaded', () => {
-    expect(getProjectMemoryPathForSelector([], '/repo/packages/app')).toBe(
-      '/repo/packages/app/AGENTS.md',
+    const cwd = join('/repo', 'packages', 'app')
+    expect(getProjectMemoryPathForSelector([], cwd)).toBe(
+      join(cwd, 'AGENTS.md'),
     )
   })

   test('ignores loaded project instruction files outside the current cwd ancestry', () => {
+    const outsideRepoPath = join('/other-worktree', 'AGENTS.md')
+    const cwd = join('/repo', 'packages', 'app')
     expect(
       getProjectMemoryPathForSelector(
-        [projectFile('/other-worktree/AGENTS.md')],
-        '/repo/packages/app',
+        [projectFile(outsideRepoPath)],
+        cwd,
       ),
-    ).toBe('/repo/packages/app/AGENTS.md')
+    ).toBe(join(cwd, 'AGENTS.md'))
   })
 })
@@ -19,7 +19,7 @@ async function _temp() {
   logForDebugging("Showing marketplace config save failure notification");
   notifs.push({
     key: "marketplace-config-save-failed",
-    jsx: <Text color="error">Failed to save marketplace retry info · Check ~/.claude.json permissions</Text>,
+    jsx: <Text color="error">Failed to save marketplace retry info · Check ~/.openclaude.json permissions</Text>,
     priority: "immediate",
     timeoutMs: 10000
   });
@@ -1,34 +1,23 @@
 /**
- * Swarm Permission Poller Hook
+ * Swarm Permission Callback Registry
  *
- * This hook polls for permission responses from the team leader when running
- * as a worker agent in a swarm. When a response is received, it calls the
- * appropriate callback (onAllow/onReject) to continue execution.
+ * Manages callback registrations for permission requests and responses
+ * in agent swarms. Responses are delivered exclusively via the mailbox
+ * system (useInboxPoller → processMailboxPermissionResponse).
  *
- * This hook should be used in conjunction with the worker-side integration
- * in useCanUseTool.ts, which creates pending requests that this hook monitors.
+ * The legacy file-based polling (resolved/ directory) has been removed
+ * because it created an unauthenticated attack surface — any local process
+ * could forge approval files. The mailbox path is the sole active channel.
  */

-import { useCallback, useEffect, useRef } from 'react'
-import { useInterval } from 'usehooks-ts'
 import { logForDebugging } from '../utils/debug.js'
-import { errorMessage } from '../utils/errors.js'
 import {
   type PermissionUpdate,
   permissionUpdateSchema,
 } from '../utils/permissions/PermissionUpdateSchema.js'
 import {
   isSwarmWorker,
   type PermissionResponse,
-  pollForResponse,
-  removeWorkerResponse,
 } from '../utils/swarm/permissionSync.js'
-import { getAgentName, getTeamName } from '../utils/teammate.js'
-
-const POLL_INTERVAL_MS = 500

 /**
- * Validate permissionUpdates from external sources (mailbox IPC, disk polling).
+ * Validate permissionUpdates from external sources (mailbox IPC).
  * Malformed entries from buggy/old teammate processes are filtered out rather
  * than propagated unchecked into callback.onAllow().
  */
@@ -225,106 +214,9 @@ export function processSandboxPermissionResponse(params: {
   return true
 }

-/**
- * Process a permission response by invoking the registered callback
- */
-function processResponse(response: PermissionResponse): boolean {
-  const callback = pendingCallbacks.get(response.requestId)
-
-  if (!callback) {
-    logForDebugging(
-      `[SwarmPermissionPoller] No callback registered for request ${response.requestId}`,
-    )
-    return false
-  }
-
-  logForDebugging(
-    `[SwarmPermissionPoller] Processing response for request ${response.requestId}: ${response.decision}`,
-  )
-
-  // Remove from registry before invoking callback
-  pendingCallbacks.delete(response.requestId)
-
-  if (response.decision === 'approved') {
-    const permissionUpdates = parsePermissionUpdates(response.permissionUpdates)
-    const updatedInput = response.updatedInput
-    callback.onAllow(updatedInput, permissionUpdates)
-  } else {
-    callback.onReject(response.feedback)
-  }
-
-  return true
-}
-
-/**
- * Hook that polls for permission responses when running as a swarm worker.
- *
- * This hook:
- * 1. Only activates when isSwarmWorker() returns true
- * 2. Polls every 500ms for responses
- * 3. When a response is found, invokes the registered callback
- * 4. Cleans up the response file after processing
- */
-export function useSwarmPermissionPoller(): void {
-  const isProcessingRef = useRef(false)
-
-  const poll = useCallback(async () => {
-    // Don't poll if not a swarm worker
-    if (!isSwarmWorker()) {
-      return
-    }
-
-    // Prevent concurrent polling
-    if (isProcessingRef.current) {
-      return
-    }
-
-    // Don't poll if no callbacks are registered
-    if (pendingCallbacks.size === 0) {
-      return
-    }
-
-    isProcessingRef.current = true
-
-    try {
-      const agentName = getAgentName()
-      const teamName = getTeamName()
-
-      if (!agentName || !teamName) {
-        return
-      }
-
-      // Check each pending request for a response
-      for (const [requestId, _callback] of pendingCallbacks) {
-        const response = await pollForResponse(requestId, agentName, teamName)
-
-        if (response) {
-          // Process the response
-          const processed = processResponse(response)
-
-          if (processed) {
-            // Clean up the response from the worker's inbox
-            await removeWorkerResponse(requestId, agentName, teamName)
-          }
-        }
-      }
-    } catch (error) {
-      logForDebugging(
-        `[SwarmPermissionPoller] Error during poll: ${errorMessage(error)}`,
-      )
-    } finally {
-      isProcessingRef.current = false
-    }
-  }, [])
-
-  // Only poll if we're a swarm worker
-  const shouldPoll = isSwarmWorker()
-  useInterval(() => void poll(), shouldPoll ? POLL_INTERVAL_MS : null)
-
-  // Initial poll on mount
-  useEffect(() => {
-    if (isSwarmWorker()) {
-      void poll()
-    }
-  }, [poll])
-}
+// Legacy file-based polling (useSwarmPermissionPoller, processResponse)
+// has been removed. Permission responses are now delivered exclusively
+// via the mailbox system:
+//   Leader: sendPermissionResponseViaMailbox() → writeToMailbox()
+//   Worker: useInboxPoller → processMailboxPermissionResponse()
+// See: fix(security) — remove unauthenticated file-based permission channel
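What survives in this module is essentially a registry keyed by request id: the worker registers onAllow/onReject callbacks, and the mailbox poller resolves them when a response arrives. A minimal sketch of that pattern, with names mirroring the diff and the exact payload types treated as assumptions:

```ts
type PermissionCallback = {
  onAllow: (updatedInput?: unknown, permissionUpdates?: unknown[]) => void
  onReject: (feedback?: string) => void
}

// Registry keyed by requestId; populated on the worker side, drained by the
// mailbox poller (useInboxPoller → processMailboxPermissionResponse).
const pendingCallbacks = new Map<string, PermissionCallback>()

export function registerPermissionCallback(requestId: string, cb: PermissionCallback): void {
  pendingCallbacks.set(requestId, cb)
}

export function resolvePermissionCallback(
  requestId: string,
  decision: 'approved' | 'rejected',
  payload?: { updatedInput?: unknown; permissionUpdates?: unknown[]; feedback?: string },
): boolean {
  const cb = pendingCallbacks.get(requestId)
  if (!cb) return false
  // Remove before invoking, as the removed processResponse also did, so a
  // re-delivered response cannot fire the callback twice.
  pendingCallbacks.delete(requestId)
  if (decision === 'approved') cb.onAllow(payload?.updatedInput, payload?.permissionUpdates)
  else cb.onReject(payload?.feedback)
  return true
}
```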
@@ -11,14 +11,16 @@ const execFileNoThrowMock = mock(
   async () => ({ code: 0, stdout: '', stderr: '' }),
 )

-mock.module('../../utils/execFileNoThrow.js', () => ({
-  execFileNoThrow: execFileNoThrowMock,
-  execFileNoThrowWithCwd: execFileNoThrowMock,
-}))
+function installOscMocks(): void {
+  mock.module('../../utils/execFileNoThrow.js', () => ({
+    execFileNoThrow: execFileNoThrowMock,
+    execFileNoThrowWithCwd: execFileNoThrowMock,
+  }))

-mock.module('../../utils/tempfile.js', () => ({
-  generateTempFilePath: generateTempFilePathMock,
-}))
+  mock.module('../../utils/tempfile.js', () => ({
+    generateTempFilePath: generateTempFilePathMock,
+  }))
+}

 async function importFreshOscModule() {
   return import(`./osc.ts?ts=${Date.now()}-${Math.random()}`)
@@ -45,6 +47,7 @@ async function waitForExecCall(

 describe('Windows clipboard fallback', () => {
   beforeEach(() => {
+    installOscMocks()
     execFileNoThrowMock.mockClear()
     generateTempFilePathMock.mockClear()
     process.env = { ...originalEnv }
@@ -62,14 +65,12 @@ describe('Windows clipboard fallback', () => {
     const { setClipboard } = await importFreshOscModule()

     await setClipboard('Привет мир')
-    await flushClipboardCopy()
+    const windowsCall = await waitForExecCall('powershell')

     expect(execFileNoThrowMock.mock.calls.some(([cmd]) => cmd === 'clip')).toBe(
       false,
     )
-    expect(
-      execFileNoThrowMock.mock.calls.some(([cmd]) => cmd === 'powershell'),
-    ).toBe(true)
+    expect(windowsCall).toBeDefined()
   })

   test('passes Windows clipboard text through a UTF-8 temp file instead of stdin', async () => {
@@ -97,6 +98,7 @@ describe('Windows clipboard fallback', () => {

 describe('clipboard path behavior remains stable', () => {
   beforeEach(() => {
+    installOscMocks()
     execFileNoThrowMock.mockClear()
     process.env = { ...originalEnv }
     delete process.env['SSH_CONNECTION']
@@ -12,7 +12,7 @@ import {
  * One-shot migration: clear skipAutoPermissionPrompt for users who accepted
  * the old 2-option AutoModeOptInDialog but don't have auto as their default.
  * Re-surfaces the dialog so they see the new "make it my default mode" option.
- * Guard lives in GlobalConfig (~/.claude.json), not settings.json, so it
+ * Guard lives in GlobalConfig (~/.openclaude.json), not settings.json, so it
  * survives settings resets and doesn't re-arm itself.
  *
  * Only runs when tengu_auto_mode_config.enabled === 'enabled'. For 'opt-in'
@@ -3873,7 +3873,7 @@ export function REPL({
   // empty to non-empty, not on every length change -- otherwise a render loop
   // (concurrent onQuery thrashing, etc.) spams saveGlobalConfig, which hits
   // ELOCKED under concurrent sessions and falls back to unlocked writes.
-  // That write storm is the primary trigger for ~/.claude.json corruption
+  // That write storm is the primary trigger for ~/.openclaude.json corruption
   // (GH #3117).
   const hasCountedQueueUseRef = useRef(false);
   useEffect(() => {
@@ -334,7 +334,7 @@ async function processRemoteEvalPayload(
   // Empty object is truthy — without the length check, `{features: {}}`
   // (transient server bug, truncated response) would pass, clear the maps
   // below, return true, and syncRemoteEvalToDisk would wholesale-write `{}`
-  // to disk: total flag blackout for every process sharing ~/.claude.json.
+  // to disk: total flag blackout for every process sharing ~/.openclaude.json.
   if (!payload?.features || Object.keys(payload.features).length === 0) {
     return false
   }
@@ -23,6 +23,7 @@ import { randomUUID } from 'crypto'
 import {
   getAPIProvider,
   isFirstPartyAnthropicBaseUrl,
+  isGithubNativeAnthropicMode,
 } from 'src/utils/model/providers.js'
 import {
   getAttributionHeader,
@@ -334,8 +335,13 @@ export function getPromptCachingEnabled(model: string): boolean {
   // Prompt caching is an Anthropic-specific feature. Third-party providers
   // do not understand cache_control blocks and strict backends (e.g. Azure
   // Foundry) reject or flag requests that contain them.
+  //
+  // Exception: when the GitHub provider is configured in native Anthropic API
+  // mode (CLAUDE_CODE_GITHUB_ANTHROPIC_API=1), requests are sent in Anthropic
+  // format, so cache_control blocks are supported.
   const provider = getAPIProvider()
-  if (provider !== 'firstParty' && provider !== 'bedrock' && provider !== 'vertex') {
+  const isNativeGithub = isGithubNativeAnthropicMode(model)
+  if (provider !== 'firstParty' && provider !== 'bedrock' && provider !== 'vertex' && !isNativeGithub) {
     return false
   }

@@ -14,6 +14,7 @@ import { getSmallFastModel } from 'src/utils/model/model.js'
 import {
   getAPIProvider,
   isFirstPartyAnthropicBaseUrl,
+  isGithubNativeAnthropicMode,
 } from 'src/utils/model/providers.js'
 import { getProxyFetchOptions } from 'src/utils/proxy.js'
 import {
@@ -174,6 +175,25 @@ export async function getAnthropicClient({
       providerOverride,
     }) as unknown as Anthropic
   }
+  // GitHub provider in native Anthropic API mode: send requests in Anthropic
+  // format so cache_control blocks are honoured and prompt caching works.
+  // Requires the GitHub endpoint (OPENAI_BASE_URL) to support Anthropic's
+  // messages API — set CLAUDE_CODE_GITHUB_ANTHROPIC_API=1 to opt in.
+  if (isGithubNativeAnthropicMode(model)) {
+    const githubBaseUrl =
+      process.env.OPENAI_BASE_URL?.replace(/\/$/, '') ??
+      'https://api.githubcopilot.com'
+    const githubToken =
+      process.env.GITHUB_TOKEN ?? process.env.GH_TOKEN ?? ''
+    const nativeArgs: ConstructorParameters<typeof Anthropic>[0] = {
+      ...ARGS,
+      baseURL: githubBaseUrl,
+      authToken: githubToken,
+      // No apiKey — we authenticate via Bearer token (authToken)
+      apiKey: null,
+    }
+    return new Anthropic(nativeArgs)
+  }
   if (
     isEnvTruthy(process.env.CLAUDE_CODE_USE_OPENAI) ||
     isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB) ||
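The gating helper itself is not part of this diff. A plausible sketch consistent with the comments above — the two env names appear in the diff, but whether the `model` argument is actually consulted is an assumption:

```ts
// Hypothetical sketch of the gate; the real src/utils/model/providers.js
// implementation may differ.
function isEnvTruthy(value: string | undefined): boolean {
  return value === '1' || value?.toLowerCase() === 'true'
}

export function isGithubNativeAnthropicMode(_model: string): boolean {
  // Native Anthropic mode applies only when the GitHub provider is active
  // AND the explicit opt-in flag is set.
  return (
    isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB) &&
    isEnvTruthy(process.env.CLAUDE_CODE_GITHUB_ANTHROPIC_API)
  )
}
```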
@@ -547,7 +547,7 @@ describe('Codex request translation', () => {
     ])
   })

-  test('strips leaked reasoning preamble from completed Codex text responses', () => {
+  test('strips <think> tag block from completed Codex text responses', () => {
     const message = convertCodexResponseToAnthropicMessage(
       {
         id: 'resp_1',
@@ -560,7 +560,7 @@ describe('Codex request translation', () => {
           {
             type: 'output_text',
             text:
-              'The user just said "hey" - a simple greeting. I should respond briefly and friendly.\n\nHey! How can I help you today?',
+              '<think>user wants a greeting, respond briefly</think>Hey! How can I help you today?',
           },
         ],
       },
@@ -578,6 +578,37 @@ describe('Codex request translation', () => {
     ])
   })

+  test('strips unterminated <think> tag at block boundary in Codex completed response', () => {
+    const message = convertCodexResponseToAnthropicMessage(
+      {
+        id: 'resp_1',
+        model: 'gpt-5.4',
+        output: [
+          {
+            type: 'message',
+            role: 'assistant',
+            content: [
+              {
+                type: 'output_text',
+                text:
+                  'Here is the answer.\n<think>wait, let me reconsider the user request',
+              },
+            ],
+          },
+        ],
+        usage: { input_tokens: 12, output_tokens: 4 },
+      },
+      'gpt-5.4',
+    )
+
+    expect(message.content).toEqual([
+      {
+        type: 'text',
+        text: 'Here is the answer.',
+      },
+    ])
+  })
+
   test('translates Codex SSE text stream into Anthropic events', async () => {
     const responseText = [
       'event: response.output_item.added',
@@ -609,7 +640,7 @@ describe('Codex request translation', () => {
     ])
   })

-  test('strips leaked reasoning preamble from Codex SSE text stream', async () => {
+  test('strips <think> tag block from Codex SSE text stream', async () => {
     const responseText = [
       'event: response.output_item.added',
       'data: {"type":"response.output_item.added","item":{"id":"msg_1","type":"message","status":"in_progress","content":[],"role":"assistant"},"output_index":0,"sequence_number":0}',
@@ -618,13 +649,13 @@ describe('Codex request translation', () => {
       'data: {"type":"response.content_part.added","content_index":0,"item_id":"msg_1","output_index":0,"part":{"type":"output_text","text":""},"sequence_number":1}',
       '',
       'event: response.output_text.delta',
-      'data: {"type":"response.output_text.delta","content_index":0,"delta":"The user just said \\"hey\\" - a simple greeting. I should respond briefly and friendly.\\n\\nHey! How can I help you today?","item_id":"msg_1","output_index":0,"sequence_number":2}',
+      'data: {"type":"response.output_text.delta","content_index":0,"delta":"<think>user wants a greeting, respond briefly</think>Hey! How can I help you today?","item_id":"msg_1","output_index":0,"sequence_number":2}',
       '',
       'event: response.output_item.done',
-      'data: {"type":"response.output_item.done","item":{"id":"msg_1","type":"message","status":"completed","content":[{"type":"output_text","text":"The user just said \\"hey\\" - a simple greeting. I should respond briefly and friendly.\\n\\nHey! How can I help you today?"}],"role":"assistant"},"output_index":0,"sequence_number":3}',
+      'data: {"type":"response.output_item.done","item":{"id":"msg_1","type":"message","status":"completed","content":[{"type":"output_text","text":"<think>user wants a greeting, respond briefly</think>Hey! How can I help you today?"}],"role":"assistant"},"output_index":0,"sequence_number":3}',
       '',
       'event: response.completed',
-      'data: {"type":"response.completed","response":{"id":"resp_1","status":"completed","model":"gpt-5.4","output":[{"type":"message","role":"assistant","content":[{"type":"output_text","text":"The user just said \\"hey\\" - a simple greeting. I should respond briefly and friendly.\\n\\nHey! How can I help you today?"}]}],"usage":{"input_tokens":2,"output_tokens":1}},"sequence_number":4}',
+      'data: {"type":"response.completed","response":{"id":"resp_1","status":"completed","model":"gpt-5.4","output":[{"type":"message","role":"assistant","content":[{"type":"output_text","text":"<think>user wants a greeting, respond briefly</think>Hey! How can I help you today?"}]}],"usage":{"input_tokens":2,"output_tokens":1}},"sequence_number":4}',
       '',
     ].join('\n')

@@ -646,6 +677,50 @@ describe('Codex request translation', () => {
       }
     }

-    expect(textDeltas).toEqual(['Hey! How can I help you today?'])
+    expect(textDeltas.join('')).toBe('Hey! How can I help you today?')
   })

+  test('preserves prose without tags (no phrase-based false positive)', async () => {
+    // Regression test: older phrase-based sanitizer would incorrectly strip text
+    // starting with "I should" or "The user". The tag-based approach leaves it alone.
+    const responseText = [
+      'event: response.output_item.added',
+      'data: {"type":"response.output_item.added","item":{"id":"msg_1","type":"message","status":"in_progress","content":[],"role":"assistant"},"output_index":0,"sequence_number":0}',
+      '',
+      'event: response.content_part.added',
+      'data: {"type":"response.content_part.added","content_index":0,"item_id":"msg_1","output_index":0,"part":{"type":"output_text","text":""},"sequence_number":1}',
+      '',
+      'event: response.output_text.delta',
+      'data: {"type":"response.output_text.delta","content_index":0,"delta":"I should note that the user role requires a briefly concise friendly response format.","item_id":"msg_1","output_index":0,"sequence_number":2}',
+      '',
+      'event: response.output_item.done',
+      'data: {"type":"response.output_item.done","item":{"id":"msg_1","type":"message","status":"completed","content":[{"type":"output_text","text":"I should note that the user role requires a briefly concise friendly response format."}],"role":"assistant"},"output_index":0,"sequence_number":3}',
+      '',
+      'event: response.completed',
+      'data: {"type":"response.completed","response":{"id":"resp_1","status":"completed","model":"gpt-5.4","output":[{"type":"message","role":"assistant","content":[{"type":"output_text","text":"I should note that the user role requires a briefly concise friendly response format."}]}],"usage":{"input_tokens":2,"output_tokens":1}},"sequence_number":4}',
+      '',
+    ].join('\n')
+
+    const stream = new ReadableStream({
+      start(controller) {
+        controller.enqueue(new TextEncoder().encode(responseText))
+        controller.close()
+      },
+    })
+
+    const textDeltas: string[] = []
+    for await (const event of codexStreamToAnthropic(
+      new Response(stream),
+      'gpt-5.4',
+    )) {
+      const delta = (event as { delta?: { type?: string; text?: string } }).delta
+      if (delta?.type === 'text_delta' && typeof delta.text === 'string') {
+        textDeltas.push(delta.text)
+      }
+    }
+
+    expect(textDeltas.join('')).toBe(
+      'I should note that the user role requires a briefly concise friendly response format.',
+    )
+  })
 })
@@ -6,10 +6,9 @@ import type {
 } from './providerConfig.js'
 import { sanitizeSchemaForOpenAICompat } from './openaiSchemaSanitizer.js'
 import {
-  looksLikeLeakedReasoningPrefix,
-  shouldBufferPotentialReasoningPrefix,
-  stripLeakedReasoningPreamble,
-} from './reasoningLeakSanitizer.js'
+  createThinkTagFilter,
+  stripThinkTags,
+} from './thinkTagSanitizer.js'

 export interface AnthropicUsage {
   input_tokens: number
@@ -734,34 +733,29 @@ export async function* codexStreamToAnthropic(
     { index: number; toolUseId: string }
   >()
   let activeTextBlockIndex: number | null = null
-  let activeTextBuffer = ''
-  let textBufferMode: 'none' | 'pending' | 'strip' = 'none'
+  const thinkFilter = createThinkTagFilter()
   let nextContentBlockIndex = 0
   let sawToolUse = false
   let finalResponse: Record<string, any> | undefined

   const closeActiveTextBlock = async function* () {
     if (activeTextBlockIndex === null) return
-    if (textBufferMode !== 'none') {
-      const sanitized = stripLeakedReasoningPreamble(activeTextBuffer)
-      if (sanitized) {
-        yield {
-          type: 'content_block_delta',
-          index: activeTextBlockIndex,
-          delta: {
-            type: 'text_delta',
-            text: sanitized,
-          },
-        }
-      }
+    const tail = thinkFilter.flush()
+    if (tail) {
+      yield {
+        type: 'content_block_delta',
+        index: activeTextBlockIndex,
+        delta: {
+          type: 'text_delta',
+          text: tail,
+        },
+      }
     }
     yield {
       type: 'content_block_stop',
       index: activeTextBlockIndex,
     }
     activeTextBlockIndex = null
-    activeTextBuffer = ''
-    textBufferMode = 'none'
   }

   const startTextBlockIfNeeded = async function* () {
@@ -837,43 +831,17 @@ export async function* codexStreamToAnthropic(

     if (event.event === 'response.output_text.delta') {
       yield* startTextBlockIfNeeded()
-      activeTextBuffer += payload.delta ?? ''
       if (activeTextBlockIndex !== null) {
-        if (
-          textBufferMode === 'strip' ||
-          looksLikeLeakedReasoningPrefix(activeTextBuffer)
-        ) {
-          textBufferMode = 'strip'
-          continue
-        }
-
-        if (textBufferMode === 'pending') {
-          if (shouldBufferPotentialReasoningPrefix(activeTextBuffer)) {
-            continue
-          }
+        const visible = thinkFilter.feed(payload.delta ?? '')
+        if (visible) {
           yield {
             type: 'content_block_delta',
             index: activeTextBlockIndex,
             delta: {
               type: 'text_delta',
-              text: activeTextBuffer,
+              text: visible,
             },
           }
-          textBufferMode = 'none'
-          continue
-        }
-
-        if (shouldBufferPotentialReasoningPrefix(activeTextBuffer)) {
-          textBufferMode = 'pending'
-          continue
-        }
-
-        yield {
-          type: 'content_block_delta',
-          index: activeTextBlockIndex,
-          delta: {
-            type: 'text_delta',
-            text: payload.delta ?? '',
-          },
-        }
+        }
       }
       continue
@@ -969,7 +937,7 @@ export function convertCodexResponseToAnthropicMessage(
       if (part?.type === 'output_text') {
         content.push({
           type: 'text',
-          text: stripLeakedReasoningPreamble(part.text ?? ''),
+          text: stripThinkTags(part.text ?? ''),
         })
       }
     }
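The new `thinkTagSanitizer.ts` itself is not shown in this compare, but the tests pin down its contract: `feed()` returns the visible part of each streamed delta, hiding anything inside `<think>…</think>` and buffering a partial tag at a chunk boundary; `flush()` emits buffered text that turned out not to be a tag and drops an unterminated `<think>` tail; `stripThinkTags()` is the non-streaming variant. A sketch satisfying that contract (the real implementation may differ; the trailing trim in `stripThinkTags` is inferred from the 'Here is the answer.' expectation):

```ts
const OPEN = '<think>'
const CLOSE = '</think>'

// Longest suffix of `text` that is a proper prefix of `tag` (could still
// complete in a later chunk).
function partialTagTail(text: string, tag: string): string {
  const max = Math.min(text.length, tag.length - 1)
  for (let len = max; len > 0; len--) {
    if (text.endsWith(tag.slice(0, len))) return tag.slice(0, len)
  }
  return ''
}

export function createThinkTagFilter() {
  let inThink = false
  let pending = '' // possible partial '<think' / '</think' at a chunk boundary

  const feed = (chunk: string): string => {
    let text = pending + chunk
    pending = ''
    let out = ''
    while (text.length > 0) {
      if (inThink) {
        const close = text.indexOf(CLOSE)
        if (close !== -1) {
          text = text.slice(close + CLOSE.length)
          inThink = false
          continue
        }
        // Keep a possible partial '</think' tail; drop the rest as hidden reasoning.
        pending = partialTagTail(text, CLOSE)
        return out
      }
      const open = text.indexOf(OPEN)
      if (open !== -1) {
        out += text.slice(0, open)
        text = text.slice(open + OPEN.length)
        inThink = true
        continue
      }
      // No full tag: emit everything except a possible partial '<think' tail.
      const tail = partialTagTail(text, OPEN)
      out += text.slice(0, text.length - tail.length)
      pending = tail
      return out
    }
    return out
  }

  // Buffered text inside an unterminated <think> is discarded; a buffered
  // non-tag prefix is emitted so no visible text is lost.
  const flush = (): string => {
    const tail = inThink ? '' : pending
    pending = ''
    inThink = false
    return tail
  }

  return { feed, flush }
}

// Non-streaming variant: drop <think>…</think> spans and any unterminated
// trailing <think>…; trim only when something was removed.
export function stripThinkTags(text: string): string {
  const stripped = text.replace(/<think>[\s\S]*?(?:<\/think>|$)/g, '')
  return stripped === text ? text : stripped.trim()
}
```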
@@ -320,10 +320,7 @@ export function classifyOpenAIHttpFailure(options: {
     }
   }

-  if (
-    (options.status >= 200 && options.status < 300 && isMalformedProviderResponse(body)) ||
-    (options.status >= 400 && isMalformedProviderResponse(body))
-  ) {
+  if (options.status >= 400 && isMalformedProviderResponse(body)) {
     return {
       source: 'http',
       category: 'malformed_provider_response',
@@ -117,3 +117,170 @@ test('redacts credentials in transport diagnostic URL logs', async () => {
   expect(logLine).not.toContain('user:supersecret')
   expect(logLine).not.toContain('supersecret@')
 })
+
+test('logs self-heal localhost fallback with redacted from/to URLs', async () => {
+  const debugSpy = mock(() => {})
+  mock.module('../../utils/debug.js', () => ({
+    logForDebugging: debugSpy,
+  }))
+
+  const nonce = `${Date.now()}-${Math.random()}`
+  const { createOpenAIShimClient } = await import(`./openaiShim.ts?ts=${nonce}`)
+
+  process.env.OPENAI_BASE_URL = 'http://user:supersecret@localhost:11434/v1'
+  process.env.OPENAI_API_KEY = 'supersecret'
+
+  globalThis.fetch = mock(async (input: string | Request) => {
+    const url = typeof input === 'string' ? input : input.url
+    if (url.includes('localhost')) {
+      throw Object.assign(new TypeError('fetch failed'), {
+        code: 'ENOTFOUND',
+      })
+    }
+
+    return new Response(
+      JSON.stringify({
+        id: 'chatcmpl-1',
+        model: 'qwen2.5-coder:7b',
+        choices: [
+          {
+            message: {
+              role: 'assistant',
+              content: 'ok',
+            },
+            finish_reason: 'stop',
+          },
+        ],
+        usage: {
+          prompt_tokens: 5,
+          completion_tokens: 2,
+          total_tokens: 7,
+        },
+      }),
+      {
+        status: 200,
+        headers: {
+          'Content-Type': 'application/json',
+        },
+      },
+    )
+  }) as typeof globalThis.fetch
+
+  const client = createOpenAIShimClient({}) as {
+    beta: {
+      messages: {
+        create: (params: Record<string, unknown>) => Promise<unknown>
+      }
+    }
+  }
+
+  await expect(
+    client.beta.messages.create({
+      model: 'qwen2.5-coder:7b',
+      messages: [{ role: 'user', content: 'hello' }],
+      max_tokens: 64,
+      stream: false,
+    }),
+  ).resolves.toBeDefined()
+
+  const fallbackLog = debugSpy.mock.calls.find(call =>
+    typeof call?.[0] === 'string' &&
+    call[0].includes('self-heal retry reason=localhost_resolution_failed'),
+  )
+
+  expect(fallbackLog).toBeDefined()
+  const logLine = String(fallbackLog?.[0])
+  expect(logLine).toContain('from=http://redacted:redacted@localhost:11434/v1/chat/completions')
+  expect(logLine).toContain('to=http://redacted:redacted@127.0.0.1:11434/v1/chat/completions')
+  expect(logLine).not.toContain('supersecret')
+})
+
+test('logs self-heal toolless retry for local tool-call incompatibility', async () => {
+  const debugSpy = mock(() => {})
+  mock.module('../../utils/debug.js', () => ({
+    logForDebugging: debugSpy,
+  }))
+
+  const nonce = `${Date.now()}-${Math.random()}`
+  const { createOpenAIShimClient } = await import(`./openaiShim.ts?ts=${nonce}`)
+
+  process.env.OPENAI_BASE_URL = 'http://localhost:11434/v1'
+  process.env.OPENAI_API_KEY = 'ollama'
+
+  let callCount = 0
+  globalThis.fetch = mock(async () => {
+    callCount += 1
+    if (callCount === 1) {
+      return new Response('tool_calls are not supported', {
+        status: 400,
+        headers: {
+          'Content-Type': 'text/plain',
+        },
+      })
+    }
+
+    return new Response(
+      JSON.stringify({
+        id: 'chatcmpl-1',
+        model: 'qwen2.5-coder:7b',
+        choices: [
+          {
+            message: {
+              role: 'assistant',
+              content: 'ok',
+            },
+            finish_reason: 'stop',
+          },
+        ],
+        usage: {
+          prompt_tokens: 7,
+          completion_tokens: 3,
+          total_tokens: 10,
+        },
+      }),
+      {
+        status: 200,
+        headers: {
+          'Content-Type': 'application/json',
+        },
+      },
+    )
+  }) as typeof globalThis.fetch
+
+  const client = createOpenAIShimClient({}) as {
+    beta: {
+      messages: {
+        create: (params: Record<string, unknown>) => Promise<unknown>
+      }
+    }
+  }
+
+  await expect(
+    client.beta.messages.create({
+      model: 'qwen2.5-coder:7b',
+      messages: [{ role: 'user', content: 'hello' }],
+      tools: [
+        {
+          name: 'Read',
+          description: 'Read file',
+          input_schema: {
+            type: 'object',
+            properties: {
+              filePath: { type: 'string' },
+            },
+            required: ['filePath'],
+          },
+        },
+      ],
+      max_tokens: 64,
+      stream: false,
+    }),
+  ).resolves.toBeDefined()
+
+  const fallbackLog = debugSpy.mock.calls.find(call =>
+    typeof call?.[0] === 'string' &&
+    call[0].includes('self-heal retry reason=tool_call_incompatible mode=toolless'),
+  )
+
+  expect(fallbackLog).toBeDefined()
+  expect(fallbackLog?.[1]).toEqual({ level: 'warn' })
+})
@@ -2513,7 +2513,7 @@ test('non-streaming: real content takes precedence over reasoning_content', asyn
|
||||
])
|
||||
})
|
||||
|
||||
test('non-streaming: strips leaked reasoning preamble from assistant content', async () => {
|
||||
test('non-streaming: strips <think> tag block from assistant content', async () => {
|
||||
globalThis.fetch = (async () => {
|
||||
return new Response(
|
||||
JSON.stringify({
|
||||
@@ -2524,7 +2524,7 @@ test('non-streaming: strips leaked reasoning preamble from assistant content', a
|
||||
message: {
|
||||
role: 'assistant',
|
||||
content:
|
||||
'The user just said "hey" - a simple greeting. I should respond briefly and friendly.\n\nHey! How can I help you today?',
|
||||
'<think>user wants a greeting, respond briefly</think>Hey! How can I help you today?',
|
||||
},
|
||||
finish_reason: 'stop',
|
||||
},
|
||||
@@ -2645,7 +2645,7 @@ test('streaming: thinking block closed before tool call', async () => {
|
||||
expect(thinkingStart?.content_block?.type).toBe('thinking')
|
||||
})
|
||||
|
||||
test('streaming: strips leaked reasoning preamble from assistant content deltas', async () => {
|
||||
test('streaming: strips <think> tag block from assistant content deltas', async () => {
|
||||
globalThis.fetch = (async () => {
|
||||
const chunks = makeStreamChunks([
|
||||
{
|
||||
@@ -2658,7 +2658,7 @@ test('streaming: strips leaked reasoning preamble from assistant content deltas'
|
||||
delta: {
|
||||
role: 'assistant',
|
||||
content:
|
||||
'The user just said "hey" - a simple greeting. I should respond briefly and friendly.\n\nHey! How can I help you today?',
|
||||
'<think>user wants a greeting, respond briefly</think>Hey! How can I help you today?',
|
||||
},
|
||||
finish_reason: null,
|
||||
},
|
||||
@@ -2700,10 +2700,10 @@ test('streaming: strips leaked reasoning preamble from assistant content deltas'
|
||||
}
|
||||
}
|
||||
|
||||
expect(textDeltas).toEqual(['Hey! How can I help you today?'])
|
||||
expect(textDeltas.join('')).toBe('Hey! How can I help you today?')
|
||||
})
|
||||
|
||||
test('streaming: strips leaked reasoning preamble when split across multiple content chunks', async () => {
|
||||
test('streaming: strips <think> tag split across multiple content chunks', async () => {
|
||||
globalThis.fetch = (async () => {
|
||||
const chunks = makeStreamChunks([
|
||||
{
|
||||
@@ -2715,7 +2715,7 @@ test('streaming: strips leaked reasoning preamble when split across multiple con
|
||||
index: 0,
|
||||
delta: {
|
||||
role: 'assistant',
|
||||
content: 'The user said "hey" - this is a simple greeting. ',
|
||||
content: '<think>user wants a greeting,',
|
||||
},
|
||||
finish_reason: null,
|
||||
},
|
||||
@@ -2729,8 +2729,21 @@ test('streaming: strips leaked reasoning preamble when split across multiple con
|
||||
{
|
||||
index: 0,
|
||||
delta: {
|
||||
content:
|
||||
'I should respond in a friendly, concise way.\n\nHey! How can I help you today?',
|
||||
content: ' respond briefly</th',
|
||||
},
|
||||
finish_reason: null,
|
||||
},
|
||||
],
|
||||
},
|
||||
{
|
||||
id: 'chatcmpl-1',
|
||||
object: 'chat.completion.chunk',
|
||||
model: 'gpt-5-mini',
|
||||
choices: [
|
||||
{
|
||||
index: 0,
|
||||
delta: {
|
||||
content: 'ink>Hey! How can I help you today?',
|
||||
},
|
||||
finish_reason: null,
|
||||
},
|
||||
@@ -2773,7 +2786,69 @@ test('streaming: strips leaked reasoning preamble when split across multiple con
|
||||
}
|
||||
}
|
||||
|
||||
expect(textDeltas).toEqual(['Hey! How can I help you today?'])
|
||||
expect(textDeltas.join('')).toBe('Hey! How can I help you today?')
|
||||
})
|
||||
|
||||
test('streaming: preserves prose without tags (no phrase-based false positive)', async () => {
|
||||
// Regression: older phrase-based sanitizer would strip "I should..." prose.
|
||||
// The tag-based approach leaves legitimate assistant output alone.
|
||||
globalThis.fetch = (async () => {
|
||||
const chunks = makeStreamChunks([
|
||||
{
|
||||
id: 'chatcmpl-1',
|
||||
object: 'chat.completion.chunk',
|
||||
model: 'gpt-5-mini',
|
||||
choices: [
|
||||
{
|
||||
index: 0,
|
||||
delta: {
|
||||
role: 'assistant',
|
||||
content:
|
||||
'I should note that the user role requires a briefly concise friendly response format.',
|
||||
},
|
||||
finish_reason: null,
|
||||
},
|
||||
],
|
||||
},
|
||||
{
|
||||
id: 'chatcmpl-1',
|
||||
object: 'chat.completion.chunk',
|
||||
model: 'gpt-5-mini',
|
||||
choices: [
|
||||
{
|
||||
index: 0,
|
||||
delta: {},
|
||||
finish_reason: 'stop',
|
||||
},
|
||||
],
|
||||
},
|
||||
])
|
||||
|
||||
return makeSseResponse(chunks)
|
||||
}) as FetchType
|
||||
|
||||
const client = createOpenAIShimClient({}) as OpenAIShimClient
|
||||
const result = await client.beta.messages
|
||||
.create({
|
||||
model: 'gpt-5-mini',
|
||||
system: 'test system',
|
||||
messages: [{ role: 'user', content: 'hey' }],
|
||||
max_tokens: 64,
|
||||
stream: true,
|
||||
})
|
||||
.withResponse()
|
||||
|
||||
const textDeltas: string[] = []
|
||||
for await (const event of result.data) {
|
||||
const delta = (event as { delta?: { type?: string; text?: string } }).delta
|
||||
if (delta?.type === 'text_delta' && typeof delta.text === 'string') {
|
||||
textDeltas.push(delta.text)
|
||||
}
|
||||
}
|
||||
|
||||
expect(textDeltas.join('')).toBe(
|
||||
'I should note that the user role requires a briefly concise friendly response format.',
|
||||
)
|
||||
})
|
||||
|
||||
test('classifies localhost transport failures with actionable category marker', async () => {
|
||||
@@ -2856,6 +2931,204 @@ test('classifies chat-completions endpoint 404 failures with endpoint_not_found
|
||||
}),
|
||||
).rejects.toThrow('openai_category=endpoint_not_found')
|
||||
})
|
||||
test('self-heals localhost resolution failures by retrying local loopback base URL', async () => {
|
||||
process.env.OPENAI_BASE_URL = 'http://localhost:11434/v1'
|
||||
|
||||
const requestUrls: string[] = []
|
||||
globalThis.fetch = (async (input, _init) => {
|
||||
const url = typeof input === 'string' ? input : input.url
|
||||
requestUrls.push(url)
|
||||
|
||||
if (url.includes('localhost')) {
|
||||
const error = Object.assign(new TypeError('fetch failed'), {
|
||||
code: 'ENOTFOUND',
|
||||
})
|
||||
throw error
|
||||
}
|
||||
|
||||
return new Response(
|
||||
JSON.stringify({
|
||||
id: 'chatcmpl-1',
|
||||
model: 'qwen2.5-coder:7b',
|
||||
choices: [
|
||||
{
|
||||
message: {
|
||||
role: 'assistant',
|
||||
content: 'hello from loopback',
|
||||
},
|
||||
finish_reason: 'stop',
|
||||
},
|
||||
],
|
||||
usage: {
|
||||
prompt_tokens: 4,
|
||||
completion_tokens: 3,
|
||||
total_tokens: 7,
|
||||
},
|
||||
}),
|
||||
{
|
||||
status: 200,
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
},
|
||||
},
|
||||
)
|
||||
}) as FetchType
|
||||
|
||||
const client = createOpenAIShimClient({}) as OpenAIShimClient
|
||||
|
||||
await expect(
|
||||
client.beta.messages.create({
|
||||
model: 'qwen2.5-coder:7b',
|
||||
messages: [{ role: 'user', content: 'hello' }],
|
||||
max_tokens: 64,
|
||||
stream: false,
|
||||
}),
|
||||
).resolves.toBeDefined()
|
||||
|
||||
expect(requestUrls[0]).toBe('http://localhost:11434/v1/chat/completions')
|
||||
expect(requestUrls).toContain('http://127.0.0.1:11434/v1/chat/completions')
|
||||
})
|
||||
|
||||
test('self-heals local endpoint_not_found by retrying with /v1 base URL', async () => {
|
||||
process.env.OPENAI_BASE_URL = 'http://localhost:11434'
|
||||
|
||||
const requestUrls: string[] = []
|
||||
globalThis.fetch = (async (input, _init) => {
|
||||
const url = typeof input === 'string' ? input : input.url
|
||||
requestUrls.push(url)
|
||||
|
||||
if (url === 'http://localhost:11434/chat/completions') {
|
||||
return new Response('Not Found', {
|
||||
status: 404,
|
||||
headers: {
|
||||
'Content-Type': 'text/plain',
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
return new Response(
|
||||
JSON.stringify({
|
||||
id: 'chatcmpl-1',
|
||||
model: 'qwen2.5-coder:7b',
|
||||
choices: [
|
||||
{
|
||||
message: {
|
||||
role: 'assistant',
|
||||
content: 'hello from /v1',
|
||||
},
|
||||
finish_reason: 'stop',
|
||||
},
|
||||
],
|
||||
usage: {
|
||||
prompt_tokens: 5,
|
||||
completion_tokens: 2,
|
||||
total_tokens: 7,
|
||||
},
|
||||
}),
|
||||
{
|
||||
status: 200,
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
},
|
||||
},
|
||||
)
|
||||
}) as FetchType
|
||||
|
||||
const client = createOpenAIShimClient({}) as OpenAIShimClient
|
||||
|
||||
await expect(
|
||||
client.beta.messages.create({
|
||||
model: 'qwen2.5-coder:7b',
|
||||
messages: [{ role: 'user', content: 'hello' }],
|
||||
max_tokens: 64,
|
||||
stream: false,
|
||||
}),
|
||||
).resolves.toBeDefined()
|
||||
|
||||
expect(requestUrls).toEqual([
|
||||
'http://localhost:11434/chat/completions',
|
||||
'http://localhost:11434/v1/chat/completions',
|
||||
])
|
||||
})
|
||||
|
||||
test('self-heals tool-call incompatibility by retrying local Ollama requests without tools', async () => {
|
||||
process.env.OPENAI_BASE_URL = 'http://localhost:11434/v1'
|
||||
|
||||
const requestBodies: Array<Record<string, unknown>> = []
|
||||
globalThis.fetch = (async (_input, init) => {
|
||||
const requestBody = JSON.parse(String(init?.body)) as Record<string, unknown>
|
||||
requestBodies.push(requestBody)
|
||||
|
||||
if (requestBodies.length === 1) {
|
||||
return new Response('tool_calls are not supported', {
|
||||
status: 400,
|
||||
headers: {
|
||||
'Content-Type': 'text/plain',
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
return new Response(
|
||||
JSON.stringify({
|
||||
id: 'chatcmpl-1',
|
||||
model: 'qwen2.5-coder:7b',
|
||||
choices: [
|
||||
{
|
||||
message: {
|
||||
role: 'assistant',
|
||||
content: 'fallback without tools',
|
||||
},
|
||||
finish_reason: 'stop',
|
||||
},
|
||||
],
|
||||
usage: {
|
||||
prompt_tokens: 8,
|
||||
completion_tokens: 4,
|
||||
total_tokens: 12,
|
||||
},
|
||||
}),
|
||||
{
|
||||
status: 200,
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
},
|
||||
},
|
||||
)
|
||||
}) as FetchType
|
||||
|
||||
const client = createOpenAIShimClient({}) as OpenAIShimClient
|
||||
|
||||
await expect(
|
||||
client.beta.messages.create({
|
||||
model: 'qwen2.5-coder:7b',
|
||||
messages: [{ role: 'user', content: 'hello' }],
|
||||
tools: [
|
||||
{
|
||||
name: 'Read',
|
||||
description: 'Read a file',
|
||||
input_schema: {
|
||||
type: 'object',
|
||||
properties: {
|
||||
filePath: { type: 'string' },
|
||||
},
|
||||
required: ['filePath'],
|
||||
},
|
||||
},
|
||||
],
|
||||
max_tokens: 64,
|
||||
stream: false,
|
||||
}),
|
||||
).resolves.toBeDefined()
|
||||
|
||||
expect(requestBodies).toHaveLength(2)
|
||||
expect(Array.isArray(requestBodies[0]?.tools)).toBe(true)
|
||||
expect(requestBodies[0]?.tool_choice).toBeUndefined()
|
||||
expect(
|
||||
requestBodies[1]?.tools === undefined ||
|
||||
(Array.isArray(requestBodies[1]?.tools) && requestBodies[1]?.tools.length === 0),
|
||||
).toBe(true)
|
||||
expect(requestBodies[1]?.tool_choice).toBeUndefined()
|
||||
})
|
||||
|
||||
test('preserves valid tool_result and drops orphan tool_result', async () => {
|
||||
let requestBody: Record<string, unknown> | undefined
|
||||
@@ -2924,7 +3197,7 @@ test('preserves valid tool_result and drops orphan tool_result', async () => {
|
||||
{
|
||||
role: 'user',
|
||||
content: 'What happened?',
|
||||
}
|
||||
},
|
||||
],
|
||||
},
|
||||
],
|
||||
|
||||
@@ -32,10 +32,9 @@ import { resolveGeminiCredential } from '../../utils/geminiAuth.js'
|
||||
import { hydrateGeminiAccessTokenFromSecureStorage } from '../../utils/geminiCredentials.js'
|
||||
import { hydrateGithubModelsTokenFromSecureStorage } from '../../utils/githubModelsCredentials.js'
|
||||
import {
|
||||
looksLikeLeakedReasoningPrefix,
|
||||
shouldBufferPotentialReasoningPrefix,
|
||||
stripLeakedReasoningPreamble,
|
||||
} from './reasoningLeakSanitizer.js'
|
||||
createThinkTagFilter,
|
||||
stripThinkTags,
|
||||
} from './thinkTagSanitizer.js'
|
||||
import {
|
||||
codexStreamToAnthropic,
|
||||
collectCodexCompletedResponse,
|
||||
@@ -49,10 +48,12 @@ import {
|
||||
} from './codexShim.js'
|
||||
import { fetchWithProxyRetry } from './fetchWithProxyRetry.js'
|
||||
import {
|
||||
getLocalProviderRetryBaseUrls,
|
||||
getGithubEndpointType,
|
||||
isLocalProviderUrl,
|
||||
resolveRuntimeCodexCredentials,
|
||||
resolveProviderRequest,
|
||||
getGithubEndpointType,
|
||||
shouldAttemptLocalToollessRetry,
|
||||
} from './providerConfig.js'
|
||||
import {
|
||||
buildOpenAICompatibilityErrorMessage,
|
||||
@@ -718,8 +719,7 @@ async function* openaiStreamToAnthropic(
|
||||
let hasEmittedContentStart = false
|
||||
let hasEmittedThinkingStart = false
|
||||
let hasClosedThinking = false
|
||||
let activeTextBuffer = ''
|
||||
let textBufferMode: 'none' | 'pending' | 'strip' = 'none'
|
||||
const thinkFilter = createThinkTagFilter()
|
||||
let lastStopReason: 'tool_use' | 'max_tokens' | 'end_turn' | null = null
|
||||
let hasEmittedFinalUsage = false
|
||||
let hasProcessedFinishReason = false
|
||||
@@ -798,14 +798,12 @@ async function* openaiStreamToAnthropic(
|
||||
const closeActiveContentBlock = async function* () {
|
||||
if (!hasEmittedContentStart) return
|
||||
|
||||
if (textBufferMode !== 'none') {
|
||||
const sanitized = stripLeakedReasoningPreamble(activeTextBuffer)
|
||||
if (sanitized) {
|
||||
const tail = thinkFilter.flush()
|
||||
if (tail) {
|
||||
yield {
|
||||
type: 'content_block_delta',
|
||||
index: contentBlockIndex,
|
||||
delta: { type: 'text_delta', text: sanitized },
|
||||
}
|
||||
delta: { type: 'text_delta', text: tail },
|
||||
}
|
||||
}
|
||||
|
||||
@@ -815,8 +813,6 @@ async function* openaiStreamToAnthropic(
|
||||
}
|
||||
contentBlockIndex++
|
||||
hasEmittedContentStart = false
|
||||
activeTextBuffer = ''
|
||||
textBufferMode = 'none'
|
||||
}
|
||||
|
||||
try {
|
||||
@@ -873,7 +869,6 @@ async function* openaiStreamToAnthropic(
|
||||
contentBlockIndex++
|
||||
hasClosedThinking = true
|
||||
}
|
||||
activeTextBuffer += delta.content
|
||||
if (!hasEmittedContentStart) {
|
||||
yield {
|
||||
type: 'content_block_start',
|
||||
@@ -883,38 +878,13 @@ async function* openaiStreamToAnthropic(
|
||||
hasEmittedContentStart = true
|
||||
}
|
||||
|
||||
if (
|
||||
textBufferMode === 'strip' ||
|
||||
looksLikeLeakedReasoningPrefix(activeTextBuffer)
|
||||
) {
|
||||
textBufferMode = 'strip'
|
||||
continue
|
||||
}
|
||||
|
||||
if (textBufferMode === 'pending') {
|
||||
if (shouldBufferPotentialReasoningPrefix(activeTextBuffer)) {
|
||||
continue
|
||||
}
|
||||
const visible = thinkFilter.feed(delta.content)
|
||||
if (visible) {
|
||||
yield {
|
||||
type: 'content_block_delta',
|
||||
index: contentBlockIndex,
|
||||
delta: {
|
||||
type: 'text_delta',
|
||||
text: activeTextBuffer,
|
||||
},
|
||||
delta: { type: 'text_delta', text: visible },
|
||||
}
|
||||
textBufferMode = 'none'
|
||||
continue
|
||||
}
|
||||
|
||||
if (shouldBufferPotentialReasoningPrefix(activeTextBuffer)) {
|
||||
textBufferMode = 'pending'
|
||||
continue
|
||||
}
|
||||
yield {
|
||||
type: 'content_block_delta',
|
||||
index: contentBlockIndex,
|
||||
delta: { type: 'text_delta', text: delta.content },
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1459,46 +1429,93 @@ class OpenAIShimMessages {
|
||||
headers['X-GitHub-Api-Version'] = '2022-11-28'
|
||||
}
|
||||
|
||||
// Build the chat completions URL
|
||||
// Azure Cognitive Services / Azure OpenAI require a deployment-specific path
|
||||
// and an api-version query parameter.
|
||||
// Standard format: {base}/openai/deployments/{model}/chat/completions?api-version={version}
|
||||
// Non-Azure: {base}/chat/completions
|
||||
let chatCompletionsUrl: string
|
||||
const buildChatCompletionsUrl = (baseUrl: string): string => {
|
||||
// Azure Cognitive Services / Azure OpenAI require a deployment-specific
|
||||
// path and an api-version query parameter.
|
||||
if (isAzure) {
|
||||
const apiVersion = process.env.AZURE_OPENAI_API_VERSION ?? '2024-12-01-preview'
|
||||
const deployment = request.resolvedModel ?? process.env.OPENAI_MODEL ?? 'gpt-4o'
|
||||
// If base URL already contains /deployments/, use it as-is with api-version
|
||||
if (/\/deployments\//i.test(request.baseUrl)) {
|
||||
const base = request.baseUrl.replace(/\/+$/, '')
|
||||
chatCompletionsUrl = `${base}/chat/completions?api-version=${apiVersion}`
|
||||
} else {
|
||||
// Strip trailing /v1 or /openai/v1 if present, then build Azure path
|
||||
const base = request.baseUrl.replace(/\/(openai\/)?v1\/?$/, '').replace(/\/+$/, '')
|
||||
chatCompletionsUrl = `${base}/openai/deployments/${deployment}/chat/completions?api-version=${apiVersion}`
|
||||
}
|
||||
} else {
|
||||
chatCompletionsUrl = `${request.baseUrl}/chat/completions`
|
||||
|
||||
// If base URL already contains /deployments/, use it as-is with api-version.
|
||||
if (/\/deployments\//i.test(baseUrl)) {
|
||||
const normalizedBase = baseUrl.replace(/\/+$/, '')
|
||||
return `${normalizedBase}/chat/completions?api-version=${apiVersion}`
|
||||
}
|
||||
|
||||
const fetchInit = {
|
||||
// Strip trailing /v1 or /openai/v1 if present, then build Azure path.
|
||||
const normalizedBase = baseUrl
|
||||
.replace(/\/(openai\/)?v1\/?$/, '')
|
||||
.replace(/\/+$/, '')
|
||||
|
||||
return `${normalizedBase}/openai/deployments/${deployment}/chat/completions?api-version=${apiVersion}`
|
||||
}
|
||||
|
||||
return `${baseUrl}/chat/completions`
|
||||
}
|
||||
|
||||
const localRetryBaseUrls = isLocal
|
||||
? getLocalProviderRetryBaseUrls(request.baseUrl)
|
||||
: []
|
||||
|
||||
let activeBaseUrl = request.baseUrl
|
||||
let chatCompletionsUrl = buildChatCompletionsUrl(activeBaseUrl)
|
||||
const attemptedLocalBaseUrls = new Set<string>([activeBaseUrl])
|
||||
let didRetryWithoutTools = false
|
||||
|
||||
const promoteNextLocalBaseUrl = (
|
||||
reason: 'endpoint_not_found' | 'localhost_resolution_failed',
|
||||
): boolean => {
|
||||
for (const candidateBaseUrl of localRetryBaseUrls) {
|
||||
if (attemptedLocalBaseUrls.has(candidateBaseUrl)) {
|
||||
continue
|
||||
}
|
||||
|
||||
const previousUrl = chatCompletionsUrl
|
||||
attemptedLocalBaseUrls.add(candidateBaseUrl)
|
||||
activeBaseUrl = candidateBaseUrl
|
||||
chatCompletionsUrl = buildChatCompletionsUrl(activeBaseUrl)
|
||||
|
||||
logForDebugging(
|
||||
`[OpenAIShim] self-heal retry reason=${reason} method=POST from=${redactUrlForDiagnostics(previousUrl)} to=${redactUrlForDiagnostics(chatCompletionsUrl)} model=${request.resolvedModel}`,
|
||||
{ level: 'warn' },
|
||||
)
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
let serializedBody = JSON.stringify(body)
|
||||
|
||||
const refreshSerializedBody = (): void => {
|
||||
serializedBody = JSON.stringify(body)
|
||||
}
|
||||
|
||||
const buildFetchInit = () => ({
|
||||
method: 'POST' as const,
|
||||
headers,
|
||||
body: JSON.stringify(body),
|
||||
body: serializedBody,
|
||||
signal: options?.signal,
|
||||
}
|
||||
})
|
||||
|
||||
const maxAttempts = isGithub ? GITHUB_429_MAX_RETRIES : 1
|
||||
const maxSelfHealAttempts = isLocal
|
||||
? localRetryBaseUrls.length + 1
|
||||
: 0
|
||||
const maxAttempts = (isGithub ? GITHUB_429_MAX_RETRIES : 1) + maxSelfHealAttempts
|
||||
|
||||
const throwClassifiedTransportError = (
|
||||
error: unknown,
|
||||
requestUrl: string,
|
||||
preclassifiedFailure?: ReturnType<typeof classifyOpenAINetworkFailure>,
|
||||
): never => {
|
||||
if (options?.signal?.aborted) {
|
||||
throw error
|
||||
}
|
||||
|
||||
const failure = classifyOpenAINetworkFailure(error, {
|
||||
const failure =
|
||||
preclassifiedFailure ??
|
||||
classifyOpenAINetworkFailure(error, {
|
||||
url: requestUrl,
|
||||
})
|
||||
const redactedUrl = redactUrlForDiagnostics(requestUrl)
|
||||
@@ -1531,8 +1548,11 @@ class OpenAIShimMessages {
|
||||
responseHeaders: Headers,
|
||||
requestUrl: string,
|
||||
rateHint = '',
|
||||
preclassifiedFailure?: ReturnType<typeof classifyOpenAIHttpFailure>,
|
||||
): never => {
|
||||
const failure = classifyOpenAIHttpFailure({
|
||||
const failure =
|
||||
preclassifiedFailure ??
|
||||
classifyOpenAIHttpFailure({
|
||||
status,
|
||||
body: errorBody,
|
||||
})
|
||||
@@ -1557,10 +1577,13 @@ class OpenAIShimMessages {
|
||||
let response: Response | undefined
|
||||
for (let attempt = 0; attempt < maxAttempts; attempt++) {
|
||||
try {
|
||||
response = await fetchWithProxyRetry(chatCompletionsUrl, fetchInit)
|
||||
response = await fetchWithProxyRetry(
|
||||
chatCompletionsUrl,
|
||||
buildFetchInit(),
|
||||
)
|
||||
} catch (error) {
|
||||
const isAbortError =
|
||||
fetchInit.signal?.aborted === true ||
|
||||
options?.signal?.aborted === true ||
|
||||
(typeof DOMException !== 'undefined' &&
|
||||
error instanceof DOMException &&
|
||||
error.name === 'AbortError') ||
|
||||
@@ -1573,7 +1596,19 @@ class OpenAIShimMessages {
|
||||
throw error
|
||||
}
|
||||
|
||||
throwClassifiedTransportError(error, chatCompletionsUrl)
|
||||
const failure = classifyOpenAINetworkFailure(error, {
|
||||
url: chatCompletionsUrl,
|
||||
})
|
||||
|
||||
if (
|
||||
isLocal &&
|
||||
failure.category === 'localhost_resolution_failed' &&
|
||||
promoteNextLocalBaseUrl('localhost_resolution_failed')
|
||||
) {
|
||||
continue
|
||||
}
|
||||
|
||||
throwClassifiedTransportError(error, chatCompletionsUrl, failure)
|
||||
}
|
||||
|
||||
if (response.ok) {
|
||||
@@ -1665,6 +1700,10 @@ class OpenAIShimMessages {
|
||||
return responsesResponse
|
||||
}
|
||||
const responsesErrorBody = await responsesResponse.text().catch(() => 'unknown error')
|
||||
const responsesFailure = classifyOpenAIHttpFailure({
|
||||
status: responsesResponse.status,
|
||||
body: responsesErrorBody,
|
||||
})
|
||||
let responsesErrorResponse: object | undefined
|
||||
try { responsesErrorResponse = JSON.parse(responsesErrorBody) } catch { /* raw text */ }
|
||||
throwClassifiedHttpError(
|
||||
@@ -1673,10 +1712,49 @@ class OpenAIShimMessages {
|
||||
responsesErrorResponse,
|
||||
responsesResponse.headers,
|
||||
responsesUrl,
|
||||
'',
|
||||
responsesFailure,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
const failure = classifyOpenAIHttpFailure({
|
||||
status: response.status,
|
||||
body: errorBody,
|
||||
})
|
||||
|
||||
if (
|
||||
isLocal &&
|
||||
failure.category === 'endpoint_not_found' &&
|
||||
promoteNextLocalBaseUrl('endpoint_not_found')
|
||||
) {
|
||||
continue
|
||||
}
|
||||
|
||||
const hasToolsPayload =
|
||||
Array.isArray(body.tools) &&
|
||||
body.tools.length > 0
|
||||
|
||||
if (
|
||||
!didRetryWithoutTools &&
|
||||
failure.category === 'tool_call_incompatible' &&
|
||||
shouldAttemptLocalToollessRetry({
|
||||
baseUrl: activeBaseUrl,
|
||||
hasTools: hasToolsPayload,
|
||||
})
|
||||
) {
|
||||
didRetryWithoutTools = true
|
||||
delete body.tools
|
||||
delete body.tool_choice
|
||||
refreshSerializedBody()
|
||||
|
||||
logForDebugging(
|
||||
`[OpenAIShim] self-heal retry reason=tool_call_incompatible mode=toolless method=POST url=${redactUrlForDiagnostics(chatCompletionsUrl)} model=${request.resolvedModel}`,
|
||||
{ level: 'warn' },
|
||||
)
|
||||
continue
|
||||
}
|
||||
|
||||
let errorResponse: object | undefined
|
||||
try { errorResponse = JSON.parse(errorBody) } catch { /* raw text */ }
|
||||
throwClassifiedHttpError(
|
||||
@@ -1686,6 +1764,7 @@ class OpenAIShimMessages {
|
||||
response.headers as unknown as Headers,
|
||||
chatCompletionsUrl,
|
||||
rateHint,
|
||||
failure,
|
||||
)
|
||||
}
|
||||
|
||||
@@ -1742,7 +1821,7 @@ class OpenAIShimMessages {
|
||||
if (typeof rawContent === 'string' && rawContent) {
|
||||
content.push({
|
||||
type: 'text',
|
||||
text: stripLeakedReasoningPreamble(rawContent),
|
||||
text: stripThinkTags(rawContent),
|
||||
})
|
||||
} else if (Array.isArray(rawContent) && rawContent.length > 0) {
|
||||
const parts: string[] = []
|
||||
@@ -1760,7 +1839,7 @@ class OpenAIShimMessages {
|
||||
if (joined) {
|
||||
content.push({
|
||||
type: 'text',
|
||||
text: stripLeakedReasoningPreamble(joined),
|
||||
text: stripThinkTags(joined),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2,8 +2,10 @@ import { afterEach, expect, test } from 'bun:test'
|
||||
|
||||
import {
|
||||
getAdditionalModelOptionsCacheScope,
|
||||
getLocalProviderRetryBaseUrls,
|
||||
isLocalProviderUrl,
|
||||
resolveProviderRequest,
|
||||
shouldAttemptLocalToollessRetry,
|
||||
} from './providerConfig.js'
|
||||
|
||||
const originalEnv = {
|
||||
@@ -83,3 +85,42 @@ test('skips local model cache scope for remote openai-compatible providers', ()
|
||||
|
||||
expect(getAdditionalModelOptionsCacheScope()).toBeNull()
|
||||
})
|
||||
|
||||
test('derives local retry base URLs with /v1 and loopback fallback candidates', () => {
|
||||
expect(getLocalProviderRetryBaseUrls('http://localhost:11434')).toEqual([
|
||||
'http://localhost:11434/v1',
|
||||
'http://127.0.0.1:11434',
|
||||
'http://127.0.0.1:11434/v1',
|
||||
])
|
||||
})
|
||||
|
||||
test('does not derive local retry base URLs for remote providers', () => {
|
||||
expect(getLocalProviderRetryBaseUrls('https://api.openai.com/v1')).toEqual([])
|
||||
})
|
||||
|
||||
test('enables local toolless retry for likely Ollama endpoints with tools', () => {
|
||||
expect(
|
||||
shouldAttemptLocalToollessRetry({
|
||||
baseUrl: 'http://localhost:11434/v1',
|
||||
hasTools: true,
|
||||
}),
|
||||
).toBe(true)
|
||||
})
|
||||
|
||||
test('disables local toolless retry when no tools are present', () => {
|
||||
expect(
|
||||
shouldAttemptLocalToollessRetry({
|
||||
baseUrl: 'http://localhost:11434/v1',
|
||||
hasTools: false,
|
||||
}),
|
||||
).toBe(false)
|
||||
})
|
||||
|
||||
test('disables local toolless retry for non-Ollama local endpoints', () => {
|
||||
expect(
|
||||
shouldAttemptLocalToollessRetry({
|
||||
baseUrl: 'http://localhost:1234/v1',
|
||||
hasTools: true,
|
||||
}),
|
||||
).toBe(false)
|
||||
})
|
||||
|
||||
@@ -305,6 +305,101 @@ export function isLocalProviderUrl(baseUrl: string | undefined): boolean {
|
||||
}
|
||||
}
|
||||
|
||||
function trimTrailingSlash(value: string): string {
|
||||
return value.replace(/\/+$/, '')
|
||||
}
|
||||
|
||||
function normalizePathWithV1(pathname: string): string {
|
||||
const trimmed = trimTrailingSlash(pathname)
|
||||
if (!trimmed || trimmed === '/') {
|
||||
return '/v1'
|
||||
}
|
||||
|
||||
if (trimmed.toLowerCase().endsWith('/v1')) {
|
||||
return trimmed
|
||||
}
|
||||
|
||||
return `${trimmed}/v1`
|
||||
}
|
||||
|
||||
function isLikelyOllamaEndpoint(baseUrl: string): boolean {
|
||||
try {
|
||||
const parsed = new URL(baseUrl)
|
||||
const hostname = parsed.hostname.toLowerCase()
|
||||
const pathname = parsed.pathname.toLowerCase()
|
||||
|
||||
if (parsed.port === '11434') {
|
||||
return true
|
||||
}
|
||||
|
||||
return (
|
||||
hostname.includes('ollama') ||
|
||||
pathname.includes('ollama')
|
||||
)
|
||||
} catch {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
export function getLocalProviderRetryBaseUrls(baseUrl: string): string[] {
|
||||
if (!isLocalProviderUrl(baseUrl)) {
|
||||
return []
|
||||
}
|
||||
|
||||
try {
|
||||
const parsed = new URL(baseUrl)
|
||||
const original = trimTrailingSlash(parsed.toString())
|
||||
const seen = new Set<string>([original])
|
||||
const candidates: string[] = []
|
||||
|
||||
const addCandidate = (hostname: string, pathname: string): void => {
|
||||
const next = new URL(parsed.toString())
|
||||
next.hostname = hostname
|
||||
next.pathname = pathname
|
||||
next.search = ''
|
||||
next.hash = ''
|
||||
|
||||
const normalized = trimTrailingSlash(next.toString())
|
||||
if (seen.has(normalized)) {
|
||||
return
|
||||
}
|
||||
|
||||
seen.add(normalized)
|
||||
candidates.push(normalized)
|
||||
}
|
||||
|
||||
const v1Pathname = normalizePathWithV1(parsed.pathname)
|
||||
if (v1Pathname !== trimTrailingSlash(parsed.pathname)) {
|
||||
addCandidate(parsed.hostname, v1Pathname)
|
||||
}
|
||||
|
||||
const hostname = parsed.hostname.toLowerCase().replace(/^\[|\]$/g, '')
|
||||
if (hostname === 'localhost' || hostname === '::1') {
|
||||
addCandidate('127.0.0.1', parsed.pathname || '/')
|
||||
addCandidate('127.0.0.1', v1Pathname)
|
||||
}
|
||||
|
||||
return candidates
|
||||
} catch {
|
||||
return []
|
||||
}
|
||||
}
|
||||
|
||||
export function shouldAttemptLocalToollessRetry(options: {
|
||||
baseUrl: string
|
||||
hasTools: boolean
|
||||
}): boolean {
|
||||
if (!options.hasTools) {
|
||||
return false
|
||||
}
|
||||
|
||||
if (!isLocalProviderUrl(options.baseUrl)) {
|
||||
return false
|
||||
}
|
||||
|
||||
return isLikelyOllamaEndpoint(options.baseUrl)
|
||||
}
|
||||
|
||||
export function isCodexBaseUrl(baseUrl: string | undefined): boolean {
|
||||
if (!baseUrl) return false
|
||||
try {
|
||||
@@ -412,6 +507,9 @@ export function resolveProviderRequest(options?: {
|
||||
? normalizedGeminiEnvBaseUrl
|
||||
: asNamedEnvUrl(process.env.OPENAI_BASE_URL, 'OPENAI_BASE_URL')
|
||||
|
||||
// In Mistral mode, a literal "undefined" MISTRAL_BASE_URL is treated as
|
||||
// misconfiguration and falls back to OPENAI_API_BASE, then
|
||||
// DEFAULT_MISTRAL_BASE_URL for a safe default endpoint.
|
||||
const fallbackEnvBaseUrl = isMistralMode
|
||||
? (primaryEnvBaseUrl === undefined
|
||||
? asNamedEnvUrl(process.env.OPENAI_API_BASE, 'OPENAI_API_BASE') ?? DEFAULT_MISTRAL_BASE_URL
|
||||
|
||||
@@ -1,46 +0,0 @@
|
||||
import { describe, expect, test } from 'bun:test'
|
||||
|
||||
import {
|
||||
looksLikeLeakedReasoningPrefix,
|
||||
shouldBufferPotentialReasoningPrefix,
|
||||
stripLeakedReasoningPreamble,
|
||||
} from './reasoningLeakSanitizer.ts'
|
||||
|
||||
describe('reasoning leak sanitizer', () => {
|
||||
test('strips explicit internal reasoning preambles', () => {
|
||||
const text =
|
||||
'The user just said "hey" - a simple greeting. I should respond briefly and friendly.\n\nHey! How can I help you today?'
|
||||
|
||||
expect(looksLikeLeakedReasoningPrefix(text)).toBe(true)
|
||||
expect(stripLeakedReasoningPreamble(text)).toBe(
|
||||
'Hey! How can I help you today?',
|
||||
)
|
||||
})
|
||||
|
||||
test('does not strip normal user-facing advice that mentions "the user should"', () => {
|
||||
const text =
|
||||
'The user should reset their password immediately.\n\nHere are the steps...'
|
||||
|
||||
expect(looksLikeLeakedReasoningPrefix(text)).toBe(false)
|
||||
expect(shouldBufferPotentialReasoningPrefix(text)).toBe(false)
|
||||
expect(stripLeakedReasoningPreamble(text)).toBe(text)
|
||||
})
|
||||
|
||||
test('does not strip legitimate first-person advice about responding to an incident', () => {
|
||||
const text =
|
||||
'I need to respond to this security incident immediately. The system is compromised.\n\nHere are the remediation steps...'
|
||||
|
||||
expect(looksLikeLeakedReasoningPrefix(text)).toBe(false)
|
||||
expect(shouldBufferPotentialReasoningPrefix(text)).toBe(false)
|
||||
expect(stripLeakedReasoningPreamble(text)).toBe(text)
|
||||
})
|
||||
|
||||
test('does not strip legitimate first-person advice about answering a support ticket', () => {
|
||||
const text =
|
||||
'I need to answer the support ticket before end of day. The customer is waiting.\n\nHere is the response I drafted...'
|
||||
|
||||
expect(looksLikeLeakedReasoningPrefix(text)).toBe(false)
|
||||
expect(shouldBufferPotentialReasoningPrefix(text)).toBe(false)
|
||||
expect(stripLeakedReasoningPreamble(text)).toBe(text)
|
||||
})
|
||||
})
|
||||
@@ -1,54 +0,0 @@
|
||||
const EXPLICIT_REASONING_START_RE =
|
||||
/^\s*(i should\b|i need to\b|let me think\b|the task\b|the request\b)/i
|
||||
|
||||
const EXPLICIT_REASONING_META_RE =
|
||||
/\b(user|request|question|prompt|message|task|greeting|small talk|briefly|friendly|concise)\b/i
|
||||
|
||||
const USER_META_START_RE =
|
||||
/^\s*the user\s+(just\s+)?(said|asked|is asking|wants|wanted|mentioned|seems|appears)\b/i
|
||||
|
||||
const USER_REASONING_RE =
|
||||
/^\s*the user\s+(just\s+)?(said|asked|is asking|wants|wanted|mentioned|seems|appears)\b[\s\S]*\b(i should|i need to|let me think|respond|reply|answer|greeting|small talk|briefly|friendly|concise)\b/i
|
||||
|
||||
export function shouldBufferPotentialReasoningPrefix(text: string): boolean {
|
||||
const normalized = text.trim()
|
||||
if (!normalized) return false
|
||||
|
||||
if (looksLikeLeakedReasoningPrefix(normalized)) {
|
||||
return true
|
||||
}
|
||||
|
||||
const hasParagraphBoundary = /\n\s*\n/.test(normalized)
|
||||
if (hasParagraphBoundary) {
|
||||
return false
|
||||
}
|
||||
|
||||
return (
|
||||
EXPLICIT_REASONING_START_RE.test(normalized) ||
|
||||
USER_META_START_RE.test(normalized)
|
||||
)
|
||||
}
|
||||
|
||||
export function looksLikeLeakedReasoningPrefix(text: string): boolean {
|
||||
const normalized = text.trim()
|
||||
if (!normalized) return false
|
||||
return (
|
||||
(EXPLICIT_REASONING_START_RE.test(normalized) &&
|
||||
EXPLICIT_REASONING_META_RE.test(normalized)) ||
|
||||
USER_REASONING_RE.test(normalized)
|
||||
)
|
||||
}
|
||||
|
||||
export function stripLeakedReasoningPreamble(text: string): string {
|
||||
const normalized = text.replace(/\r\n/g, '\n')
|
||||
const parts = normalized.split(/\n\s*\n/)
|
||||
if (parts.length < 2) return text
|
||||
|
||||
const first = parts[0]?.trim() ?? ''
|
||||
if (!looksLikeLeakedReasoningPrefix(first)) {
|
||||
return text
|
||||
}
|
||||
|
||||
const remainder = parts.slice(1).join('\n\n').trim()
|
||||
return remainder || text
|
||||
}
|
||||
183
src/services/api/thinkTagSanitizer.test.ts
Normal file
183
src/services/api/thinkTagSanitizer.test.ts
Normal file
@@ -0,0 +1,183 @@
|
||||
import { describe, expect, test } from 'bun:test'
|
||||
|
||||
import {
|
||||
createThinkTagFilter,
|
||||
stripThinkTags,
|
||||
} from './thinkTagSanitizer.ts'
|
||||
|
||||
describe('stripThinkTags — whole-text cleanup', () => {
|
||||
test('strips closed think pair', () => {
|
||||
expect(stripThinkTags('<think>reasoning</think>Hello')).toBe('Hello')
|
||||
})
|
||||
|
||||
test('strips closed thinking pair', () => {
|
||||
expect(stripThinkTags('<thinking>x</thinking>Out')).toBe('Out')
|
||||
})
|
||||
|
||||
test('strips closed reasoning pair', () => {
|
||||
expect(stripThinkTags('<reasoning>x</reasoning>Out')).toBe('Out')
|
||||
})
|
||||
|
||||
test('strips REASONING_SCRATCHPAD pair', () => {
|
||||
expect(stripThinkTags('<REASONING_SCRATCHPAD>plan</REASONING_SCRATCHPAD>Answer'))
|
||||
.toBe('Answer')
|
||||
})
|
||||
|
||||
test('is case-insensitive', () => {
|
||||
expect(stripThinkTags('<THINKING>x</THINKING>out')).toBe('out')
|
||||
expect(stripThinkTags('<Think>x</Think>out')).toBe('out')
|
||||
})
|
||||
|
||||
test('handles attributes on open tag', () => {
|
||||
expect(stripThinkTags('<think id="plan-1">reason</think>ok')).toBe('ok')
|
||||
})
|
||||
|
||||
test('strips unterminated open tag at block boundary', () => {
|
||||
expect(stripThinkTags('<think>reasoning that never closes')).toBe('')
|
||||
})
|
||||
|
||||
test('strips unterminated open tag after newline', () => {
|
||||
// Block-boundary match consumes the leading newline, same as hermes.
|
||||
expect(stripThinkTags('Answer: 42\n<think>second-guess myself'))
|
||||
.toBe('Answer: 42')
|
||||
})
|
||||
|
||||
test('strips orphan close tag', () => {
|
||||
expect(stripThinkTags('trailing </think>done')).toBe('trailing done')
|
||||
})
|
||||
|
||||
test('strips multiple blocks', () => {
|
||||
expect(stripThinkTags('<think>a</think>B<think>c</think>D')).toBe('BD')
|
||||
})
|
||||
|
||||
test('handles reasoning mid-response after content', () => {
|
||||
expect(stripThinkTags('Answer: 42\n<think>double-check</think>\nDone'))
|
||||
.toBe('Answer: 42\n\nDone')
|
||||
})
|
||||
|
||||
test('handles nested-looking tags (lazy match + orphan cleanup)', () => {
|
||||
expect(stripThinkTags('<think><think>x</think></think>y')).toBe('y')
|
||||
})
|
||||
|
||||
test('preserves legitimate non-think tags', () => {
|
||||
expect(stripThinkTags('use <div> and <span>')).toBe('use <div> and <span>')
|
||||
})
|
||||
|
||||
test('preserves text without any tags', () => {
|
||||
expect(stripThinkTags('Hello, world. I should respond briefly.')).toBe(
|
||||
'Hello, world. I should respond briefly.',
|
||||
)
|
||||
})
|
||||
|
||||
test('handles empty input', () => {
|
||||
expect(stripThinkTags('')).toBe('')
|
||||
})
|
||||
})
|
||||
|
||||
describe('createThinkTagFilter — streaming state machine', () => {
|
||||
test('passes through plain text', () => {
|
||||
const f = createThinkTagFilter()
|
||||
expect(f.feed('Hello, ')).toBe('Hello, ')
|
||||
expect(f.feed('world!')).toBe('world!')
|
||||
expect(f.flush()).toBe('')
|
||||
})
|
||||
|
||||
test('strips a complete think block in one chunk', () => {
|
||||
const f = createThinkTagFilter()
|
||||
expect(f.feed('pre<think>reason</think>post')).toBe('prepost')
|
||||
expect(f.flush()).toBe('')
|
||||
})
|
||||
|
||||
test('handles open tag split across deltas', () => {
|
||||
const f = createThinkTagFilter()
|
||||
expect(f.feed('before<th')).toBe('before')
|
||||
expect(f.feed('ink>reason</think>after')).toBe('after')
|
||||
expect(f.flush()).toBe('')
|
||||
})
|
||||
|
||||
test('handles close tag split across deltas', () => {
|
||||
const f = createThinkTagFilter()
|
||||
expect(f.feed('<think>reason</th')).toBe('')
|
||||
expect(f.feed('ink>keep')).toBe('keep')
|
||||
expect(f.flush()).toBe('')
|
||||
})
|
||||
|
||||
test('handles tag split on bare < boundary', () => {
|
||||
const f = createThinkTagFilter()
|
||||
expect(f.feed('leading <')).toBe('leading ')
|
||||
expect(f.feed('think>inner</think>tail')).toBe('tail')
|
||||
expect(f.flush()).toBe('')
|
||||
})
|
||||
|
||||
test('preserves partial non-tag < at boundary when next char rules it out', () => {
|
||||
const f = createThinkTagFilter()
|
||||
// "<d" — 'd' cannot start any of our tag names, so emit immediately
|
||||
expect(f.feed('pre<d')).toBe('pre<d')
|
||||
expect(f.feed('iv>rest')).toBe('iv>rest')
|
||||
expect(f.flush()).toBe('')
|
||||
})
|
||||
|
||||
test('case-insensitive streaming', () => {
|
||||
const f = createThinkTagFilter()
|
||||
expect(f.feed('<THINKING>x</THINKING>out')).toBe('out')
|
||||
expect(f.flush()).toBe('')
|
||||
})
|
||||
|
||||
test('unterminated open tag — flush drops remainder', () => {
|
||||
const f = createThinkTagFilter()
|
||||
expect(f.feed('<think>reasoning with no close ')).toBe('')
|
||||
expect(f.feed('and more reasoning')).toBe('')
|
||||
expect(f.flush()).toBe('')
|
||||
expect(f.isInsideBlock()).toBe(false)
|
||||
})
|
||||
|
||||
test('multiple blocks in single feed', () => {
|
||||
const f = createThinkTagFilter()
|
||||
expect(f.feed('<think>a</think>B<think>c</think>D')).toBe('BD')
|
||||
expect(f.flush()).toBe('')
|
||||
})
|
||||
|
||||
test('flush after clean stream emits nothing extra', () => {
|
||||
const f = createThinkTagFilter()
|
||||
expect(f.feed('complete message')).toBe('complete message')
|
||||
expect(f.flush()).toBe('')
|
||||
})
|
||||
|
||||
test('flush of bare < at end emits it (not a tag prefix)', () => {
|
||||
const f = createThinkTagFilter()
|
||||
// bare '<' held back; flush emits it since it has no tag-name chars
|
||||
expect(f.feed('x <')).toBe('x ')
|
||||
expect(f.flush()).toBe('<')
|
||||
})
|
||||
|
||||
test('flush of partial tag-name prefix at end drops it', () => {
|
||||
const f = createThinkTagFilter()
|
||||
expect(f.feed('x <thi')).toBe('x ')
|
||||
expect(f.flush()).toBe('')
|
||||
})
|
||||
|
||||
test('handles attributes on streaming open tag', () => {
|
||||
const f = createThinkTagFilter()
|
||||
expect(f.feed('<think type="plan">reason</think>ok')).toBe('ok')
|
||||
expect(f.flush()).toBe('')
|
||||
})
|
||||
|
||||
test('mid-delta transition: content, reasoning, content', () => {
|
||||
const f = createThinkTagFilter()
|
||||
expect(f.feed('Answer: 42\n<think>')).toBe('Answer: 42\n')
|
||||
expect(f.feed('double-check')).toBe('')
|
||||
expect(f.feed('</think>\nDone')).toBe('\nDone')
|
||||
expect(f.flush()).toBe('')
|
||||
})
|
||||
|
||||
test('orphan close tag mid-stream is stripped on flush via safety-net behavior', () => {
|
||||
// Filter alone treats orphan close as "we're not inside", so it emits as-is.
|
||||
// Safety net (stripThinkTags on final text) removes orphans.
|
||||
const f = createThinkTagFilter()
|
||||
const chunk1 = f.feed('trailing ')
|
||||
const chunk2 = f.feed('</think>done')
|
||||
const final = chunk1 + chunk2 + f.flush()
|
||||
// Orphan close appears in stream output; safety net cleans it
|
||||
expect(stripThinkTags(final)).toBe('trailing done')
|
||||
})
|
||||
})
|
||||
162
src/services/api/thinkTagSanitizer.ts
Normal file
162
src/services/api/thinkTagSanitizer.ts
Normal file
@@ -0,0 +1,162 @@
|
||||
/**
|
||||
* Think-tag sanitizer for reasoning content leaks.
|
||||
*
|
||||
* Some OpenAI-compatible reasoning models (MiniMax M2.7, GLM-4.5/5, DeepSeek, Kimi K2,
|
||||
* self-hosted vLLM builds) emit chain-of-thought inline inside the `content` field using
|
||||
* XML-like tags instead of the separate `reasoning_content` channel. Example:
|
||||
*
|
||||
* <think>the user wants foo, let me check bar</think>Here is the answer: ...
|
||||
*
|
||||
* This module strips those blocks structurally (tag-based), independent of English
|
||||
* phrasings. Three layers:
|
||||
*
|
||||
* 1. `createThinkTagFilter()` — streaming state machine. Feeds deltas, emits only
|
||||
* the visible (non-reasoning) portion, and buffers partial tags across chunk
|
||||
* boundaries so `</th` + `ink>` still parses correctly.
|
||||
*
|
||||
* 2. `stripThinkTags()` — whole-text cleanup. Removes closed pairs, unterminated
|
||||
* opens at block boundaries, and orphan open/close tags. Used for non-streaming
|
||||
* responses and as a safety net after stream close.
|
||||
*
|
||||
* 3. Flush discards buffered partial tags at stream end (false-negative bias —
|
||||
* prefer losing a partial reasoning fragment over leaking it).
|
||||
*/
|
||||
|
||||
const TAG_NAMES = [
|
||||
'think',
|
||||
'thinking',
|
||||
'reasoning',
|
||||
'thought',
|
||||
'reasoning_scratchpad',
|
||||
] as const
|
||||
|
||||
const TAG_ALT = TAG_NAMES.join('|')
|
||||
|
||||
const OPEN_TAG_RE = new RegExp(`<\\s*(?:${TAG_ALT})\\b[^>]*>`, 'i')
|
||||
const CLOSE_TAG_RE = new RegExp(`<\\s*/\\s*(?:${TAG_ALT})\\s*>`, 'i')
|
||||
|
||||
const CLOSED_PAIR_RE_G = new RegExp(
|
||||
`<\\s*(${TAG_ALT})\\b[^>]*>[\\s\\S]*?<\\s*/\\s*\\1\\s*>`,
|
||||
'gi',
|
||||
)
|
||||
const UNTERMINATED_OPEN_RE = new RegExp(
|
||||
`(?:^|\\n)[ \\t]*<\\s*(?:${TAG_ALT})\\b[^>]*>[\\s\\S]*$`,
|
||||
'i',
|
||||
)
|
||||
const ORPHAN_TAG_RE_G = new RegExp(
|
||||
`<\\s*/?\\s*(?:${TAG_ALT})\\b[^>]*>\\s*`,
|
||||
'gi',
|
||||
)
|
||||
|
||||
const MAX_PARTIAL_TAG = 64
|
||||
|
||||
/**
|
||||
* Remove reasoning/thinking blocks from a complete text body.
|
||||
*
|
||||
* Handles:
|
||||
* - Closed pairs: <think>...</think> (lazy match, anywhere in text)
|
||||
* - Unterminated open tags at a block boundary: strips from the tag to end of string
|
||||
* - Orphan open or close tags (no matching partner)
|
||||
*
|
||||
* False-negative bias: prefers leaving a few tag characters in rare edge cases over
|
||||
* stripping legitimate content.
|
||||
*/
|
||||
export function stripThinkTags(text: string): string {
|
||||
if (!text) return text
|
||||
let out = text
|
||||
out = out.replace(CLOSED_PAIR_RE_G, '')
|
||||
out = out.replace(UNTERMINATED_OPEN_RE, '')
|
||||
out = out.replace(ORPHAN_TAG_RE_G, '')
|
||||
return out
|
||||
}
|
||||
|
||||
export interface ThinkTagFilter {
|
||||
feed(chunk: string): string
|
||||
flush(): string
|
||||
isInsideBlock(): boolean
|
||||
}
|
||||
|
||||
/**
|
||||
* Streaming state machine. Feed deltas, emits visible (non-reasoning) text.
|
||||
* Handles tags split across chunk boundaries by holding back a short tail buffer
|
||||
* whenever the current buffer ends with what looks like a partial tag.
|
||||
*/
|
||||
export function createThinkTagFilter(): ThinkTagFilter {
|
||||
let inside = false
|
||||
let buffer = ''
|
||||
|
||||
function findPartialTagStart(s: string): number {
|
||||
const lastLt = s.lastIndexOf('<')
|
||||
if (lastLt === -1) return -1
|
||||
if (s.indexOf('>', lastLt) !== -1) return -1
|
||||
const tail = s.slice(lastLt)
|
||||
if (tail.length > MAX_PARTIAL_TAG) return -1
|
||||
|
||||
const m = /^<\s*\/?\s*([a-zA-Z_]\w*)?\s*$/.exec(tail)
|
||||
if (!m) return -1
|
||||
const partialName = (m[1] ?? '').toLowerCase()
|
||||
if (!partialName) return lastLt
|
||||
if (TAG_NAMES.some(name => name.startsWith(partialName))) return lastLt
|
||||
return -1
|
||||
}
|
||||
|
||||
function feed(chunk: string): string {
|
||||
if (!chunk) return ''
|
||||
buffer += chunk
|
||||
let out = ''
|
||||
|
||||
while (buffer.length > 0) {
|
||||
if (!inside) {
|
||||
const open = OPEN_TAG_RE.exec(buffer)
|
||||
if (open) {
|
||||
out += buffer.slice(0, open.index)
|
||||
buffer = buffer.slice(open.index + open[0].length)
|
||||
inside = true
|
||||
continue
|
||||
}
|
||||
|
||||
const partialStart = findPartialTagStart(buffer)
|
||||
if (partialStart === -1) {
|
||||
out += buffer
|
||||
buffer = ''
|
||||
} else {
|
||||
out += buffer.slice(0, partialStart)
|
||||
buffer = buffer.slice(partialStart)
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
const close = CLOSE_TAG_RE.exec(buffer)
|
||||
if (close) {
|
||||
buffer = buffer.slice(close.index + close[0].length)
|
||||
inside = false
|
||||
continue
|
||||
}
|
||||
|
||||
const partialStart = findPartialTagStart(buffer)
|
||||
if (partialStart === -1) {
|
||||
buffer = ''
|
||||
} else {
|
||||
buffer = buffer.slice(partialStart)
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
return out
|
||||
}
|
||||
|
||||
function flush(): string {
|
||||
const held = buffer
|
||||
const wasInside = inside
|
||||
buffer = ''
|
||||
inside = false
|
||||
|
||||
if (wasInside) return ''
|
||||
if (!held) return ''
|
||||
|
||||
if (/^<\s*\/?\s*[a-zA-Z_]/.test(held)) return ''
|
||||
return held
|
||||
}
|
||||
|
||||
return { feed, flush, isInsideBlock: () => inside }
|
||||
}
|
||||
@@ -70,7 +70,7 @@ describe('runAutoFixCheck', () => {
|
||||
|
||||
test('handles timeout gracefully', async () => {
|
||||
const result = await runAutoFixCheck({
|
||||
lint: 'sleep 10',
|
||||
lint: 'node -e "setTimeout(() => {}, 10000)"',
|
||||
timeout: 100,
|
||||
|
||||
cwd: '/tmp',
|
||||
|
||||
@@ -46,14 +46,31 @@ async function runCommand(
|
||||
|
||||
const killTree = () => {
|
||||
try {
|
||||
if (!isWindows && proc.pid) {
|
||||
if (isWindows && proc.pid) {
|
||||
// shell=true on Windows can leave child commands running unless we
|
||||
// terminate the full process tree.
|
||||
const killer = spawn('taskkill', ['/pid', String(proc.pid), '/T', '/F'], {
|
||||
windowsHide: true,
|
||||
stdio: 'ignore',
|
||||
})
|
||||
killer.unref()
|
||||
return
|
||||
}
|
||||
|
||||
if (proc.pid) {
|
||||
// Kill the entire process group
|
||||
process.kill(-proc.pid, 'SIGTERM')
|
||||
} else {
|
||||
proc.kill('SIGTERM')
|
||||
return
|
||||
}
|
||||
|
||||
proc.kill('SIGTERM')
|
||||
} catch {
|
||||
// Process may have already exited
|
||||
// Process may have already exited; fallback to direct child kill.
|
||||
try {
|
||||
proc.kill('SIGTERM')
|
||||
} catch {
|
||||
// Ignore final fallback errors.
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -2524,7 +2524,7 @@ export async function transformResultContent(
|
||||
return [
|
||||
{
|
||||
type: 'text',
|
||||
text: resultContent.text,
|
||||
text: recursivelySanitizeUnicode(resultContent.text) as string,
|
||||
},
|
||||
]
|
||||
case 'audio': {
|
||||
@@ -2569,7 +2569,9 @@ export async function transformResultContent(
|
||||
return [
|
||||
{
|
||||
type: 'text',
|
||||
text: `${prefix}${resource.text}`,
|
||||
text: recursivelySanitizeUnicode(
|
||||
`${prefix}${resource.text}`,
|
||||
) as string,
|
||||
},
|
||||
]
|
||||
} else if ('blob' in resource) {
|
||||
|
||||
@@ -26,10 +26,10 @@ test('initializeWiki creates the expected wiki scaffold', async () => {
|
||||
|
||||
expect(result.alreadyExisted).toBe(false)
|
||||
expect(result.createdFiles).toEqual([
|
||||
'.openclaude/wiki/schema.md',
|
||||
'.openclaude/wiki/index.md',
|
||||
'.openclaude/wiki/log.md',
|
||||
'.openclaude/wiki/pages/architecture.md',
|
||||
join('.openclaude', 'wiki', 'schema.md'),
|
||||
join('.openclaude', 'wiki', 'index.md'),
|
||||
join('.openclaude', 'wiki', 'log.md'),
|
||||
join('.openclaude', 'wiki', 'pages', 'architecture.md'),
|
||||
])
|
||||
expect(await readFile(paths.schemaFile, 'utf8')).toContain(
|
||||
'# OpenClaude Wiki Schema',
|
||||
|
||||
@@ -59,7 +59,7 @@ export function generatePrompt(): string {
|
||||
## Configurable settings list
|
||||
The following settings are available for you to change:
|
||||
|
||||
### Global Settings (stored in ~/.claude.json)
|
||||
### Global Settings (stored in ~/.openclaude.json)
|
||||
${globalSettings.join('\n')}
|
||||
|
||||
### Project Settings (stored in settings.json)
|
||||
|
||||
@@ -15,6 +15,7 @@ import {
|
||||
} from '../../utils/mcpOutputStorage.js'
|
||||
import { getSettings_DEPRECATED } from '../../utils/settings/settings.js'
|
||||
import { asSystemPrompt } from '../../utils/systemPromptType.js'
|
||||
import { ssrfGuardedLookup } from '../../utils/hooks/ssrfGuard.js'
|
||||
import { isPreapprovedHost } from './preapproved.js'
|
||||
import { makeSecondaryModelPrompt } from './prompt.js'
|
||||
|
||||
@@ -281,6 +282,7 @@ export async function getWithPermittedRedirects(
|
||||
maxRedirects: 0,
|
||||
responseType: 'arraybuffer',
|
||||
maxContentLength: MAX_HTTP_CONTENT_LENGTH,
|
||||
lookup: ssrfGuardedLookup,
|
||||
headers: {
|
||||
Accept: 'text/markdown, text/html, */*',
|
||||
'User-Agent': getWebFetchUserAgent(),
|
||||
|
||||
@@ -693,7 +693,7 @@ export function refreshAwsAuth(awsAuthRefresh: string): Promise<boolean> {
|
||||
'AWS auth refresh timed out after 3 minutes. Run your auth command manually in a separate terminal.',
|
||||
)
|
||||
: chalk.red(
|
||||
'Error running awsAuthRefresh (in settings or ~/.claude.json):',
|
||||
'Error running awsAuthRefresh (in settings or ~/.openclaude.json):',
|
||||
)
|
||||
// biome-ignore lint/suspicious/noConsole:: intentional console output
|
||||
console.error(message)
|
||||
@@ -771,7 +771,7 @@ async function getAwsCredsFromCredentialExport(): Promise<{
|
||||
}
|
||||
} catch (e) {
|
||||
const message = chalk.red(
|
||||
'Error getting AWS credentials from awsCredentialExport (in settings or ~/.claude.json):',
|
||||
'Error getting AWS credentials from awsCredentialExport (in settings or ~/.openclaude.json):',
|
||||
)
|
||||
if (e instanceof Error) {
|
||||
// biome-ignore lint/suspicious/noConsole:: intentional console output
|
||||
@@ -961,7 +961,7 @@ export function refreshGcpAuth(gcpAuthRefresh: string): Promise<boolean> {
|
||||
'GCP auth refresh timed out after 3 minutes. Run your auth command manually in a separate terminal.',
|
||||
)
|
||||
: chalk.red(
|
||||
'Error running gcpAuthRefresh (in settings or ~/.claude.json):',
|
||||
'Error running gcpAuthRefresh (in settings or ~/.openclaude.json):',
|
||||
)
|
||||
// biome-ignore lint/suspicious/noConsole:: intentional console output
|
||||
console.error(message)
|
||||
@@ -1959,7 +1959,7 @@ export async function validateForceLoginOrg(): Promise<OrgValidationResult> {
|
||||
|
||||
// Always fetch the authoritative org UUID from the profile endpoint.
|
||||
// Even keychain-sourced tokens verify server-side: the cached org UUID
|
||||
// in ~/.claude.json is user-writable and cannot be trusted.
|
||||
// in ~/.openclaude.json is user-writable and cannot be trusted.
|
||||
const { source } = getAuthTokenSource()
|
||||
const isEnvVarToken =
|
||||
source === 'CLAUDE_CODE_OAUTH_TOKEN' ||
|
||||
|
||||
@@ -28,7 +28,7 @@ import { getSettingsForSource } from './settings/settings.js'
|
||||
* is lazy-initialized) and ensure Node.js compatibility.
|
||||
*
|
||||
* This is safe to call before the trust dialog because we only read from
|
||||
* user-controlled files (~/.claude/settings.json and ~/.claude.json),
|
||||
* user-controlled files (~/.claude/settings.json and ~/.openclaude.json),
|
||||
* not from project-level settings.
|
||||
*/
|
||||
export function applyExtraCACertsFromConfig(): void {
|
||||
@@ -52,7 +52,7 @@ export function applyExtraCACertsFromConfig(): void {
|
||||
* after the trust dialog. But we need the CA cert early to establish the TLS
|
||||
* connection to an HTTPS proxy during init().
|
||||
*
|
||||
* We read from global config (~/.claude.json) and user settings
|
||||
* We read from global config (~/.openclaude.json) and user settings
|
||||
* (~/.claude/settings.json). These are user-controlled files that don't
|
||||
* require trust approval.
|
||||
*/
|
||||
|
||||
@@ -355,7 +355,7 @@ exec ${command}
|
||||
*
|
||||
* Only positive detections are persisted. A negative result from the
|
||||
* filesystem scan is not cached, because it may come from a machine that
|
||||
* shares ~/.claude.json but has no local Chrome (e.g. a remote dev
|
||||
* shares ~/.openclaude.json but has no local Chrome (e.g. a remote dev
|
||||
* environment using the bridge), and caching it would permanently poison
|
||||
* auto-enable for every session on every machine that reads that config.
|
||||
*/
|
||||
|
||||
@@ -918,7 +918,7 @@ let configCacheHits = 0
|
||||
let configCacheMisses = 0
|
||||
// Session-total count of actual disk writes to the global config file.
|
||||
// Exposed for internal-only dev diagnostics (see inc-4552) so anomalous write
|
||||
// rates surface in the UI before they corrupt ~/.claude.json.
|
||||
// rates surface in the UI before they corrupt ~/.openclaude.json.
|
||||
let globalConfigWriteCount = 0
|
||||
|
||||
export function getGlobalConfigWriteCount(): number {
|
||||
@@ -1257,7 +1257,7 @@ function saveConfigWithLock<A extends object>(
|
||||
const currentConfig = getConfig(file, createDefault)
|
||||
if (file === getGlobalClaudeFile() && wouldLoseAuthState(currentConfig)) {
|
||||
logForDebugging(
|
||||
'saveConfigWithLock: re-read config is missing auth that cache has; refusing to write to avoid wiping ~/.claude.json. See GH #3117.',
|
||||
'saveConfigWithLock: re-read config is missing auth that cache has; refusing to write to avoid wiping ~/.openclaude.json. See GH #3117.',
|
||||
{ level: 'error' },
|
||||
)
|
||||
logEvent('tengu_config_auth_loss_prevented', {})
|
||||
|
||||
@@ -253,7 +253,7 @@ async function resolveClaudePath(): Promise<string> {
* Check whether the OS-level protocol handler is already registered AND
* points at the expected `claude` binary. Reads the registration artifact
* directly (symlink target, .desktop Exec line, registry value) rather than
* a cached flag in ~/.claude.json, so:
* a cached flag in ~/.openclaude.json, so:
* - the check is per-machine (config can sync across machines; OS state can't)
* - stale paths self-heal (install-method change → re-register next session)
* - deleted artifacts self-heal
@@ -311,7 +311,7 @@ export async function ensureDeepLinkProtocolRegistered(): Promise<void> {
// EACCES/ENOSPC are deterministic — retrying next session won't help.
// Throttle to once per 24h so a read-only ~/.local/share/applications
// doesn't generate a failure event on every startup. Marker lives in
// ~/.claude (per-machine, not synced) rather than ~/.claude.json (can sync).
// ~/.claude (per-machine, not synced) rather than ~/.openclaude.json (can sync).
const failureMarkerPath = path.join(
getClaudeConfigHomeDir(),
'.deep-link-register-failed',
62
src/utils/env.test.ts
Normal file
@@ -0,0 +1,62 @@
import { afterEach, beforeEach, expect, test } from 'bun:test'
import { mkdtempSync, rmSync, writeFileSync } from 'fs'
import { tmpdir } from 'os'
import { join } from 'path'

const originalEnv = {
CLAUDE_CONFIG_DIR: process.env.CLAUDE_CONFIG_DIR,
CLAUDE_CODE_CUSTOM_OAUTH_URL: process.env.CLAUDE_CODE_CUSTOM_OAUTH_URL,
USER_TYPE: process.env.USER_TYPE,
}

let tempDir: string

beforeEach(() => {
tempDir = mkdtempSync(join(tmpdir(), 'openclaude-env-test-'))
process.env.CLAUDE_CONFIG_DIR = tempDir
delete process.env.CLAUDE_CODE_CUSTOM_OAUTH_URL
delete process.env.USER_TYPE
})

afterEach(() => {
rmSync(tempDir, { recursive: true, force: true })
if (originalEnv.CLAUDE_CONFIG_DIR === undefined) {
delete process.env.CLAUDE_CONFIG_DIR
} else {
process.env.CLAUDE_CONFIG_DIR = originalEnv.CLAUDE_CONFIG_DIR
}
if (originalEnv.CLAUDE_CODE_CUSTOM_OAUTH_URL === undefined) {
delete process.env.CLAUDE_CODE_CUSTOM_OAUTH_URL
} else {
process.env.CLAUDE_CODE_CUSTOM_OAUTH_URL = originalEnv.CLAUDE_CODE_CUSTOM_OAUTH_URL
}
if (originalEnv.USER_TYPE === undefined) {
delete process.env.USER_TYPE
} else {
process.env.USER_TYPE = originalEnv.USER_TYPE
}
})

async function importFreshEnvModule() {
return import(`./env.js?ts=${Date.now()}-${Math.random()}`)
}

// getGlobalClaudeFile — three migration branches

test('getGlobalClaudeFile: new install returns .openclaude.json when neither file exists', async () => {
const { getGlobalClaudeFile } = await importFreshEnvModule()
expect(getGlobalClaudeFile()).toBe(join(tempDir, '.openclaude.json'))
})

test('getGlobalClaudeFile: existing user keeps .claude.json when only legacy file exists', async () => {
writeFileSync(join(tempDir, '.claude.json'), '{}')
const { getGlobalClaudeFile } = await importFreshEnvModule()
expect(getGlobalClaudeFile()).toBe(join(tempDir, '.claude.json'))
})

test('getGlobalClaudeFile: migrated user uses .openclaude.json when both files exist', async () => {
writeFileSync(join(tempDir, '.claude.json'), '{}')
writeFileSync(join(tempDir, '.openclaude.json'), '{}')
const { getGlobalClaudeFile } = await importFreshEnvModule()
expect(getGlobalClaudeFile()).toBe(join(tempDir, '.openclaude.json'))
})

@@ -21,8 +21,21 @@ export const getGlobalClaudeFile = memoize((): string => {
return join(getClaudeConfigHomeDir(), '.config.json')
}

const filename = `.claude${fileSuffixForOauthConfig()}.json`
return join(process.env.CLAUDE_CONFIG_DIR || homedir(), filename)
const oauthSuffix = fileSuffixForOauthConfig()
const configDir = process.env.CLAUDE_CONFIG_DIR || homedir()

// Default to .openclaude.json. Fall back to .claude.json only if the new
// file doesn't exist yet and the legacy one does (same migration pattern
// as resolveClaudeConfigHomeDir for the config directory).
const newFilename = `.openclaude${oauthSuffix}.json`
const legacyFilename = `.claude${oauthSuffix}.json`
if (
!getFsImplementation().existsSync(join(configDir, newFilename)) &&
getFsImplementation().existsSync(join(configDir, legacyFilename))
) {
return join(configDir, legacyFilename)
}
return join(configDir, newFilename)
})

const hasInternetAccess = memoize(async (): Promise<boolean> => {
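
Stripped of the oauth suffix and the injectable fs wrapper, the migration rule reduces to a three-branch decision, exactly the branches the new tests cover. A minimal standalone sketch using Node's fs directly:

import { existsSync } from 'fs'
import { join } from 'path'

function resolveConfigFile(configDir: string): string {
  const newFile = join(configDir, '.openclaude.json')
  const legacyFile = join(configDir, '.claude.json')
  // neither exists → new install, use the new name
  // legacy only    → existing user, keep reading the legacy file
  // both exist     → already migrated, prefer the new file
  if (!existsSync(newFile) && existsSync(legacyFile)) return legacyFile
  return newFile
}
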
@@ -24,7 +24,7 @@ type CachedParse = { ok: true; value: unknown } | { ok: false }
// lodash memoize default resolver = first arg only).
// Skip caching above this size — the LRU stores the full string as the key,
// so a 200KB config file would pin ~10MB in #keyList across 50 slots. Large
// inputs like ~/.claude.json also change between reads (numStartups bumps on
// inputs like ~/.openclaude.json also change between reads (numStartups bumps on
// every CC startup), so the cache never hits anyway.
const PARSE_CACHE_MAX_KEY_BYTES = 8 * 1024
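
The size guard in isolation: a plain Map stands in for the LRU here, and the function name is illustrative:

const MAX_KEY_BYTES = 8 * 1024

function cachedJsonParse(input: string, cache: Map<string, unknown>): unknown {
  // Oversized inputs skip the cache entirely: the cache would pin the whole
  // string as its key, and large configs change between reads anyway.
  if (Buffer.byteLength(input, 'utf8') > MAX_KEY_BYTES) return JSON.parse(input)
  if (cache.has(input)) return cache.get(input)
  const value = JSON.parse(input)
  cache.set(input, value)
  return value
}
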
@@ -44,9 +44,10 @@ function getCandidateLocalBinaryPaths(localInstallDir: string): string[] {
}

export function isManagedLocalInstallationPath(execPath: string): boolean {
const normalizedExecPath = execPath.replace(/\\+/g, '/')
return (
execPath.includes('/.openclaude/local/node_modules/') ||
execPath.includes('/.claude/local/node_modules/')
normalizedExecPath.includes('/.openclaude/local/node_modules/') ||
normalizedExecPath.includes('/.claude/local/node_modules/')
)
}
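
The one-line fix matters because process.execPath uses backslashes on Windows, so the literal forward-slash checks above could never match there. A quick trace:

const winPath = 'C:\\Users\\me\\.claude\\local\\node_modules\\.bin\\claude'

// Before: the raw path never contains the forward-slash marker.
winPath.includes('/.claude/local/node_modules/') // → false

// After: normalizing separators first makes the same check match.
winPath.replace(/\\+/g, '/').includes('/.claude/local/node_modules/') // → true
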
@@ -131,7 +131,7 @@ export function applySafeConfigEnvironmentVariables(): void {
: null
}

// Global config (~/.claude.json) is user-controlled. In CCD mode,
// Global config (~/.openclaude.json) is user-controlled. In CCD mode,
// filterSettingsEnv strips keys that were in the spawn env snapshot so
// the desktop host's operational vars (OTEL, etc.) are not overridden.
Object.assign(process.env, filterSettingsEnv(getGlobalConfig().env))

@@ -123,7 +123,6 @@ export const SAFE_ENV_VARS = new Set([
'ANTHROPIC_DEFAULT_SONNET_MODEL_DESCRIPTION',
'ANTHROPIC_DEFAULT_SONNET_MODEL_NAME',
'ANTHROPIC_DEFAULT_SONNET_MODEL_SUPPORTED_CAPABILITIES',
'ANTHROPIC_FOUNDRY_API_KEY',
'ANTHROPIC_MODEL',
'ANTHROPIC_SMALL_FAST_MODEL_AWS_REGION',
'ANTHROPIC_SMALL_FAST_MODEL',
@@ -107,3 +107,60 @@ test('official OpenAI base URLs now keep provider detection on openai for aliase
const { getAPIProvider } = await importFreshProvidersModule()
expect(getAPIProvider()).toBe('openai')
})

// isGithubNativeAnthropicMode

test('isGithubNativeAnthropicMode: false when CLAUDE_CODE_USE_GITHUB is not set', async () => {
clearProviderEnv()
process.env.OPENAI_MODEL = 'claude-sonnet-4-5'
const { isGithubNativeAnthropicMode } = await importFreshProvidersModule()
expect(isGithubNativeAnthropicMode()).toBe(false)
})

test('isGithubNativeAnthropicMode: true for bare claude- model via OPENAI_MODEL', async () => {
clearProviderEnv()
process.env.CLAUDE_CODE_USE_GITHUB = '1'
process.env.OPENAI_MODEL = 'claude-sonnet-4-5'
const { isGithubNativeAnthropicMode } = await importFreshProvidersModule()
expect(isGithubNativeAnthropicMode()).toBe(true)
})

test('isGithubNativeAnthropicMode: true for github:copilot:claude- compound format', async () => {
clearProviderEnv()
process.env.CLAUDE_CODE_USE_GITHUB = '1'
process.env.OPENAI_MODEL = 'github:copilot:claude-sonnet-4'
const { isGithubNativeAnthropicMode } = await importFreshProvidersModule()
expect(isGithubNativeAnthropicMode()).toBe(true)
})

test('isGithubNativeAnthropicMode: true when resolvedModel is a claude- model', async () => {
clearProviderEnv()
process.env.CLAUDE_CODE_USE_GITHUB = '1'
process.env.OPENAI_MODEL = 'github:copilot'
const { isGithubNativeAnthropicMode } = await importFreshProvidersModule()
expect(isGithubNativeAnthropicMode('claude-haiku-4-5')).toBe(true)
})

test('isGithubNativeAnthropicMode: false for generic github:copilot alias', async () => {
clearProviderEnv()
process.env.CLAUDE_CODE_USE_GITHUB = '1'
process.env.OPENAI_MODEL = 'github:copilot'
const { isGithubNativeAnthropicMode } = await importFreshProvidersModule()
expect(isGithubNativeAnthropicMode()).toBe(false)
})

test('isGithubNativeAnthropicMode: false for non-Claude model', async () => {
clearProviderEnv()
process.env.CLAUDE_CODE_USE_GITHUB = '1'
process.env.OPENAI_MODEL = 'gpt-4o'
const { isGithubNativeAnthropicMode } = await importFreshProvidersModule()
expect(isGithubNativeAnthropicMode()).toBe(false)
})

test('isGithubNativeAnthropicMode: false for github:copilot:gpt- model', async () => {
clearProviderEnv()
process.env.CLAUDE_CODE_USE_GITHUB = '1'
process.env.OPENAI_MODEL = 'github:copilot:gpt-4o'
const { isGithubNativeAnthropicMode } = await importFreshProvidersModule()
expect(isGithubNativeAnthropicMode()).toBe(false)
})
@@ -45,6 +45,24 @@ export function getAPIProvider(): APIProvider {
export function usesAnthropicAccountFlow(): boolean {
return getAPIProvider() === 'firstParty'
}

/**
* Returns true when the GitHub provider should use Anthropic's native API
* format instead of the OpenAI-compatible shim.
*
* Enabled when CLAUDE_CODE_USE_GITHUB=1 and the model string contains "claude-"
* anywhere (handles bare names like "claude-sonnet-4" and compound formats like
* "github:copilot:claude-sonnet-4" or any future provider-prefixed variants).
*
* api.githubcopilot.com supports Anthropic native format for Claude models,
* enabling prompt caching via cache_control blocks which significantly reduces
* per-turn token costs by caching the system prompt and tool definitions.
*/
export function isGithubNativeAnthropicMode(resolvedModel?: string): boolean {
if (!isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB)) return false
const model = resolvedModel?.trim() || process.env.OPENAI_MODEL?.trim() || ''
return model.toLowerCase().includes('claude-')
}
function isCodexModel(): boolean {
return shouldUseCodexTransport(
process.env.OPENAI_MODEL || '',
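
A quick trace of the detection against the model strings exercised by the tests above (all with CLAUDE_CODE_USE_GITHUB=1):

process.env.CLAUDE_CODE_USE_GITHUB = '1'

process.env.OPENAI_MODEL = 'claude-sonnet-4-5'
isGithubNativeAnthropicMode()                   // → true  (bare Claude model)

process.env.OPENAI_MODEL = 'github:copilot:claude-sonnet-4'
isGithubNativeAnthropicMode()                   // → true  (compound format)

process.env.OPENAI_MODEL = 'github:copilot'
isGithubNativeAnthropicMode()                   // → false (no "claude-" anywhere)
isGithubNativeAnthropicMode('claude-haiku-4-5') // → true  (resolved model wins)
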
@@ -64,6 +64,7 @@ export const DANGEROUS_FILES = [
'.profile',
'.ripgreprc',
'.mcp.json',
'.openclaude.json',
'.claude.json',
] as const

@@ -532,6 +532,7 @@ export async function gitPull(
): Promise<{ code: number; stderr: string }> {
logForDebugging(`git pull: cwd=${cwd} ref=${ref ?? 'default'}`)
const env = { ...process.env, ...GIT_NO_PROMPT_ENV }
const baseArgs = ['-c', 'core.hooksPath=/dev/null']
const credentialArgs = options?.disableCredentialHelper
? ['-c', 'credential.helper=']
: []
@@ -539,7 +540,7 @@ export async function gitPull(
if (ref) {
const fetchResult = await execFileNoThrowWithCwd(
gitExe(),
[...credentialArgs, 'fetch', 'origin', ref],
[...baseArgs, ...credentialArgs, 'fetch', 'origin', ref],
{ cwd, timeout: getPluginGitTimeoutMs(), stdin: 'ignore', env },
)

@@ -549,7 +550,7 @@ export async function gitPull(

const checkoutResult = await execFileNoThrowWithCwd(
gitExe(),
[...credentialArgs, 'checkout', ref],
[...baseArgs, ...credentialArgs, 'checkout', ref],
{ cwd, timeout: getPluginGitTimeoutMs(), stdin: 'ignore', env },
)

@@ -559,7 +560,7 @@ export async function gitPull(

const pullResult = await execFileNoThrowWithCwd(
gitExe(),
[...credentialArgs, 'pull', 'origin', ref],
[...baseArgs, ...credentialArgs, 'pull', 'origin', ref],
{ cwd, timeout: getPluginGitTimeoutMs(), stdin: 'ignore', env },
)
if (pullResult.code !== 0) {
@@ -571,7 +572,7 @@ export async function gitPull(

const result = await execFileNoThrowWithCwd(
gitExe(),
[...credentialArgs, 'pull', 'origin', 'HEAD'],
[...baseArgs, ...credentialArgs, 'pull', 'origin', 'HEAD'],
{ cwd, timeout: getPluginGitTimeoutMs(), stdin: 'ignore', env },
)
if (result.code !== 0) {
@@ -625,6 +626,8 @@ async function gitSubmoduleUpdate(
[
'-c',
'core.sshCommand=ssh -o BatchMode=yes -o StrictHostKeyChecking=yes',
'-c',
'core.hooksPath=/dev/null',
...credentialArgs,
'submodule',
'update',
@@ -810,6 +813,8 @@ export async function gitClone(
const args = [
'-c',
'core.sshCommand=ssh -o BatchMode=yes -o StrictHostKeyChecking=yes',
'-c',
'core.hooksPath=/dev/null',
'clone',
'--depth',
'1',
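
The recurring `-c core.hooksPath=/dev/null` override points git's hook directory at a path that cannot contain executables, so hooks committed inside a cloned plugin repo (post-checkout, post-merge, and so on) never run during these operations. A minimal standalone sketch of the same pattern with Node's standard child_process API; the argument list mirrors the diff, while the env handling is a plausible stand-in for GIT_NO_PROMPT_ENV:

import { execFile } from 'child_process'

const HARDENED_GIT_ARGS = [
  '-c', 'core.hooksPath=/dev/null', // repo-committed hooks never execute
  '-c', 'credential.helper=',       // no credential-helper side effects
]

execFile(
  'git',
  [...HARDENED_GIT_ARGS, 'fetch', 'origin', 'main'],
  // GIT_TERMINAL_PROMPT=0 makes git fail fast instead of prompting.
  { env: { ...process.env, GIT_TERMINAL_PROMPT: '0' } },
  error => {
    if (error) console.error('fetch failed:', error.message)
  },
)
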
@@ -1,9 +1,9 @@
import { afterEach, expect, mock, test } from 'bun:test'

import {
getLocalOpenAICompatibleProviderLabel,
listOpenAICompatibleModels,
} from './providerDiscovery.js'
async function loadProviderDiscoveryModule() {
// @ts-expect-error cache-busting query string for Bun module mocks
return import(`./providerDiscovery.js?ts=${Date.now()}-${Math.random()}`)
}

const originalFetch = globalThis.fetch
const originalEnv = {
@@ -16,6 +16,8 @@ afterEach(() => {
})

test('lists models from a local openai-compatible /models endpoint', async () => {
const { listOpenAICompatibleModels } = await loadProviderDiscoveryModule()

globalThis.fetch = mock((input, init) => {
const url = typeof input === 'string' ? input : input.url
expect(url).toBe('http://localhost:1234/v1/models')
@@ -47,6 +49,8 @@ test('lists models from a local openai-compatible /models endpoint', async () =>
})

test('returns null when a local openai-compatible /models request fails', async () => {
const { listOpenAICompatibleModels } = await loadProviderDiscoveryModule()

globalThis.fetch = mock(() =>
Promise.resolve(new Response('not available', { status: 503 })),
) as typeof globalThis.fetch
@@ -56,13 +60,19 @@ test('returns null when a local openai-compatible /models request fails', async
).resolves.toBeNull()
})

test('detects LM Studio from the default localhost port', () => {
test('detects LM Studio from the default localhost port', async () => {
const { getLocalOpenAICompatibleProviderLabel } =
await loadProviderDiscoveryModule()

expect(getLocalOpenAICompatibleProviderLabel('http://localhost:1234/v1')).toBe(
'LM Studio',
)
})

test('detects common local openai-compatible providers by hostname', () => {
test('detects common local openai-compatible providers by hostname', async () => {
const { getLocalOpenAICompatibleProviderLabel } =
await loadProviderDiscoveryModule()

expect(
getLocalOpenAICompatibleProviderLabel('http://localai.local:8080/v1'),
).toBe('LocalAI')
@@ -71,8 +81,212 @@ test('detects common local openai-compatible providers by hostname', () => {
).toBe('vLLM')
})

test('falls back to a generic local openai-compatible label', () => {
test('falls back to a generic local openai-compatible label', async () => {
const { getLocalOpenAICompatibleProviderLabel } =
await loadProviderDiscoveryModule()

expect(
getLocalOpenAICompatibleProviderLabel('http://127.0.0.1:8080/v1'),
).toBe('Local OpenAI-compatible')
})

test('ollama generation readiness reports unreachable when tags endpoint is down', async () => {
const { probeOllamaGenerationReadiness } = await loadProviderDiscoveryModule()

const calledUrls: string[] = []
globalThis.fetch = mock(input => {
const url = typeof input === 'string' ? input : input.url
calledUrls.push(url)
return Promise.resolve(new Response('not available', { status: 503 }))
}) as typeof globalThis.fetch

await expect(
probeOllamaGenerationReadiness({
baseUrl: 'http://localhost:11434',
}),
).resolves.toMatchObject({
state: 'unreachable',
models: [],
})

expect(calledUrls).toEqual([
'http://localhost:11434/api/tags',
])
})

test('ollama generation readiness reports no models when server is reachable', async () => {
const { probeOllamaGenerationReadiness } = await loadProviderDiscoveryModule()

const calledUrls: string[] = []
globalThis.fetch = mock(input => {
const url = typeof input === 'string' ? input : input.url
calledUrls.push(url)
return Promise.resolve(
new Response(JSON.stringify({ models: [] }), {
status: 200,
headers: { 'Content-Type': 'application/json' },
}),
)
}) as typeof globalThis.fetch

await expect(
probeOllamaGenerationReadiness({
baseUrl: 'http://localhost:11434',
}),
).resolves.toMatchObject({
state: 'no_models',
models: [],
})

expect(calledUrls).toEqual([
'http://localhost:11434/api/tags',
])
})

test('ollama generation readiness reports generation_failed when requested model is missing', async () => {
const { probeOllamaGenerationReadiness } = await loadProviderDiscoveryModule()

const calledUrls: string[] = []
globalThis.fetch = mock(input => {
const url = typeof input === 'string' ? input : input.url
calledUrls.push(url)
return Promise.resolve(
new Response(
JSON.stringify({
models: [{ name: 'llama3.1:8b', size: 1024 }],
}),
{
status: 200,
headers: { 'Content-Type': 'application/json' },
},
),
)
}) as typeof globalThis.fetch

await expect(
probeOllamaGenerationReadiness({
baseUrl: 'http://localhost:11434',
model: 'qwen2.5-coder:7b',
}),
).resolves.toMatchObject({
state: 'generation_failed',
probeModel: 'qwen2.5-coder:7b',
detail: 'requested model not installed: qwen2.5-coder:7b',
})

expect(calledUrls).toEqual(['http://localhost:11434/api/tags'])
})

test('ollama generation readiness reports generation failures when chat probe fails', async () => {
const { probeOllamaGenerationReadiness } = await loadProviderDiscoveryModule()

globalThis.fetch = mock(input => {
const url = typeof input === 'string' ? input : input.url
if (url.endsWith('/api/tags')) {
return Promise.resolve(
new Response(
JSON.stringify({
models: [{ name: 'qwen2.5-coder:7b', size: 42 }],
}),
{
status: 200,
headers: { 'Content-Type': 'application/json' },
},
),
)
}

return Promise.resolve(new Response('model not found', { status: 404 }))
}) as typeof globalThis.fetch

await expect(
probeOllamaGenerationReadiness({
baseUrl: 'http://localhost:11434',
model: 'qwen2.5-coder:7b',
}),
).resolves.toMatchObject({
state: 'generation_failed',
probeModel: 'qwen2.5-coder:7b',
})
})

test('ollama generation readiness reports generation_failed when chat probe returns invalid JSON', async () => {
const { probeOllamaGenerationReadiness } = await loadProviderDiscoveryModule()

globalThis.fetch = mock(input => {
const url = typeof input === 'string' ? input : input.url
if (url.endsWith('/api/tags')) {
return Promise.resolve(
new Response(
JSON.stringify({
models: [{ name: 'llama3.1:8b', size: 1024 }],
}),
{
status: 200,
headers: { 'Content-Type': 'application/json' },
},
),
)
}

return Promise.resolve(
new Response('<html>proxy error</html>', {
status: 200,
headers: { 'Content-Type': 'text/html' },
}),
)
}) as typeof globalThis.fetch

await expect(
probeOllamaGenerationReadiness({
baseUrl: 'http://localhost:11434',
}),
).resolves.toMatchObject({
state: 'generation_failed',
probeModel: 'llama3.1:8b',
detail: 'invalid JSON response',
})
})

test('ollama generation readiness reports ready when chat probe succeeds', async () => {
const { probeOllamaGenerationReadiness } = await loadProviderDiscoveryModule()

globalThis.fetch = mock(input => {
const url = typeof input === 'string' ? input : input.url
if (url.endsWith('/api/tags')) {
return Promise.resolve(
new Response(
JSON.stringify({
models: [{ name: 'llama3.1:8b', size: 1024 }],
}),
{
status: 200,
headers: { 'Content-Type': 'application/json' },
},
),
)
}

return Promise.resolve(
new Response(
JSON.stringify({
message: { role: 'assistant', content: 'OK' },
done: true,
}),
{
status: 200,
headers: { 'Content-Type': 'application/json' },
},
),
)
}) as typeof globalThis.fetch

await expect(
probeOllamaGenerationReadiness({
baseUrl: 'http://localhost:11434',
}),
).resolves.toMatchObject({
state: 'ready',
probeModel: 'llama3.1:8b',
})
})
@@ -4,6 +4,13 @@ import { DEFAULT_OPENAI_BASE_URL } from '../services/api/providerConfig.js'
export const DEFAULT_OLLAMA_BASE_URL = 'http://localhost:11434'
export const DEFAULT_ATOMIC_CHAT_BASE_URL = 'http://127.0.0.1:1337'

export type OllamaGenerationReadiness = {
state: 'ready' | 'unreachable' | 'no_models' | 'generation_failed'
models: OllamaModelDescriptor[]
probeModel?: string
detail?: string
}

function withTimeoutSignal(timeoutMs: number): {
signal: AbortSignal
clear: () => void
@@ -20,6 +27,83 @@ function trimTrailingSlash(value: string): string {
return value.replace(/\/+$/, '')
}

function compactDetail(value: string, maxLength = 180): string {
const compact = value.trim().replace(/\s+/g, ' ')
if (!compact) {
return ''
}

if (compact.length <= maxLength) {
return compact
}

return `${compact.slice(0, maxLength)}...`
}

type OllamaTagsPayload = {
models?: Array<{
name?: string
size?: number
details?: {
family?: string
families?: string[]
parameter_size?: string
quantization_level?: string
}
}>
}

function normalizeOllamaModels(
payload: OllamaTagsPayload,
): OllamaModelDescriptor[] {
return (payload.models ?? [])
.filter(model => Boolean(model.name))
.map(model => ({
name: model.name!,
sizeBytes: typeof model.size === 'number' ? model.size : null,
family: model.details?.family ?? null,
families: model.details?.families ?? [],
parameterSize: model.details?.parameter_size ?? null,
quantizationLevel: model.details?.quantization_level ?? null,
}))
}

async function fetchOllamaModelsProbe(
baseUrl?: string,
timeoutMs = 5000,
): Promise<{
reachable: boolean
models: OllamaModelDescriptor[]
}> {
const { signal, clear } = withTimeoutSignal(timeoutMs)
try {
const response = await fetch(`${getOllamaApiBaseUrl(baseUrl)}/api/tags`, {
method: 'GET',
signal,
})

if (!response.ok) {
return {
reachable: false,
models: [],
}
}

const payload = (await response.json().catch(() => ({}))) as OllamaTagsPayload
return {
reachable: true,
models: normalizeOllamaModels(payload),
}
} catch {
return {
reachable: false,
models: [],
}
} finally {
clear()
}
}

export function getOllamaApiBaseUrl(baseUrl?: string): string {
const parsed = new URL(
baseUrl || process.env.OLLAMA_BASE_URL || DEFAULT_OLLAMA_BASE_URL,
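
For a typical /api/tags payload, the normalization above produces the following (values illustrative; OllamaTagsPayload is the module-local type defined in this hunk):

const sample: OllamaTagsPayload = {
  models: [
    {
      name: 'llama3.1:8b',
      size: 4920000000,
      details: { family: 'llama', parameter_size: '8.0B', quantization_level: 'Q4_K_M' },
    },
    { size: 1024 }, // no name → filtered out
  ],
}

normalizeOllamaModels(sample)
// → [{ name: 'llama3.1:8b', sizeBytes: 4920000000, family: 'llama',
//      families: [], parameterSize: '8.0B', quantizationLevel: 'Q4_K_M' }]
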
@@ -121,61 +205,15 @@ export function getLocalOpenAICompatibleProviderLabel(baseUrl?: string): string
}

export async function hasLocalOllama(baseUrl?: string): Promise<boolean> {
const { signal, clear } = withTimeoutSignal(1200)
try {
const response = await fetch(`${getOllamaApiBaseUrl(baseUrl)}/api/tags`, {
method: 'GET',
signal,
})
return response.ok
} catch {
return false
} finally {
clear()
}
const { reachable } = await fetchOllamaModelsProbe(baseUrl, 1200)
return reachable
}

export async function listOllamaModels(
baseUrl?: string,
): Promise<OllamaModelDescriptor[]> {
const { signal, clear } = withTimeoutSignal(5000)
try {
const response = await fetch(`${getOllamaApiBaseUrl(baseUrl)}/api/tags`, {
method: 'GET',
signal,
})
if (!response.ok) {
return []
}

const data = (await response.json()) as {
models?: Array<{
name?: string
size?: number
details?: {
family?: string
families?: string[]
parameter_size?: string
quantization_level?: string
}
}>
}

return (data.models ?? [])
.filter(model => Boolean(model.name))
.map(model => ({
name: model.name!,
sizeBytes: typeof model.size === 'number' ? model.size : null,
family: model.details?.family ?? null,
families: model.details?.families ?? [],
parameterSize: model.details?.parameter_size ?? null,
quantizationLevel: model.details?.quantization_level ?? null,
}))
} catch {
return []
} finally {
clear()
}
const { models } = await fetchOllamaModelsProbe(baseUrl, 5000)
return models
}

export async function listOpenAICompatibleModels(options?: {
@@ -294,3 +332,106 @@ export async function benchmarkOllamaModel(
clear()
}
}

export async function probeOllamaGenerationReadiness(options?: {
baseUrl?: string
model?: string
timeoutMs?: number
}): Promise<OllamaGenerationReadiness> {
const timeoutMs = options?.timeoutMs ?? 8000
const { reachable, models } = await fetchOllamaModelsProbe(
options?.baseUrl,
timeoutMs,
)
if (!reachable) {
return {
state: 'unreachable',
models: [],
}
}

if (models.length === 0) {
return {
state: 'no_models',
models: [],
}
}

const requestedModel = options?.model?.trim() || undefined
if (requestedModel && !models.some(model => model.name === requestedModel)) {
return {
state: 'generation_failed',
models,
probeModel: requestedModel,
detail: `requested model not installed: ${requestedModel}`,
}
}

const probeModel = requestedModel ?? models[0]!.name
const { signal, clear } = withTimeoutSignal(timeoutMs)

try {
const response = await fetch(`${getOllamaApiBaseUrl(options?.baseUrl)}/api/chat`, {
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
signal,
body: JSON.stringify({
model: probeModel,
stream: false,
messages: [{ role: 'user', content: 'Reply with OK.' }],
options: {
temperature: 0,
num_predict: 8,
},
}),
})

if (!response.ok) {
const responseBody = await response.text().catch(() => '')
const detailSuffix = compactDetail(responseBody)
return {
state: 'generation_failed',
models,
probeModel,
detail: detailSuffix
? `status ${response.status}: ${detailSuffix}`
: `status ${response.status}`,
}
}

try {
await response.json()
} catch {
return {
state: 'generation_failed',
models,
probeModel,
detail: 'invalid JSON response',
}
}

return {
state: 'ready',
models,
probeModel,
}
} catch (error) {
const detail =
error instanceof Error
? error.name === 'AbortError'
? 'request timed out'
: error.message
: String(error)

return {
state: 'generation_failed',
models,
probeModel,
detail,
}
} finally {
clear()
}
}
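
A sketch of how a caller might consume the probe; the log messages are illustrative, not taken from this diff:

const readiness = await probeOllamaGenerationReadiness({
  baseUrl: 'http://localhost:11434',
  model: 'llama3.1:8b',
})

switch (readiness.state) {
  case 'ready':
    console.log(`generation works with ${readiness.probeModel}`)
    break
  case 'unreachable':
    console.log('no response from /api/tags; is the Ollama server running?')
    break
  case 'no_models':
    console.log('server is up but has no models installed (try: ollama pull)')
    break
  case 'generation_failed':
    console.log(`probe with ${readiness.probeModel} failed: ${readiness.detail}`)
    break
}
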
@@ -456,10 +456,19 @@ const checkDependencies = memoize((): SandboxDependencyCheck => {
})
})

/**
* Read sandbox.enabled only from trusted settings sources.
* projectSettings is intentionally excluded — a malicious repo could
* otherwise disable the sandbox via .claude/settings.json.
*/
function getSandboxEnabledSetting(): boolean {
try {
const settings = getSettings_DEPRECATED()
return settings?.sandbox?.enabled ?? false
return !!(
getSettingsForSource('userSettings')?.sandbox?.enabled ||
getSettingsForSource('localSettings')?.sandbox?.enabled ||
getSettingsForSource('flagSettings')?.sandbox?.enabled ||
getSettingsForSource('policySettings')?.sandbox?.enabled
)
} catch (error) {
logForDebugging(`Failed to get settings for sandbox check: ${error}`)
return false
@@ -300,9 +300,9 @@ export function getRelativeSettingsFilePathForSource(
): string {
switch (source) {
case 'projectSettings':
return join('.openclaude', 'settings.json')
return '.openclaude/settings.json'
case 'localSettings':
return join('.openclaude', 'settings.local.json')
return '.openclaude/settings.local.json'
}
}
@@ -207,6 +207,10 @@ export function createPermissionRequest(params: {
}

/**
* @deprecated Use sendPermissionRequestViaMailbox() instead. This file-based
* approach writes to an unauthenticated directory where any local process can
* forge requests. Retained for backward compatibility but no longer called.
*
* Write a permission request to the pending directory with file locking
* Called by worker agents when they need permission approval from the leader
*
@@ -250,6 +254,10 @@ export async function writePermissionRequest(
}

/**
* @deprecated No longer called — permission requests are sent via mailbox.
* The pending directory is an unauthenticated channel. Retained for backward
* compatibility.
*
* Read all pending permission requests for a team
* Called by the team leader to see what requests need attention
*/
@@ -312,6 +320,11 @@ export async function readPendingPermissions(
}

/**
* @deprecated No longer called — permission responses are delivered via mailbox
* (processMailboxPermissionResponse). The resolved directory is an unauthenticated
* channel where any local process can forge approvals. Retained for backward
* compatibility.
*
* Read a resolved permission request by ID
* Called by workers to check if their request has been resolved
*
@@ -352,6 +365,10 @@ export async function readResolvedPermission(
}

/**
* @deprecated Use sendPermissionResponseViaMailbox() instead. This file-based
* approach writes to an unauthenticated directory where any local process can
* forge approvals. Retained for backward compatibility but no longer called.
*
* Resolve a permission request
* Called by the team leader (or worker in self-resolution cases)
*
@@ -536,6 +553,10 @@ export type PermissionResponse = {
}

/**
* @deprecated Use processMailboxPermissionResponse() via useInboxPoller instead.
* File-based polling reads from an unauthenticated directory where any local
* process can forge approval files. Retained for backward compatibility.
*
* Poll for a permission response (worker-side convenience function)
* Converts the resolved request into a simpler response format
*
@@ -564,6 +585,9 @@ export async function pollForResponse(
}

/**
* @deprecated File-based response cleanup is no longer needed — responses are
* delivered via mailbox. Retained for backward compatibility.
*
* Remove a worker's response after processing
* This is an alias for deleteResolvedPermission for backward compatibility
*/
@@ -601,6 +625,9 @@ export function isSwarmWorker(): boolean {
}

/**
* @deprecated File-based resolved permissions are no longer written. Responses
* are delivered via mailbox. Retained for backward compatibility.
*
* Delete a resolved permission file
* Called after a worker has processed the resolution
*/
@@ -635,8 +662,8 @@ export async function deleteResolvedPermission(
}

/**
* Submit a permission request (alias for writePermissionRequest)
* Provided for backward compatibility with worker integration code
* @deprecated Alias for writePermissionRequest, which is itself deprecated.
* Use sendPermissionRequestViaMailbox() instead.
*/
export const submitPermissionRequest = writePermissionRequest
38
src/utils/urlRedaction.test.ts
Normal file
@@ -0,0 +1,38 @@
import { describe, expect, test } from 'bun:test'

import { redactUrlForDisplay } from './urlRedaction.ts'

describe('redactUrlForDisplay', () => {
test('redacts credentials and sensitive query params for valid URLs', () => {
const redacted = redactUrlForDisplay(
'http://user:pass@localhost:11434/v1?api_key=secret&foo=bar',
)

expect(redacted).toBe(
'http://redacted:redacted@localhost:11434/v1?api_key=redacted&foo=bar',
)
})

test('redacts token-like query parameter names', () => {
const redacted = redactUrlForDisplay(
'https://example.com/v1?x_access_token=abc123&model=qwen2.5-coder',
)

expect(redacted).toBe(
'https://example.com/v1?x_access_token=redacted&model=qwen2.5-coder',
)
})

test('falls back to regex redaction for malformed URLs', () => {
const redacted = redactUrlForDisplay(
'//user:pass@localhost:11434?token=abc&mode=test',
)

expect(redacted).toBe('//redacted@localhost:11434?token=redacted&mode=test')
})

test('keeps non-sensitive URLs unchanged', () => {
const url = 'http://localhost:11434/v1?model=llama3.1:8b'
expect(redactUrlForDisplay(url)).toBe(url)
})
})
48
src/utils/urlRedaction.ts
Normal file
@@ -0,0 +1,48 @@
const SENSITIVE_URL_QUERY_PARAM_TOKENS = [
'api_key',
'apikey',
'key',
'token',
'access_token',
'refresh_token',
'signature',
'sig',
'secret',
'password',
'passwd',
'pwd',
'auth',
'authorization',
]

function shouldRedactUrlQueryParam(name: string): boolean {
const lower = name.toLowerCase()
return SENSITIVE_URL_QUERY_PARAM_TOKENS.some(token => lower.includes(token))
}

export function redactUrlForDisplay(rawUrl: string): string {
try {
const parsed = new URL(rawUrl)
if (parsed.username) {
parsed.username = 'redacted'
}
if (parsed.password) {
parsed.password = 'redacted'
}

for (const key of parsed.searchParams.keys()) {
if (shouldRedactUrlQueryParam(key)) {
parsed.searchParams.set(key, 'redacted')
}
}

return parsed.toString()
} catch {
return rawUrl
.replace(/\/\/[^/@\s]+(?::[^/@\s]*)?@/g, '//redacted@')
.replace(
/([?&](?:token|access_token|refresh_token|api_key|apikey|key|password|passwd|pwd|auth|authorization|signature|sig|secret)=)[^&#]*/gi,
'$1redacted',
)
}
}
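
Quick usage, mirroring the test expectations above:

redactUrlForDisplay('http://user:pass@localhost:11434/v1?api_key=secret&foo=bar')
// → 'http://redacted:redacted@localhost:11434/v1?api_key=redacted&foo=bar'

redactUrlForDisplay('http://localhost:11434/v1?model=llama3.1:8b')
// → unchanged: no credentials and no sensitive parameter names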