Compare commits


1 Commit

Author SHA1 Message Date
Juan Camilo
02599e0b6f fix(api): consolidate 3P provider compatibility fixes
- Strip store field from request body for local providers (Ollama, vLLM)
  that reject unknown JSON fields with 400 errors
- Add Gemini 3.x model context windows and output token limits
  (gemini-3-flash-preview, gemini-3.1-pro-preview, google/ OpenRouter variants)
- Preserve reasoning_content on assistant tool-call message replays
  for providers that require it (Kimi k2.5, DeepSeek reasoner)
- Use conservative max_output_tokens fallback (4096/16384) for unknown
  3P models to prevent vLLM/Ollama 400 errors from exceeding max_model_len

Consolidates fixes from: #258, #268, #237, #643, #666, #677

Co-authored-by: auriti <auriti@users.noreply.github.com>
Co-authored-by: Gustavo-Falci <Gustavo-Falci@users.noreply.github.com>
Co-authored-by: lttlin <lttlin@users.noreply.github.com>
Co-authored-by: Durannd <Durannd@users.noreply.github.com>
2026-04-20 10:08:09 +02:00
52 changed files with 529 additions and 1933 deletions
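The first bullet above (stripping store for local providers) lands in openaiShim.ts further down, where the guard becomes isMistral || isGeminiMode() || isLocal. A minimal standalone sketch of that behavior, assuming a simplified local-URL check (the actual isLocalProviderUrl in providerConfig.ts covers more cases than loopback hosts):

// Sketch only: drop OpenAI's `store` field for providers that reject unknown
// JSON fields (Gemini, vLLM, Ollama return 400) or that simply ignore it (Mistral).
function isLocalBaseUrl(baseUrl: string): boolean {
  try {
    const host = new URL(baseUrl).hostname
    return host === 'localhost' || host === '127.0.0.1' || host === '::1'
  } catch {
    return false
  }
}

function stripUnsupportedStoreField(
  body: Record<string, unknown>,
  opts: { baseUrl: string; isMistral: boolean; isGemini: boolean },
): void {
  if (opts.isMistral || opts.isGemini || isLocalBaseUrl(opts.baseUrl)) {
    delete body.store
  }
}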

View File

@@ -20,23 +20,6 @@ describe('formatReachabilityFailureDetail', () => {
)
})
test('redacts credentials and sensitive query parameters in endpoint details', () => {
const detail = formatReachabilityFailureDetail(
'http://user:pass@localhost:11434/v1/models?token=abc123&mode=test',
502,
'bad gateway',
{
transport: 'chat_completions',
requestedModel: 'llama3.1:8b',
resolvedModel: 'llama3.1:8b',
},
)
expect(detail).toBe(
'Unexpected status 502 from http://redacted:redacted@localhost:11434/v1/models?token=redacted&mode=test. Body: bad gateway',
)
})
test('adds alias/entitlement hint for codex model support 400s', () => {
const detail = formatReachabilityFailureDetail(
'https://chatgpt.com/backend-api/codex/responses',

View File

@@ -7,11 +7,6 @@ import {
resolveProviderRequest,
isLocalProviderUrl as isProviderLocalUrl,
} from '../src/services/api/providerConfig.js'
import {
getLocalOpenAICompatibleProviderLabel,
probeOllamaGenerationReadiness,
} from '../src/utils/providerDiscovery.js'
import { redactUrlForDisplay } from '../src/utils/urlRedaction.js'
type CheckResult = {
ok: boolean
@@ -74,7 +69,7 @@ export function formatReachabilityFailureDetail(
},
): string {
const compactBody = responseBody.trim().replace(/\s+/g, ' ').slice(0, 240)
const base = `Unexpected status ${status} from ${redactUrlForDisplay(endpoint)}.`
const base = `Unexpected status ${status} from ${endpoint}.`
const bodySuffix = compactBody ? ` Body: ${compactBody}` : ''
if (request.transport !== 'codex_responses' || status !== 400) {
@@ -260,7 +255,7 @@ function checkOpenAIEnv(): CheckResult[] {
results.push(pass('OPENAI_MODEL', process.env.OPENAI_MODEL))
}
results.push(pass('OPENAI_BASE_URL', redactUrlForDisplay(request.baseUrl)))
results.push(pass('OPENAI_BASE_URL', request.baseUrl))
if (request.transport === 'codex_responses') {
const credentials = resolveCodexApiCredentials(process.env)
@@ -313,7 +308,7 @@ async function checkBaseUrlReachability(): Promise<CheckResult> {
return pass('Provider reachability', 'Skipped (OpenAI-compatible mode disabled).')
}
if (useGithub && !useOpenAI) {
if (useGithub) {
return pass(
'Provider reachability',
'Skipped for GitHub Models (inference endpoint differs from OpenAI /models probe).',
@@ -331,7 +326,6 @@ async function checkBaseUrlReachability(): Promise<CheckResult> {
const endpoint = request.transport === 'codex_responses'
? `${request.baseUrl}/responses`
: `${request.baseUrl}/models`
const redactedEndpoint = redactUrlForDisplay(endpoint)
const controller = new AbortController()
const timeout = setTimeout(() => controller.abort(), 4000)
@@ -381,10 +375,7 @@ async function checkBaseUrlReachability(): Promise<CheckResult> {
})
if (response.status === 200 || response.status === 401 || response.status === 403) {
return pass(
'Provider reachability',
`Reached ${redactedEndpoint} (status ${response.status}).`,
)
return pass('Provider reachability', `Reached ${endpoint} (status ${response.status}).`)
}
const responseBody = await response.text().catch(() => '')
@@ -400,100 +391,12 @@ async function checkBaseUrlReachability(): Promise<CheckResult> {
)
} catch (error) {
const message = error instanceof Error ? error.message : String(error)
return fail(
'Provider reachability',
`Failed to reach ${redactedEndpoint}: ${message}`,
)
return fail('Provider reachability', `Failed to reach ${endpoint}: ${message}`)
} finally {
clearTimeout(timeout)
}
}
async function checkProviderGenerationReadiness(): Promise<CheckResult> {
const useGemini = isTruthy(process.env.CLAUDE_CODE_USE_GEMINI)
const useOpenAI = isTruthy(process.env.CLAUDE_CODE_USE_OPENAI)
const useGithub = isTruthy(process.env.CLAUDE_CODE_USE_GITHUB)
const useMistral = isTruthy(process.env.CLAUDE_CODE_USE_MISTRAL)
if (!useGemini && !useOpenAI && !useGithub && !useMistral) {
return pass('Provider generation readiness', 'Skipped (OpenAI-compatible mode disabled).')
}
if (useGithub && !useOpenAI) {
return pass(
'Provider generation readiness',
'Skipped for GitHub Models (runtime generation uses a different endpoint flow).',
)
}
if (useGemini || useMistral) {
return pass(
'Provider generation readiness',
'Skipped for managed provider mode.',
)
}
if (!useOpenAI) {
return pass('Provider generation readiness', 'Skipped (OpenAI-compatible mode disabled).')
}
const request = resolveProviderRequest({
model: process.env.OPENAI_MODEL,
baseUrl: process.env.OPENAI_BASE_URL,
})
if (request.transport === 'codex_responses') {
return pass(
'Provider generation readiness',
'Skipped for Codex responses (reachability probe already performs a lightweight generation request).',
)
}
if (!isLocalBaseUrl(request.baseUrl)) {
return pass('Provider generation readiness', 'Skipped for non-local provider URL.')
}
const localProviderLabel = getLocalOpenAICompatibleProviderLabel(request.baseUrl)
if (localProviderLabel !== 'Ollama') {
return pass(
'Provider generation readiness',
`Skipped for ${localProviderLabel} (no provider-specific generation probe).`,
)
}
const readiness = await probeOllamaGenerationReadiness({
baseUrl: request.baseUrl,
model: request.requestedModel,
})
if (readiness.state === 'ready') {
return pass(
'Provider generation readiness',
`Generated a test response with ${readiness.probeModel ?? request.requestedModel}.`,
)
}
if (readiness.state === 'unreachable') {
return fail(
'Provider generation readiness',
`Could not reach Ollama at ${redactUrlForDisplay(request.baseUrl)}.`,
)
}
if (readiness.state === 'no_models') {
return fail(
'Provider generation readiness',
'Ollama is reachable, but no installed models were found. Pull a model first (for example: ollama pull qwen2.5-coder:7b).',
)
}
const detailSuffix = readiness.detail ? ` Detail: ${readiness.detail}.` : ''
return fail(
'Provider generation readiness',
`Ollama is reachable, but generation failed for ${readiness.probeModel ?? request.requestedModel}.${detailSuffix}`,
)
}
function isAtomicChatUrl(baseUrl: string): boolean {
try {
const parsed = new URL(baseUrl)
@@ -664,7 +567,6 @@ async function main(): Promise<void> {
results.push(checkBuildArtifacts())
results.push(...checkOpenAIEnv())
results.push(await checkBaseUrlReachability())
results.push(await checkProviderGenerationReadiness())
results.push(checkOllamaProcessorMode())
if (!options.json) {

View File

@@ -21,11 +21,11 @@ describe('Gemini store field fix', () => {
test('isGeminiMode is imported and used in openaiShim', async () => {
const content = await file('services/api/openaiShim.ts').text()
// Verify the fix: store deletion should check for Gemini mode
// Verify the fix: store deletion should check for Gemini mode and local providers
expect(content).toContain('isGeminiMode()')
expect(content).toContain("mistral and gemini don't recognize body.store")
// Ensure the delete body.store is guarded for both Mistral and Gemini
expect(content).toMatch(/isMistral\s*\|\|\s*isGeminiMode\(\)/)
expect(content).toContain("Strip store for providers that don't recognize it")
// Ensure the delete body.store is guarded for Mistral, Gemini, and local providers
expect(content).toMatch(/isMistral\s*\|\|\s*isGeminiMode\(\)\s*\|\|\s*isLocal/)
})
test('store: false is still set by default (OpenAI needs it)', async () => {

View File

@@ -1,191 +0,0 @@
/**
* Security hardening regression tests.
*
* Covers:
* 1. MCP tool result Unicode sanitization
* 2. Sandbox settings source filtering (exclude projectSettings)
* 3. Plugin git clone/pull hooks disabled
* 4. ANTHROPIC_FOUNDRY_API_KEY removed from SAFE_ENV_VARS
* 5. WebFetch SSRF protection via ssrfGuardedLookup
*/
import { describe, test, expect } from 'bun:test'
import { resolve } from 'path'
const SRC = resolve(import.meta.dir, '..')
const file = (relative: string) => Bun.file(resolve(SRC, relative))
// ---------------------------------------------------------------------------
// Fix 1: MCP tool result Unicode sanitization
// ---------------------------------------------------------------------------
describe('MCP tool result sanitization', () => {
test('transformResultContent sanitizes text content', async () => {
const content = await file('services/mcp/client.ts').text()
// Tool definitions are already sanitized (line ~1798)
expect(content).toContain('recursivelySanitizeUnicode(result.tools)')
// Tool results must also be sanitized
expect(content).toMatch(
/case 'text':[\s\S]*?recursivelySanitizeUnicode\(resultContent\.text\)/,
)
})
test('resource text content is also sanitized', async () => {
const content = await file('services/mcp/client.ts').text()
expect(content).toMatch(
/recursivelySanitizeUnicode\(\s*`\$\{prefix\}\$\{resource\.text\}`/,
)
})
})
// ---------------------------------------------------------------------------
// Fix 2: Sandbox settings source filtering
// ---------------------------------------------------------------------------
describe('Sandbox settings trust boundary', () => {
test('getSandboxEnabledSetting does not use getSettings_DEPRECATED', async () => {
const content = await file('utils/sandbox/sandbox-adapter.ts').text()
// Extract the getSandboxEnabledSetting function body
const fnMatch = content.match(
/function getSandboxEnabledSetting\(\)[^{]*\{([\s\S]*?)\n\}/,
)
expect(fnMatch).not.toBeNull()
const fnBody = fnMatch![1]
// Must NOT use getSettings_DEPRECATED (reads all sources including project)
expect(fnBody).not.toContain('getSettings_DEPRECATED')
// Must use getSettingsForSource for individual trusted sources
expect(fnBody).toContain("getSettingsForSource('userSettings')")
expect(fnBody).toContain("getSettingsForSource('policySettings')")
// Must NOT read from projectSettings
expect(fnBody).not.toContain("'projectSettings'")
})
})
// ---------------------------------------------------------------------------
// Fix 3: Plugin git hooks disabled
// ---------------------------------------------------------------------------
describe('Plugin git operations disable hooks', () => {
test('gitClone includes core.hooksPath=/dev/null', async () => {
const content = await file('utils/plugins/marketplaceManager.ts').text()
// The clone args must disable hooks
const cloneSection = content.slice(
content.indexOf('export async function gitClone('),
content.indexOf('export async function gitClone(') + 2000,
)
expect(cloneSection).toContain("'core.hooksPath=/dev/null'")
})
test('gitPull includes core.hooksPath=/dev/null', async () => {
const content = await file('utils/plugins/marketplaceManager.ts').text()
const pullSection = content.slice(
content.indexOf('export async function gitPull('),
content.indexOf('export async function gitPull(') + 2000,
)
expect(pullSection).toContain("'core.hooksPath=/dev/null'")
})
test('gitSubmoduleUpdate includes core.hooksPath=/dev/null', async () => {
const content = await file('utils/plugins/marketplaceManager.ts').text()
const subSection = content.slice(
content.indexOf('async function gitSubmoduleUpdate('),
content.indexOf('async function gitSubmoduleUpdate(') + 1000,
)
expect(subSection).toContain("'core.hooksPath=/dev/null'")
})
})
// ---------------------------------------------------------------------------
// Fix 4: ANTHROPIC_FOUNDRY_API_KEY not in SAFE_ENV_VARS
// ---------------------------------------------------------------------------
describe('SAFE_ENV_VARS excludes credentials', () => {
test('ANTHROPIC_FOUNDRY_API_KEY is not in SAFE_ENV_VARS', async () => {
const content = await file('utils/managedEnvConstants.ts').text()
// Extract the SAFE_ENV_VARS set definition
const safeStart = content.indexOf('export const SAFE_ENV_VARS')
const safeEnd = content.indexOf('])', safeStart)
const safeSection = content.slice(safeStart, safeEnd)
expect(safeSection).not.toContain('ANTHROPIC_FOUNDRY_API_KEY')
})
})
// ---------------------------------------------------------------------------
// Fix 5: WebFetch SSRF protection
// ---------------------------------------------------------------------------
describe('WebFetch SSRF guard', () => {
test('getWithPermittedRedirects uses ssrfGuardedLookup', async () => {
const content = await file('tools/WebFetchTool/utils.ts').text()
expect(content).toContain(
"import { ssrfGuardedLookup } from '../../utils/hooks/ssrfGuard.js'",
)
// The axios.get call in getWithPermittedRedirects must include lookup
const fnSection = content.slice(
content.indexOf('export async function getWithPermittedRedirects('),
content.indexOf('export async function getWithPermittedRedirects(') +
1000,
)
expect(fnSection).toContain('lookup: ssrfGuardedLookup')
})
})
// ---------------------------------------------------------------------------
// Fix 6: Swarm permission file polling removed (security hardening)
// ---------------------------------------------------------------------------
describe('Swarm permission file polling removed', () => {
test('useSwarmPermissionPoller hook no longer exists', async () => {
const content = await file(
'hooks/useSwarmPermissionPoller.ts',
).text()
// The file-based polling hook must not exist — it read from an
// unauthenticated resolved/ directory where any local process could
// forge approval files.
expect(content).not.toContain('function useSwarmPermissionPoller(')
// The file-based processResponse must not exist
expect(content).not.toContain('function processResponse(')
})
test('poller does not import from permissionSync', async () => {
const content = await file(
'hooks/useSwarmPermissionPoller.ts',
).text()
// Must not import anything from permissionSync — all file-based
// functions have been removed from this module's dependencies
expect(content).not.toContain('permissionSync')
})
test('file-based permission functions are marked deprecated', async () => {
const content = await file(
'utils/swarm/permissionSync.ts',
).text()
// All file-based functions must have @deprecated JSDoc
const deprecatedFns = [
'writePermissionRequest',
'readPendingPermissions',
'readResolvedPermission',
'resolvePermission',
'pollForResponse',
'removeWorkerResponse',
]
for (const fn of deprecatedFns) {
// Find the function and check that @deprecated appears before it
const fnIndex = content.indexOf(`export async function ${fn}(`)
if (fnIndex === -1) continue // submitPermissionRequest is a const, not async function
const preceding = content.slice(Math.max(0, fnIndex - 500), fnIndex)
expect(preceding).toContain('@deprecated')
}
})
test('mailbox-based functions are NOT deprecated', async () => {
const content = await file(
'utils/swarm/permissionSync.ts',
).text()
// These are the active path — must not be deprecated
const activeFns = [
'sendPermissionRequestViaMailbox',
'sendPermissionResponseViaMailbox',
]
for (const fn of activeFns) {
const fnIndex = content.indexOf(`export async function ${fn}(`)
expect(fnIndex).not.toBe(-1)
const preceding = content.slice(Math.max(0, fnIndex - 300), fnIndex)
expect(preceding).not.toContain('@deprecated')
}
})
})

View File

@@ -66,44 +66,10 @@ import {
import {
getOllamaChatBaseUrl,
getLocalOpenAICompatibleProviderLabel,
probeOllamaGenerationReadiness,
type OllamaGenerationReadiness,
hasLocalOllama,
listOllamaModels,
} from '../../utils/providerDiscovery.js'
function describeOllamaReadinessIssue(
readiness: OllamaGenerationReadiness,
options?: {
baseUrl?: string
allowManualFallback?: boolean
},
): string {
const endpoint = options?.baseUrl ?? 'http://localhost:11434'
if (readiness.state === 'unreachable') {
return `Could not reach Ollama at ${endpoint}. Start Ollama first, then run /provider again.`
}
if (readiness.state === 'no_models') {
const manualSuffix = options?.allowManualFallback
? ', or enter details manually'
: ''
return `Ollama is running, but no installed models were found. Pull a chat model such as qwen2.5-coder:7b or llama3.1:8b first${manualSuffix}.`
}
if (readiness.state === 'generation_failed') {
const modelHint = readiness.probeModel ?? 'the selected model'
const detailSuffix = readiness.detail
? ` Details: ${readiness.detail}.`
: ''
const manualSuffix = options?.allowManualFallback
? ' You can also enter details manually.'
: ''
return `Ollama is reachable and models are installed, but a generation probe failed for ${modelHint}.${detailSuffix} Run "ollama run ${modelHint}" once and retry.${manualSuffix}`
}
return ''
}
type ProviderChoice = 'auto' | ProviderProfile | 'codex-oauth' | 'clear'
type Step =
@@ -749,7 +715,6 @@ function AutoRecommendationStep({
| {
state: 'openai'
defaultModel: string
reason: string
}
| {
state: 'error'
@@ -763,27 +728,19 @@ function AutoRecommendationStep({
void (async () => {
const defaultModel = getGoalDefaultOpenAIModel(goal)
try {
const readiness = await probeOllamaGenerationReadiness()
if (readiness.state !== 'ready') {
const ollamaAvailable = await hasLocalOllama()
if (!ollamaAvailable) {
if (!cancelled) {
setStatus({
state: 'openai',
defaultModel,
reason: describeOllamaReadinessIssue(readiness),
})
setStatus({ state: 'openai', defaultModel })
}
return
}
const recommended = recommendOllamaModel(readiness.models, goal)
const models = await listOllamaModels()
const recommended = recommendOllamaModel(models, goal)
if (!recommended) {
if (!cancelled) {
setStatus({
state: 'openai',
defaultModel,
reason:
'Ollama responded to a generation probe, but no recommended chat model matched this goal.',
})
setStatus({ state: 'openai', defaultModel })
}
return
}
@@ -839,10 +796,10 @@ function AutoRecommendationStep({
<Dialog title="Auto setup fallback" onCancel={onCancel}>
<Box flexDirection="column" gap={1}>
<Text>
Auto setup can continue into OpenAI-compatible setup with a default model of{' '}
No viable local Ollama chat model was detected. Auto setup can
continue into OpenAI-compatible setup with a default model of{' '}
{status.defaultModel}.
</Text>
<Text dimColor>{status.reason}</Text>
<Select
options={[
{ label: 'Continue to OpenAI-compatible setup', value: 'continue' },
@@ -926,19 +883,32 @@ function OllamaModelStep({
let cancelled = false
void (async () => {
const readiness = await probeOllamaGenerationReadiness()
if (readiness.state !== 'ready') {
const available = await hasLocalOllama()
if (!available) {
if (!cancelled) {
setStatus({
state: 'unavailable',
message: describeOllamaReadinessIssue(readiness),
message:
'Could not reach Ollama at http://localhost:11434. Start Ollama first, then run /provider again.',
})
}
return
}
const ranked = rankOllamaModels(readiness.models, 'balanced')
const recommended = recommendOllamaModel(readiness.models, 'balanced')
const models = await listOllamaModels()
if (models.length === 0) {
if (!cancelled) {
setStatus({
state: 'unavailable',
message:
'Ollama is running, but no installed models were found. Pull a chat model such as qwen2.5-coder:7b or llama3.1:8b first.',
})
}
return
}
const ranked = rankOllamaModels(models, 'balanced')
const recommended = recommendOllamaModel(models, 'balanced')
if (!cancelled) {
setStatus({
state: 'ready',

View File

@@ -149,21 +149,17 @@ function mockProviderManagerDependencies(
applySavedProfileToCurrentSession?: (...args: unknown[]) => Promise<string | null>
clearCodexCredentials?: () => { success: boolean; warning?: string }
getProviderProfiles?: () => unknown[]
probeOllamaGenerationReadiness?: () => Promise<{
state: 'ready' | 'unreachable' | 'no_models' | 'generation_failed'
models: Array<
{
hasLocalOllama?: () => Promise<boolean>
listOllamaModels?: () => Promise<
Array<{
name: string
sizeBytes?: number | null
family?: string | null
families?: string[]
parameterSize?: string | null
quantizationLevel?: string | null
}
>
probeModel?: string
detail?: string
}>
>
codexSyncRead?: () => unknown
codexAsyncRead?: () => Promise<unknown>
updateProviderProfile?: (...args: unknown[]) => unknown
@@ -193,12 +189,8 @@ function mockProviderManagerDependencies(
})
mock.module('../utils/providerDiscovery.js', () => ({
probeOllamaGenerationReadiness:
options?.probeOllamaGenerationReadiness ??
(async () => ({
state: 'unreachable' as const,
models: [],
})),
hasLocalOllama: options?.hasLocalOllama ?? (async () => false),
listOllamaModels: options?.listOllamaModels ?? (async () => []),
}))
mock.module('../utils/githubModelsCredentials.js', () => ({
@@ -463,9 +455,8 @@ test('ProviderManager first-run Ollama preset auto-detects installed models', as
async () => undefined,
{
addProviderProfile,
probeOllamaGenerationReadiness: async () => ({
state: 'ready',
models: [
hasLocalOllama: async () => true,
listOllamaModels: async () => [
{
name: 'gemma4:31b-cloud',
family: 'gemma',
@@ -477,8 +468,6 @@ test('ProviderManager first-run Ollama preset auto-detects installed models', as
parameterSize: '2.5b',
},
],
probeModel: 'gemma4:31b-cloud',
}),
},
)

View File

@@ -37,14 +37,13 @@ import {
readGithubModelsTokenAsync,
} from '../utils/githubModelsCredentials.js'
import {
probeOllamaGenerationReadiness,
type OllamaGenerationReadiness,
hasLocalOllama,
listOllamaModels,
} from '../utils/providerDiscovery.js'
import {
rankOllamaModels,
recommendOllamaModel,
} from '../utils/providerRecommendation.js'
import { redactUrlForDisplay } from '../utils/urlRedaction.js'
import { updateSettingsForSource } from '../utils/settings/settings.js'
import {
type OptionWithDescription,
@@ -53,6 +52,7 @@ import {
import { Pane } from './design-system/Pane.js'
import TextInput from './TextInput.js'
import { useCodexOAuthFlow } from './useCodexOAuthFlow.js'
import { useSetAppState } from '../state/AppState.js'
export type ProviderManagerResult = {
action: 'saved' | 'cancelled'
@@ -222,29 +222,6 @@ function getGithubProviderSummary(
return `github-models · ${GITHUB_PROVIDER_DEFAULT_BASE_URL} · ${getGithubProviderModel(processEnv)} · ${credentialSummary}${activeSuffix}`
}
function describeOllamaSelectionIssue(
readiness: OllamaGenerationReadiness,
baseUrl: string,
): string {
if (readiness.state === 'unreachable') {
return `Could not reach Ollama at ${redactUrlForDisplay(baseUrl)}. Start Ollama first, or enter the endpoint manually.`
}
if (readiness.state === 'no_models') {
return 'Ollama is running, but no installed models were found. Pull a chat model such as qwen2.5-coder:7b or llama3.1:8b first, or enter details manually.'
}
if (readiness.state === 'generation_failed') {
const modelHint = readiness.probeModel ?? 'the selected model'
const detailSuffix = readiness.detail
? ` Details: ${readiness.detail}.`
: ''
return `Ollama is reachable and models are installed, but a generation probe failed for ${modelHint}.${detailSuffix} Run "ollama run ${modelHint}" once and retry, or enter details manually.`
}
return ''
}
function findCodexOAuthProfile(
profiles: ProviderProfile[],
profileId?: string,
@@ -473,21 +450,32 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
setOllamaSelection({ state: 'loading' })
void (async () => {
const readiness = await probeOllamaGenerationReadiness({
baseUrl: draft.baseUrl,
})
if (readiness.state !== 'ready') {
const available = await hasLocalOllama(draft.baseUrl)
if (!available) {
if (!cancelled) {
setOllamaSelection({
state: 'unavailable',
message: describeOllamaSelectionIssue(readiness, draft.baseUrl),
message:
'Could not reach Ollama. Start Ollama first, or enter the endpoint manually.',
})
}
return
}
const ranked = rankOllamaModels(readiness.models, 'balanced')
const recommended = recommendOllamaModel(readiness.models, 'balanced')
const models = await listOllamaModels(draft.baseUrl)
if (models.length === 0) {
if (!cancelled) {
setOllamaSelection({
state: 'unavailable',
message:
'Ollama is running, but no installed models were found. Pull a chat model such as qwen2.5-coder:7b or llama3.1:8b first, or enter details manually.',
})
}
return
}
const ranked = rankOllamaModels(models, 'balanced')
const recommended = recommendOllamaModel(models, 'balanced')
if (!cancelled) {
setOllamaSelection({
state: 'ready',

View File

@@ -53,20 +53,17 @@ describe('getProjectMemoryPathForSelector', () => {
})
test('defaults to a new AGENTS.md in the current cwd when no project file is loaded', () => {
const cwd = join('/repo', 'packages', 'app')
expect(getProjectMemoryPathForSelector([], cwd)).toBe(
join(cwd, 'AGENTS.md'),
expect(getProjectMemoryPathForSelector([], '/repo/packages/app')).toBe(
'/repo/packages/app/AGENTS.md',
)
})
test('ignores loaded project instruction files outside the current cwd ancestry', () => {
const outsideRepoPath = join('/other-worktree', 'AGENTS.md')
const cwd = join('/repo', 'packages', 'app')
expect(
getProjectMemoryPathForSelector(
[projectFile(outsideRepoPath)],
cwd,
[projectFile('/other-worktree/AGENTS.md')],
'/repo/packages/app',
),
).toBe(join(cwd, 'AGENTS.md'))
).toBe('/repo/packages/app/AGENTS.md')
})
})

View File

@@ -19,7 +19,7 @@ async function _temp() {
logForDebugging("Showing marketplace config save failure notification");
notifs.push({
key: "marketplace-config-save-failed",
jsx: <Text color="error">Failed to save marketplace retry info · Check ~/.openclaude.json permissions</Text>,
jsx: <Text color="error">Failed to save marketplace retry info · Check ~/.claude.json permissions</Text>,
priority: "immediate",
timeoutMs: 10000
});

View File

@@ -1,23 +1,34 @@
/**
* Swarm Permission Callback Registry
* Swarm Permission Poller Hook
*
* Manages callback registrations for permission requests and responses
* in agent swarms. Responses are delivered exclusively via the mailbox
* system (useInboxPoller → processMailboxPermissionResponse).
* This hook polls for permission responses from the team leader when running
* as a worker agent in a swarm. When a response is received, it calls the
* appropriate callback (onAllow/onReject) to continue execution.
*
* The legacy file-based polling (resolved/ directory) has been removed
* because it created an unauthenticated attack surface — any local process
* could forge approval files. The mailbox path is the sole active channel.
* This hook should be used in conjunction with the worker-side integration
* in useCanUseTool.ts, which creates pending requests that this hook monitors.
*/
import { useCallback, useEffect, useRef } from 'react'
import { useInterval } from 'usehooks-ts'
import { logForDebugging } from '../utils/debug.js'
import { errorMessage } from '../utils/errors.js'
import {
type PermissionUpdate,
permissionUpdateSchema,
} from '../utils/permissions/PermissionUpdateSchema.js'
import {
isSwarmWorker,
type PermissionResponse,
pollForResponse,
removeWorkerResponse,
} from '../utils/swarm/permissionSync.js'
import { getAgentName, getTeamName } from '../utils/teammate.js'
const POLL_INTERVAL_MS = 500
/**
* Validate permissionUpdates from external sources (mailbox IPC).
* Validate permissionUpdates from external sources (mailbox IPC, disk polling).
* Malformed entries from buggy/old teammate processes are filtered out rather
* than propagated unchecked into callback.onAllow().
*/
@@ -214,9 +225,106 @@ export function processSandboxPermissionResponse(params: {
return true
}
// Legacy file-based polling (useSwarmPermissionPoller, processResponse)
// has been removed. Permission responses are now delivered exclusively
// via the mailbox system:
// Leader: sendPermissionResponseViaMailbox() → writeToMailbox()
// Worker: useInboxPoller → processMailboxPermissionResponse()
// See: fix(security) — remove unauthenticated file-based permission channel
/**
* Process a permission response by invoking the registered callback
*/
function processResponse(response: PermissionResponse): boolean {
const callback = pendingCallbacks.get(response.requestId)
if (!callback) {
logForDebugging(
`[SwarmPermissionPoller] No callback registered for request ${response.requestId}`,
)
return false
}
logForDebugging(
`[SwarmPermissionPoller] Processing response for request ${response.requestId}: ${response.decision}`,
)
// Remove from registry before invoking callback
pendingCallbacks.delete(response.requestId)
if (response.decision === 'approved') {
const permissionUpdates = parsePermissionUpdates(response.permissionUpdates)
const updatedInput = response.updatedInput
callback.onAllow(updatedInput, permissionUpdates)
} else {
callback.onReject(response.feedback)
}
return true
}
/**
* Hook that polls for permission responses when running as a swarm worker.
*
* This hook:
* 1. Only activates when isSwarmWorker() returns true
* 2. Polls every 500ms for responses
* 3. When a response is found, invokes the registered callback
* 4. Cleans up the response file after processing
*/
export function useSwarmPermissionPoller(): void {
const isProcessingRef = useRef(false)
const poll = useCallback(async () => {
// Don't poll if not a swarm worker
if (!isSwarmWorker()) {
return
}
// Prevent concurrent polling
if (isProcessingRef.current) {
return
}
// Don't poll if no callbacks are registered
if (pendingCallbacks.size === 0) {
return
}
isProcessingRef.current = true
try {
const agentName = getAgentName()
const teamName = getTeamName()
if (!agentName || !teamName) {
return
}
// Check each pending request for a response
for (const [requestId, _callback] of pendingCallbacks) {
const response = await pollForResponse(requestId, agentName, teamName)
if (response) {
// Process the response
const processed = processResponse(response)
if (processed) {
// Clean up the response from the worker's inbox
await removeWorkerResponse(requestId, agentName, teamName)
}
}
}
} catch (error) {
logForDebugging(
`[SwarmPermissionPoller] Error during poll: ${errorMessage(error)}`,
)
} finally {
isProcessingRef.current = false
}
}, [])
// Only poll if we're a swarm worker
const shouldPoll = isSwarmWorker()
useInterval(() => void poll(), shouldPoll ? POLL_INTERVAL_MS : null)
// Initial poll on mount
useEffect(() => {
if (isSwarmWorker()) {
void poll()
}
}, [poll])
}

View File

@@ -11,16 +11,14 @@ const execFileNoThrowMock = mock(
async () => ({ code: 0, stdout: '', stderr: '' }),
)
function installOscMocks(): void {
mock.module('../../utils/execFileNoThrow.js', () => ({
mock.module('../../utils/execFileNoThrow.js', () => ({
execFileNoThrow: execFileNoThrowMock,
execFileNoThrowWithCwd: execFileNoThrowMock,
}))
}))
mock.module('../../utils/tempfile.js', () => ({
mock.module('../../utils/tempfile.js', () => ({
generateTempFilePath: generateTempFilePathMock,
}))
}
}))
async function importFreshOscModule() {
return import(`./osc.ts?ts=${Date.now()}-${Math.random()}`)
@@ -47,7 +45,6 @@ async function waitForExecCall(
describe('Windows clipboard fallback', () => {
beforeEach(() => {
installOscMocks()
execFileNoThrowMock.mockClear()
generateTempFilePathMock.mockClear()
process.env = { ...originalEnv }
@@ -65,12 +62,14 @@ describe('Windows clipboard fallback', () => {
const { setClipboard } = await importFreshOscModule()
await setClipboard('Привет мир')
const windowsCall = await waitForExecCall('powershell')
await flushClipboardCopy()
expect(execFileNoThrowMock.mock.calls.some(([cmd]) => cmd === 'clip')).toBe(
false,
)
expect(windowsCall).toBeDefined()
expect(
execFileNoThrowMock.mock.calls.some(([cmd]) => cmd === 'powershell'),
).toBe(true)
})
test('passes Windows clipboard text through a UTF-8 temp file instead of stdin', async () => {
@@ -98,7 +97,6 @@ describe('Windows clipboard fallback', () => {
describe('clipboard path behavior remains stable', () => {
beforeEach(() => {
installOscMocks()
execFileNoThrowMock.mockClear()
process.env = { ...originalEnv }
delete process.env['SSH_CONNECTION']

View File

@@ -12,7 +12,7 @@ import {
* One-shot migration: clear skipAutoPermissionPrompt for users who accepted
* the old 2-option AutoModeOptInDialog but don't have auto as their default.
* Re-surfaces the dialog so they see the new "make it my default mode" option.
* Guard lives in GlobalConfig (~/.openclaude.json), not settings.json, so it
* Guard lives in GlobalConfig (~/.claude.json), not settings.json, so it
* survives settings resets and doesn't re-arm itself.
*
* Only runs when tengu_auto_mode_config.enabled === 'enabled'. For 'opt-in'

View File

@@ -3873,7 +3873,7 @@ export function REPL({
// empty to non-empty, not on every length change -- otherwise a render loop
// (concurrent onQuery thrashing, etc.) spams saveGlobalConfig, which hits
// ELOCKED under concurrent sessions and falls back to unlocked writes.
// That write storm is the primary trigger for ~/.openclaude.json corruption
// That write storm is the primary trigger for ~/.claude.json corruption
// (GH #3117).
const hasCountedQueueUseRef = useRef(false);
useEffect(() => {

View File

@@ -334,7 +334,7 @@ async function processRemoteEvalPayload(
// Empty object is truthy — without the length check, `{features: {}}`
// (transient server bug, truncated response) would pass, clear the maps
// below, return true, and syncRemoteEvalToDisk would wholesale-write `{}`
// to disk: total flag blackout for every process sharing ~/.openclaude.json.
// to disk: total flag blackout for every process sharing ~/.claude.json.
if (!payload?.features || Object.keys(payload.features).length === 0) {
return false
}

View File

@@ -23,7 +23,6 @@ import { randomUUID } from 'crypto'
import {
getAPIProvider,
isFirstPartyAnthropicBaseUrl,
isGithubNativeAnthropicMode,
} from 'src/utils/model/providers.js'
import {
getAttributionHeader,
@@ -335,13 +334,8 @@ export function getPromptCachingEnabled(model: string): boolean {
// Prompt caching is an Anthropic-specific feature. Third-party providers
// do not understand cache_control blocks and strict backends (e.g. Azure
// Foundry) reject or flag requests that contain them.
//
// Exception: when the GitHub provider is configured in native Anthropic API
// mode (CLAUDE_CODE_GITHUB_ANTHROPIC_API=1), requests are sent in Anthropic
// format, so cache_control blocks are supported.
const provider = getAPIProvider()
const isNativeGithub = isGithubNativeAnthropicMode(model)
if (provider !== 'firstParty' && provider !== 'bedrock' && provider !== 'vertex' && !isNativeGithub) {
if (provider !== 'firstParty' && provider !== 'bedrock' && provider !== 'vertex') {
return false
}

View File

@@ -14,7 +14,6 @@ import { getSmallFastModel } from 'src/utils/model/model.js'
import {
getAPIProvider,
isFirstPartyAnthropicBaseUrl,
isGithubNativeAnthropicMode,
} from 'src/utils/model/providers.js'
import { getProxyFetchOptions } from 'src/utils/proxy.js'
import {
@@ -175,25 +174,6 @@ export async function getAnthropicClient({
providerOverride,
}) as unknown as Anthropic
}
// GitHub provider in native Anthropic API mode: send requests in Anthropic
// format so cache_control blocks are honoured and prompt caching works.
// Requires the GitHub endpoint (OPENAI_BASE_URL) to support Anthropic's
// messages API — set CLAUDE_CODE_GITHUB_ANTHROPIC_API=1 to opt in.
if (isGithubNativeAnthropicMode(model)) {
const githubBaseUrl =
process.env.OPENAI_BASE_URL?.replace(/\/$/, '') ??
'https://api.githubcopilot.com'
const githubToken =
process.env.GITHUB_TOKEN ?? process.env.GH_TOKEN ?? ''
const nativeArgs: ConstructorParameters<typeof Anthropic>[0] = {
...ARGS,
baseURL: githubBaseUrl,
authToken: githubToken,
// No apiKey — we authenticate via Bearer token (authToken)
apiKey: null,
}
return new Anthropic(nativeArgs)
}
if (
isEnvTruthy(process.env.CLAUDE_CODE_USE_OPENAI) ||
isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB) ||

View File

@@ -320,7 +320,10 @@ export function classifyOpenAIHttpFailure(options: {
}
}
if (options.status >= 400 && isMalformedProviderResponse(body)) {
if (
(options.status >= 200 && options.status < 300 && isMalformedProviderResponse(body)) ||
(options.status >= 400 && isMalformedProviderResponse(body))
) {
return {
source: 'http',
category: 'malformed_provider_response',

View File

@@ -117,170 +117,3 @@ test('redacts credentials in transport diagnostic URL logs', async () => {
expect(logLine).not.toContain('user:supersecret')
expect(logLine).not.toContain('supersecret@')
})
test('logs self-heal localhost fallback with redacted from/to URLs', async () => {
const debugSpy = mock(() => {})
mock.module('../../utils/debug.js', () => ({
logForDebugging: debugSpy,
}))
const nonce = `${Date.now()}-${Math.random()}`
const { createOpenAIShimClient } = await import(`./openaiShim.ts?ts=${nonce}`)
process.env.OPENAI_BASE_URL = 'http://user:supersecret@localhost:11434/v1'
process.env.OPENAI_API_KEY = 'supersecret'
globalThis.fetch = mock(async (input: string | Request) => {
const url = typeof input === 'string' ? input : input.url
if (url.includes('localhost')) {
throw Object.assign(new TypeError('fetch failed'), {
code: 'ENOTFOUND',
})
}
return new Response(
JSON.stringify({
id: 'chatcmpl-1',
model: 'qwen2.5-coder:7b',
choices: [
{
message: {
role: 'assistant',
content: 'ok',
},
finish_reason: 'stop',
},
],
usage: {
prompt_tokens: 5,
completion_tokens: 2,
total_tokens: 7,
},
}),
{
status: 200,
headers: {
'Content-Type': 'application/json',
},
},
)
}) as typeof globalThis.fetch
const client = createOpenAIShimClient({}) as {
beta: {
messages: {
create: (params: Record<string, unknown>) => Promise<unknown>
}
}
}
await expect(
client.beta.messages.create({
model: 'qwen2.5-coder:7b',
messages: [{ role: 'user', content: 'hello' }],
max_tokens: 64,
stream: false,
}),
).resolves.toBeDefined()
const fallbackLog = debugSpy.mock.calls.find(call =>
typeof call?.[0] === 'string' &&
call[0].includes('self-heal retry reason=localhost_resolution_failed'),
)
expect(fallbackLog).toBeDefined()
const logLine = String(fallbackLog?.[0])
expect(logLine).toContain('from=http://redacted:redacted@localhost:11434/v1/chat/completions')
expect(logLine).toContain('to=http://redacted:redacted@127.0.0.1:11434/v1/chat/completions')
expect(logLine).not.toContain('supersecret')
})
test('logs self-heal toolless retry for local tool-call incompatibility', async () => {
const debugSpy = mock(() => {})
mock.module('../../utils/debug.js', () => ({
logForDebugging: debugSpy,
}))
const nonce = `${Date.now()}-${Math.random()}`
const { createOpenAIShimClient } = await import(`./openaiShim.ts?ts=${nonce}`)
process.env.OPENAI_BASE_URL = 'http://localhost:11434/v1'
process.env.OPENAI_API_KEY = 'ollama'
let callCount = 0
globalThis.fetch = mock(async () => {
callCount += 1
if (callCount === 1) {
return new Response('tool_calls are not supported', {
status: 400,
headers: {
'Content-Type': 'text/plain',
},
})
}
return new Response(
JSON.stringify({
id: 'chatcmpl-1',
model: 'qwen2.5-coder:7b',
choices: [
{
message: {
role: 'assistant',
content: 'ok',
},
finish_reason: 'stop',
},
],
usage: {
prompt_tokens: 7,
completion_tokens: 3,
total_tokens: 10,
},
}),
{
status: 200,
headers: {
'Content-Type': 'application/json',
},
},
)
}) as typeof globalThis.fetch
const client = createOpenAIShimClient({}) as {
beta: {
messages: {
create: (params: Record<string, unknown>) => Promise<unknown>
}
}
}
await expect(
client.beta.messages.create({
model: 'qwen2.5-coder:7b',
messages: [{ role: 'user', content: 'hello' }],
tools: [
{
name: 'Read',
description: 'Read file',
input_schema: {
type: 'object',
properties: {
filePath: { type: 'string' },
},
required: ['filePath'],
},
},
],
max_tokens: 64,
stream: false,
}),
).resolves.toBeDefined()
const fallbackLog = debugSpy.mock.calls.find(call =>
typeof call?.[0] === 'string' &&
call[0].includes('self-heal retry reason=tool_call_incompatible mode=toolless'),
)
expect(fallbackLog).toBeDefined()
expect(fallbackLog?.[1]).toEqual({ level: 'warn' })
})

View File

@@ -2931,204 +2931,6 @@ test('classifies chat-completions endpoint 404 failures with endpoint_not_found
}),
).rejects.toThrow('openai_category=endpoint_not_found')
})
test('self-heals localhost resolution failures by retrying local loopback base URL', async () => {
process.env.OPENAI_BASE_URL = 'http://localhost:11434/v1'
const requestUrls: string[] = []
globalThis.fetch = (async (input, _init) => {
const url = typeof input === 'string' ? input : input.url
requestUrls.push(url)
if (url.includes('localhost')) {
const error = Object.assign(new TypeError('fetch failed'), {
code: 'ENOTFOUND',
})
throw error
}
return new Response(
JSON.stringify({
id: 'chatcmpl-1',
model: 'qwen2.5-coder:7b',
choices: [
{
message: {
role: 'assistant',
content: 'hello from loopback',
},
finish_reason: 'stop',
},
],
usage: {
prompt_tokens: 4,
completion_tokens: 3,
total_tokens: 7,
},
}),
{
status: 200,
headers: {
'Content-Type': 'application/json',
},
},
)
}) as FetchType
const client = createOpenAIShimClient({}) as OpenAIShimClient
await expect(
client.beta.messages.create({
model: 'qwen2.5-coder:7b',
messages: [{ role: 'user', content: 'hello' }],
max_tokens: 64,
stream: false,
}),
).resolves.toBeDefined()
expect(requestUrls[0]).toBe('http://localhost:11434/v1/chat/completions')
expect(requestUrls).toContain('http://127.0.0.1:11434/v1/chat/completions')
})
test('self-heals local endpoint_not_found by retrying with /v1 base URL', async () => {
process.env.OPENAI_BASE_URL = 'http://localhost:11434'
const requestUrls: string[] = []
globalThis.fetch = (async (input, _init) => {
const url = typeof input === 'string' ? input : input.url
requestUrls.push(url)
if (url === 'http://localhost:11434/chat/completions') {
return new Response('Not Found', {
status: 404,
headers: {
'Content-Type': 'text/plain',
},
})
}
return new Response(
JSON.stringify({
id: 'chatcmpl-1',
model: 'qwen2.5-coder:7b',
choices: [
{
message: {
role: 'assistant',
content: 'hello from /v1',
},
finish_reason: 'stop',
},
],
usage: {
prompt_tokens: 5,
completion_tokens: 2,
total_tokens: 7,
},
}),
{
status: 200,
headers: {
'Content-Type': 'application/json',
},
},
)
}) as FetchType
const client = createOpenAIShimClient({}) as OpenAIShimClient
await expect(
client.beta.messages.create({
model: 'qwen2.5-coder:7b',
messages: [{ role: 'user', content: 'hello' }],
max_tokens: 64,
stream: false,
}),
).resolves.toBeDefined()
expect(requestUrls).toEqual([
'http://localhost:11434/chat/completions',
'http://localhost:11434/v1/chat/completions',
])
})
test('self-heals tool-call incompatibility by retrying local Ollama requests without tools', async () => {
process.env.OPENAI_BASE_URL = 'http://localhost:11434/v1'
const requestBodies: Array<Record<string, unknown>> = []
globalThis.fetch = (async (_input, init) => {
const requestBody = JSON.parse(String(init?.body)) as Record<string, unknown>
requestBodies.push(requestBody)
if (requestBodies.length === 1) {
return new Response('tool_calls are not supported', {
status: 400,
headers: {
'Content-Type': 'text/plain',
},
})
}
return new Response(
JSON.stringify({
id: 'chatcmpl-1',
model: 'qwen2.5-coder:7b',
choices: [
{
message: {
role: 'assistant',
content: 'fallback without tools',
},
finish_reason: 'stop',
},
],
usage: {
prompt_tokens: 8,
completion_tokens: 4,
total_tokens: 12,
},
}),
{
status: 200,
headers: {
'Content-Type': 'application/json',
},
},
)
}) as FetchType
const client = createOpenAIShimClient({}) as OpenAIShimClient
await expect(
client.beta.messages.create({
model: 'qwen2.5-coder:7b',
messages: [{ role: 'user', content: 'hello' }],
tools: [
{
name: 'Read',
description: 'Read a file',
input_schema: {
type: 'object',
properties: {
filePath: { type: 'string' },
},
required: ['filePath'],
},
},
],
max_tokens: 64,
stream: false,
}),
).resolves.toBeDefined()
expect(requestBodies).toHaveLength(2)
expect(Array.isArray(requestBodies[0]?.tools)).toBe(true)
expect(requestBodies[0]?.tool_choice).toBeUndefined()
expect(
requestBodies[1]?.tools === undefined ||
(Array.isArray(requestBodies[1]?.tools) && requestBodies[1]?.tools.length === 0),
).toBe(true)
expect(requestBodies[1]?.tool_choice).toBeUndefined()
})
test('preserves valid tool_result and drops orphan tool_result', async () => {
let requestBody: Record<string, unknown> | undefined
@@ -3197,7 +2999,7 @@ test('preserves valid tool_result and drops orphan tool_result', async () => {
{
role: 'user',
content: 'What happened?',
},
}
],
},
],
@@ -3217,3 +3019,123 @@ test('preserves valid tool_result and drops orphan tool_result', async () => {
const orphanMessage = toolMessages.find(m => m.tool_call_id === 'orphan_call_2')
expect(orphanMessage).toBeUndefined()
})
test('request body does not contain store field for local providers', async () => {
process.env.CLAUDE_CODE_USE_OPENAI = '1'
process.env.OPENAI_BASE_URL = 'http://localhost:11434/v1'
let requestBody: Record<string, unknown> | undefined
globalThis.fetch = (async (_input, init) => {
requestBody = JSON.parse(String(init?.body))
return new Response(
JSON.stringify({
id: 'chatcmpl-1',
object: 'chat.completion',
model: 'test-model',
choices: [{ index: 0, message: { role: 'assistant', content: 'ok' }, finish_reason: 'stop' }],
usage: { prompt_tokens: 10, completion_tokens: 2, total_tokens: 12 },
}),
{ headers: { 'Content-Type': 'application/json' } },
)
}) as FetchType
const client = createOpenAIShimClient({ defaultHeaders: {} }) as unknown as OpenAIShimClient
await client.beta.messages.create({
model: 'some-model',
messages: [{ role: 'user', content: [{ type: 'text', text: 'hi' }] }],
max_tokens: 64,
stream: false,
})
expect(requestBody).toBeDefined()
expect('store' in requestBody!).toBe(false)
})
test('preserves reasoning_content on assistant messages with tool_calls during replay', async () => {
process.env.CLAUDE_CODE_USE_OPENAI = '1'
let requestBody: Record<string, unknown> | undefined
globalThis.fetch = (async (_input, init) => {
requestBody = JSON.parse(String(init?.body))
return new Response(
JSON.stringify({
id: 'chatcmpl-1',
object: 'chat.completion',
model: 'test-model',
choices: [{ index: 0, message: { role: 'assistant', content: 'done' }, finish_reason: 'stop' }],
usage: { prompt_tokens: 10, completion_tokens: 2, total_tokens: 12 },
}),
{ headers: { 'Content-Type': 'application/json' } },
)
}) as FetchType
const client = createOpenAIShimClient({ defaultHeaders: {} }) as unknown as OpenAIShimClient
await client.beta.messages.create({
model: 'kimi-k2.5',
messages: [
{ role: 'user', content: [{ type: 'text', text: 'read file' }] },
{
role: 'assistant',
content: [
{ type: 'thinking', thinking: 'I should use the read tool' },
{ type: 'tool_use', id: 'call_1', name: 'Read', input: { file_path: 'test.ts' } },
],
},
{
role: 'user',
content: [
{ type: 'tool_result', tool_use_id: 'call_1', content: 'file contents here' },
],
},
],
max_tokens: 64,
stream: false,
})
const messages = requestBody?.messages as Array<Record<string, unknown>>
const assistantMsg = messages.find(m => m.role === 'assistant' && m.tool_calls)
expect(assistantMsg).toBeDefined()
expect(assistantMsg!.reasoning_content).toBe('I should use the read tool')
})
test('does not add reasoning_content on assistant messages without tool_calls', async () => {
process.env.CLAUDE_CODE_USE_OPENAI = '1'
let requestBody: Record<string, unknown> | undefined
globalThis.fetch = (async (_input, init) => {
requestBody = JSON.parse(String(init?.body))
return new Response(
JSON.stringify({
id: 'chatcmpl-1',
object: 'chat.completion',
model: 'test-model',
choices: [{ index: 0, message: { role: 'assistant', content: 'ok' }, finish_reason: 'stop' }],
usage: { prompt_tokens: 10, completion_tokens: 2, total_tokens: 12 },
}),
{ headers: { 'Content-Type': 'application/json' } },
)
}) as FetchType
const client = createOpenAIShimClient({ defaultHeaders: {} }) as unknown as OpenAIShimClient
await client.beta.messages.create({
model: 'deepseek-reasoner',
messages: [
{ role: 'user', content: [{ type: 'text', text: 'explain' }] },
{
role: 'assistant',
content: [
{ type: 'thinking', thinking: 'Let me think about this' },
{ type: 'text', text: 'Here is the explanation' },
],
},
{ role: 'user', content: [{ type: 'text', text: 'thanks' }] },
],
max_tokens: 64,
stream: false,
})
const messages = requestBody?.messages as Array<Record<string, unknown>>
const assistantMsg = messages.find(m => m.role === 'assistant' && !m.tool_calls)
expect(assistantMsg).toBeDefined()
expect(assistantMsg!.reasoning_content).toBeUndefined()
})

View File

@@ -48,12 +48,10 @@ import {
} from './codexShim.js'
import { fetchWithProxyRetry } from './fetchWithProxyRetry.js'
import {
getLocalProviderRetryBaseUrls,
getGithubEndpointType,
isLocalProviderUrl,
resolveRuntimeCodexCredentials,
resolveProviderRequest,
shouldAttemptLocalToollessRetry,
getGithubEndpointType,
} from './providerConfig.js'
import {
buildOpenAICompatibilityErrorMessage,
@@ -194,6 +192,7 @@ function sleepMs(ms: number): Promise<void> {
interface OpenAIMessage {
role: 'system' | 'user' | 'assistant' | 'tool'
content?: string | Array<{ type: string; text?: string; image_url?: { url: string } }>
reasoning_content?: string
tool_calls?: Array<{
id: string
type: 'function'
@@ -418,6 +417,16 @@ function convertMessages(
}
if (toolUses.length > 0) {
// Preserve thinking text as reasoning_content for providers that
// require it on replayed assistant tool-call messages (e.g. Kimi,
// DeepSeek). Without this, follow-up requests fail with 400:
// "reasoning_content is missing in assistant tool call message".
// Note: only the first thinking block per turn is captured (.find);
// Anthropic's API typically produces one thinking block per turn.
if (thinkingBlock) {
assistantMsg.reasoning_content = (thinkingBlock as { thinking?: string }).thinking ?? ''
}
assistantMsg.tool_calls = toolUses.map(
(tu: {
id?: string
@@ -1347,9 +1356,10 @@ class OpenAIShimMessages {
delete body.max_completion_tokens
}
// mistral and gemini don't recognize body.store — Gemini returns 400
// "Invalid JSON payload received. Unknown name 'store': Cannot find field."
if (isMistral || isGeminiMode()) {
// Strip store for providers that don't recognize it. Only OpenAI's own
// API supports this field — Gemini returns 400, local servers (vLLM,
// Ollama) reject unknown fields, and other providers silently ignore it.
if (isMistral || isGeminiMode() || isLocal) {
delete body.store
}
@@ -1429,93 +1439,46 @@ class OpenAIShimMessages {
headers['X-GitHub-Api-Version'] = '2022-11-28'
}
const buildChatCompletionsUrl = (baseUrl: string): string => {
// Azure Cognitive Services / Azure OpenAI require a deployment-specific
// path and an api-version query parameter.
// Build the chat completions URL
// Azure Cognitive Services / Azure OpenAI require a deployment-specific path
// and an api-version query parameter.
// Standard format: {base}/openai/deployments/{model}/chat/completions?api-version={version}
// Non-Azure: {base}/chat/completions
let chatCompletionsUrl: string
if (isAzure) {
const apiVersion = process.env.AZURE_OPENAI_API_VERSION ?? '2024-12-01-preview'
const deployment = request.resolvedModel ?? process.env.OPENAI_MODEL ?? 'gpt-4o'
// If base URL already contains /deployments/, use it as-is with api-version.
if (/\/deployments\//i.test(baseUrl)) {
const normalizedBase = baseUrl.replace(/\/+$/, '')
return `${normalizedBase}/chat/completions?api-version=${apiVersion}`
// If base URL already contains /deployments/, use it as-is with api-version
if (/\/deployments\//i.test(request.baseUrl)) {
const base = request.baseUrl.replace(/\/+$/, '')
chatCompletionsUrl = `${base}/chat/completions?api-version=${apiVersion}`
} else {
// Strip trailing /v1 or /openai/v1 if present, then build Azure path
const base = request.baseUrl.replace(/\/(openai\/)?v1\/?$/, '').replace(/\/+$/, '')
chatCompletionsUrl = `${base}/openai/deployments/${deployment}/chat/completions?api-version=${apiVersion}`
}
} else {
chatCompletionsUrl = `${request.baseUrl}/chat/completions`
}
// Strip trailing /v1 or /openai/v1 if present, then build Azure path.
const normalizedBase = baseUrl
.replace(/\/(openai\/)?v1\/?$/, '')
.replace(/\/+$/, '')
return `${normalizedBase}/openai/deployments/${deployment}/chat/completions?api-version=${apiVersion}`
}
return `${baseUrl}/chat/completions`
}
const localRetryBaseUrls = isLocal
? getLocalProviderRetryBaseUrls(request.baseUrl)
: []
let activeBaseUrl = request.baseUrl
let chatCompletionsUrl = buildChatCompletionsUrl(activeBaseUrl)
const attemptedLocalBaseUrls = new Set<string>([activeBaseUrl])
let didRetryWithoutTools = false
const promoteNextLocalBaseUrl = (
reason: 'endpoint_not_found' | 'localhost_resolution_failed',
): boolean => {
for (const candidateBaseUrl of localRetryBaseUrls) {
if (attemptedLocalBaseUrls.has(candidateBaseUrl)) {
continue
}
const previousUrl = chatCompletionsUrl
attemptedLocalBaseUrls.add(candidateBaseUrl)
activeBaseUrl = candidateBaseUrl
chatCompletionsUrl = buildChatCompletionsUrl(activeBaseUrl)
logForDebugging(
`[OpenAIShim] self-heal retry reason=${reason} method=POST from=${redactUrlForDiagnostics(previousUrl)} to=${redactUrlForDiagnostics(chatCompletionsUrl)} model=${request.resolvedModel}`,
{ level: 'warn' },
)
return true
}
return false
}
let serializedBody = JSON.stringify(body)
const refreshSerializedBody = (): void => {
serializedBody = JSON.stringify(body)
}
const buildFetchInit = () => ({
const fetchInit = {
method: 'POST' as const,
headers,
body: serializedBody,
body: JSON.stringify(body),
signal: options?.signal,
})
}
const maxSelfHealAttempts = isLocal
? localRetryBaseUrls.length + 1
: 0
const maxAttempts = (isGithub ? GITHUB_429_MAX_RETRIES : 1) + maxSelfHealAttempts
const maxAttempts = isGithub ? GITHUB_429_MAX_RETRIES : 1
const throwClassifiedTransportError = (
error: unknown,
requestUrl: string,
preclassifiedFailure?: ReturnType<typeof classifyOpenAINetworkFailure>,
): never => {
if (options?.signal?.aborted) {
throw error
}
const failure =
preclassifiedFailure ??
classifyOpenAINetworkFailure(error, {
const failure = classifyOpenAINetworkFailure(error, {
url: requestUrl,
})
const redactedUrl = redactUrlForDiagnostics(requestUrl)
@@ -1548,11 +1511,8 @@ class OpenAIShimMessages {
responseHeaders: Headers,
requestUrl: string,
rateHint = '',
preclassifiedFailure?: ReturnType<typeof classifyOpenAIHttpFailure>,
): never => {
const failure =
preclassifiedFailure ??
classifyOpenAIHttpFailure({
const failure = classifyOpenAIHttpFailure({
status,
body: errorBody,
})
@@ -1577,13 +1537,10 @@ class OpenAIShimMessages {
let response: Response | undefined
for (let attempt = 0; attempt < maxAttempts; attempt++) {
try {
response = await fetchWithProxyRetry(
chatCompletionsUrl,
buildFetchInit(),
)
response = await fetchWithProxyRetry(chatCompletionsUrl, fetchInit)
} catch (error) {
const isAbortError =
options?.signal?.aborted === true ||
fetchInit.signal?.aborted === true ||
(typeof DOMException !== 'undefined' &&
error instanceof DOMException &&
error.name === 'AbortError') ||
@@ -1596,19 +1553,7 @@ class OpenAIShimMessages {
throw error
}
const failure = classifyOpenAINetworkFailure(error, {
url: chatCompletionsUrl,
})
if (
isLocal &&
failure.category === 'localhost_resolution_failed' &&
promoteNextLocalBaseUrl('localhost_resolution_failed')
) {
continue
}
throwClassifiedTransportError(error, chatCompletionsUrl, failure)
throwClassifiedTransportError(error, chatCompletionsUrl)
}
if (response.ok) {
@@ -1700,10 +1645,6 @@ class OpenAIShimMessages {
return responsesResponse
}
const responsesErrorBody = await responsesResponse.text().catch(() => 'unknown error')
const responsesFailure = classifyOpenAIHttpFailure({
status: responsesResponse.status,
body: responsesErrorBody,
})
let responsesErrorResponse: object | undefined
try { responsesErrorResponse = JSON.parse(responsesErrorBody) } catch { /* raw text */ }
throwClassifiedHttpError(
@@ -1712,49 +1653,10 @@ class OpenAIShimMessages {
responsesErrorResponse,
responsesResponse.headers,
responsesUrl,
'',
responsesFailure,
)
}
}
const failure = classifyOpenAIHttpFailure({
status: response.status,
body: errorBody,
})
if (
isLocal &&
failure.category === 'endpoint_not_found' &&
promoteNextLocalBaseUrl('endpoint_not_found')
) {
continue
}
const hasToolsPayload =
Array.isArray(body.tools) &&
body.tools.length > 0
if (
!didRetryWithoutTools &&
failure.category === 'tool_call_incompatible' &&
shouldAttemptLocalToollessRetry({
baseUrl: activeBaseUrl,
hasTools: hasToolsPayload,
})
) {
didRetryWithoutTools = true
delete body.tools
delete body.tool_choice
refreshSerializedBody()
logForDebugging(
`[OpenAIShim] self-heal retry reason=tool_call_incompatible mode=toolless method=POST url=${redactUrlForDiagnostics(chatCompletionsUrl)} model=${request.resolvedModel}`,
{ level: 'warn' },
)
continue
}
let errorResponse: object | undefined
try { errorResponse = JSON.parse(errorBody) } catch { /* raw text */ }
throwClassifiedHttpError(
@@ -1764,7 +1666,6 @@ class OpenAIShimMessages {
response.headers as unknown as Headers,
chatCompletionsUrl,
rateHint,
failure,
)
}
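Taken together, the self-heal branches in this hunk form a bounded retry loop around the chat completions POST. A condensed sketch of that control flow, with the helper names treated as assumptions about the surrounding code rather than exact signatures:

async function postWithSelfHeal(
  baseUrls: string[],
  body: Record<string, unknown>,
  classify: (status: number) => 'endpoint_not_found' | 'tool_call_incompatible' | 'other',
): Promise<Response> {
  // Sketch: baseUrls[0] is the configured URL, the rest are fallback
  // candidates (e.g. from getLocalProviderRetryBaseUrls).
  let urlIndex = 0
  let didRetryWithoutTools = false
  const maxAttempts = baseUrls.length + 1
  for (let attempt = 0; attempt < maxAttempts; attempt++) {
    const response = await fetch(`${baseUrls[urlIndex]}/chat/completions`, {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify(body),
    })
    if (response.ok) return response
    const category = classify(response.status)
    if (category === 'endpoint_not_found' && urlIndex + 1 < baseUrls.length) {
      urlIndex++ // promote the next candidate base URL and retry
      continue
    }
    if (category === 'tool_call_incompatible' && !didRetryWithoutTools && Array.isArray(body.tools)) {
      didRetryWithoutTools = true
      delete body.tools // retry once without tool definitions
      delete body.tool_choice
      continue
    }
    throw new Error(`chat completions request failed with status ${response.status}`)
  }
  throw new Error('exhausted self-heal attempts')
}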

View File

@@ -2,10 +2,8 @@ import { afterEach, expect, test } from 'bun:test'
import {
getAdditionalModelOptionsCacheScope,
getLocalProviderRetryBaseUrls,
isLocalProviderUrl,
resolveProviderRequest,
shouldAttemptLocalToollessRetry,
} from './providerConfig.js'
const originalEnv = {
@@ -85,42 +83,3 @@ test('skips local model cache scope for remote openai-compatible providers', ()
expect(getAdditionalModelOptionsCacheScope()).toBeNull()
})
test('derives local retry base URLs with /v1 and loopback fallback candidates', () => {
expect(getLocalProviderRetryBaseUrls('http://localhost:11434')).toEqual([
'http://localhost:11434/v1',
'http://127.0.0.1:11434',
'http://127.0.0.1:11434/v1',
])
})
test('does not derive local retry base URLs for remote providers', () => {
expect(getLocalProviderRetryBaseUrls('https://api.openai.com/v1')).toEqual([])
})
test('enables local toolless retry for likely Ollama endpoints with tools', () => {
expect(
shouldAttemptLocalToollessRetry({
baseUrl: 'http://localhost:11434/v1',
hasTools: true,
}),
).toBe(true)
})
test('disables local toolless retry when no tools are present', () => {
expect(
shouldAttemptLocalToollessRetry({
baseUrl: 'http://localhost:11434/v1',
hasTools: false,
}),
).toBe(false)
})
test('disables local toolless retry for non-Ollama local endpoints', () => {
expect(
shouldAttemptLocalToollessRetry({
baseUrl: 'http://localhost:1234/v1',
hasTools: true,
}),
).toBe(false)
})

View File

@@ -305,101 +305,6 @@ export function isLocalProviderUrl(baseUrl: string | undefined): boolean {
}
}
function trimTrailingSlash(value: string): string {
return value.replace(/\/+$/, '')
}
function normalizePathWithV1(pathname: string): string {
const trimmed = trimTrailingSlash(pathname)
if (!trimmed || trimmed === '/') {
return '/v1'
}
if (trimmed.toLowerCase().endsWith('/v1')) {
return trimmed
}
return `${trimmed}/v1`
}
function isLikelyOllamaEndpoint(baseUrl: string): boolean {
try {
const parsed = new URL(baseUrl)
const hostname = parsed.hostname.toLowerCase()
const pathname = parsed.pathname.toLowerCase()
if (parsed.port === '11434') {
return true
}
return (
hostname.includes('ollama') ||
pathname.includes('ollama')
)
} catch {
return false
}
}
export function getLocalProviderRetryBaseUrls(baseUrl: string): string[] {
if (!isLocalProviderUrl(baseUrl)) {
return []
}
try {
const parsed = new URL(baseUrl)
const original = trimTrailingSlash(parsed.toString())
const seen = new Set<string>([original])
const candidates: string[] = []
const addCandidate = (hostname: string, pathname: string): void => {
const next = new URL(parsed.toString())
next.hostname = hostname
next.pathname = pathname
next.search = ''
next.hash = ''
const normalized = trimTrailingSlash(next.toString())
if (seen.has(normalized)) {
return
}
seen.add(normalized)
candidates.push(normalized)
}
const v1Pathname = normalizePathWithV1(parsed.pathname)
if (v1Pathname !== trimTrailingSlash(parsed.pathname)) {
addCandidate(parsed.hostname, v1Pathname)
}
const hostname = parsed.hostname.toLowerCase().replace(/^\[|\]$/g, '')
if (hostname === 'localhost' || hostname === '::1') {
addCandidate('127.0.0.1', parsed.pathname || '/')
addCandidate('127.0.0.1', v1Pathname)
}
return candidates
} catch {
return []
}
}
export function shouldAttemptLocalToollessRetry(options: {
baseUrl: string
hasTools: boolean
}): boolean {
if (!options.hasTools) {
return false
}
if (!isLocalProviderUrl(options.baseUrl)) {
return false
}
return isLikelyOllamaEndpoint(options.baseUrl)
}
export function isCodexBaseUrl(baseUrl: string | undefined): boolean {
if (!baseUrl) return false
try {
@@ -507,9 +412,6 @@ export function resolveProviderRequest(options?: {
? normalizedGeminiEnvBaseUrl
: asNamedEnvUrl(process.env.OPENAI_BASE_URL, 'OPENAI_BASE_URL')
// In Mistral mode, a literal "undefined" MISTRAL_BASE_URL is treated as
// misconfiguration and falls back to OPENAI_API_BASE, then
// DEFAULT_MISTRAL_BASE_URL for a safe default endpoint.
const fallbackEnvBaseUrl = isMistralMode
? (primaryEnvBaseUrl === undefined
? asNamedEnvUrl(process.env.OPENAI_API_BASE, 'OPENAI_API_BASE') ?? DEFAULT_MISTRAL_BASE_URL

View File

@@ -70,7 +70,7 @@ describe('runAutoFixCheck', () => {
test('handles timeout gracefully', async () => {
const result = await runAutoFixCheck({
lint: 'node -e "setTimeout(() => {}, 10000)"',
lint: 'sleep 10',
timeout: 100,
cwd: '/tmp',

View File

@@ -46,31 +46,14 @@ async function runCommand(
const killTree = () => {
try {
if (isWindows && proc.pid) {
// shell=true on Windows can leave child commands running unless we
// terminate the full process tree.
const killer = spawn('taskkill', ['/pid', String(proc.pid), '/T', '/F'], {
windowsHide: true,
stdio: 'ignore',
})
killer.unref()
return
}
if (proc.pid) {
if (!isWindows && proc.pid) {
// Kill the entire process group
process.kill(-proc.pid, 'SIGTERM')
return
}
} else {
proc.kill('SIGTERM')
} catch {
// Process may have already exited; fallback to direct child kill.
try {
proc.kill('SIGTERM')
} catch {
// Ignore final fallback errors.
}
} catch {
// Process may have already exited
}
}
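On POSIX, the negative-PID form only reaches grandchildren when the child was started in its own process group, so the spawn side has to cooperate with killTree. A minimal sketch of that pairing (the spawned command is illustrative):

import { spawn } from 'node:child_process'

const isWindows = process.platform === 'win32'
// detached: true puts the child in its own process group on POSIX,
// which is what makes process.kill(-pid) terminate the whole tree.
const proc = spawn(process.execPath, ['-e', 'setTimeout(() => {}, 10000)'], {
  detached: !isWindows,
  stdio: 'ignore',
})

function killTree(): void {
  if (!proc.pid) return
  if (isWindows) {
    // taskkill walks the tree (/T) and force-kills (/F) on Windows.
    spawn('taskkill', ['/pid', String(proc.pid), '/T', '/F'], {
      windowsHide: true,
      stdio: 'ignore',
    }).unref()
    return
  }
  try {
    process.kill(-proc.pid, 'SIGTERM') // negative PID targets the process group
  } catch {
    proc.kill('SIGTERM') // group already gone; fall back to the direct child
  }
}

setTimeout(killTree, 100)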

View File

@@ -2524,7 +2524,7 @@ export async function transformResultContent(
return [
{
type: 'text',
text: recursivelySanitizeUnicode(resultContent.text) as string,
text: resultContent.text,
},
]
case 'audio': {
@@ -2569,9 +2569,7 @@ export async function transformResultContent(
return [
{
type: 'text',
text: recursivelySanitizeUnicode(
`${prefix}${resource.text}`,
) as string,
text: `${prefix}${resource.text}`,
},
]
} else if ('blob' in resource) {
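recursivelySanitizeUnicode itself is not shown in this hunk; as an assumption about its intent, a sanitizer along these lines replaces lone UTF-16 surrogates so tool result text encodes cleanly as UTF-8 in the request body:

// Sketch (assumed behavior, not the actual helper): map unpaired surrogate
// code units to U+FFFD so the string survives UTF-8 encoding in transport.
function sanitizeLoneSurrogates(text: string): string {
  return text.replace(
    /[\uD800-\uDBFF](?![\uDC00-\uDFFF])|(?<![\uD800-\uDBFF])[\uDC00-\uDFFF]/g,
    '\uFFFD',
  )
}

sanitizeLoneSurrogates('ok \uD800 truncated emoji') // => 'ok \uFFFD truncated emoji'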

View File

@@ -26,10 +26,10 @@ test('initializeWiki creates the expected wiki scaffold', async () => {
expect(result.alreadyExisted).toBe(false)
expect(result.createdFiles).toEqual([
join('.openclaude', 'wiki', 'schema.md'),
join('.openclaude', 'wiki', 'index.md'),
join('.openclaude', 'wiki', 'log.md'),
join('.openclaude', 'wiki', 'pages', 'architecture.md'),
'.openclaude/wiki/schema.md',
'.openclaude/wiki/index.md',
'.openclaude/wiki/log.md',
'.openclaude/wiki/pages/architecture.md',
])
expect(await readFile(paths.schemaFile, 'utf8')).toContain(
'# OpenClaude Wiki Schema',
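Whether the expectations are written with join() or with literal slashes only matters off POSIX: path.join uses the platform separator, so the two spellings diverge on Windows. A quick illustration:

import { join } from 'node:path'

// POSIX:    .openclaude/wiki/schema.md
// Windows:  .openclaude\wiki\schema.md
const scaffoldPath = join('.openclaude', 'wiki', 'schema.md')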

View File

@@ -59,7 +59,7 @@ export function generatePrompt(): string {
## Configurable settings list
The following settings are available for you to change:
### Global Settings (stored in ~/.openclaude.json)
### Global Settings (stored in ~/.claude.json)
${globalSettings.join('\n')}
### Project Settings (stored in settings.json)

View File

@@ -15,7 +15,6 @@ import {
} from '../../utils/mcpOutputStorage.js'
import { getSettings_DEPRECATED } from '../../utils/settings/settings.js'
import { asSystemPrompt } from '../../utils/systemPromptType.js'
import { ssrfGuardedLookup } from '../../utils/hooks/ssrfGuard.js'
import { isPreapprovedHost } from './preapproved.js'
import { makeSecondaryModelPrompt } from './prompt.js'
@@ -282,7 +281,6 @@ export async function getWithPermittedRedirects(
maxRedirects: 0,
responseType: 'arraybuffer',
maxContentLength: MAX_HTTP_CONTENT_LENGTH,
lookup: ssrfGuardedLookup,
headers: {
Accept: 'text/markdown, text/html, */*',
'User-Agent': getWebFetchUserAgent(),
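The ssrfGuardedLookup hook slots into the request's lookup option so hostname resolution can be vetoed before any connection is made. Its implementation is not part of this hunk; a minimal sketch of the idea, with an illustrative (not authoritative) block list built from Node's net.BlockList:

import dns from 'node:dns'
import net from 'node:net'

const blockedRanges = new net.BlockList()
blockedRanges.addSubnet('127.0.0.0', 8, 'ipv4')
blockedRanges.addSubnet('10.0.0.0', 8, 'ipv4')
blockedRanges.addSubnet('172.16.0.0', 12, 'ipv4')
blockedRanges.addSubnet('192.168.0.0', 16, 'ipv4')
blockedRanges.addAddress('::1', 'ipv6')

// Drop-in for the `lookup` option: resolve, then refuse private/loopback results.
export function guardedLookup(
  hostname: string,
  options: dns.LookupOneOptions,
  callback: (err: NodeJS.ErrnoException | null, address: string, family: number) => void,
): void {
  dns.lookup(hostname, options, (err, address, family) => {
    if (err) return callback(err, '', 4)
    if (blockedRanges.check(address, family === 6 ? 'ipv6' : 'ipv4')) {
      return callback(new Error(`refusing to connect to private address ${address}`), '', family)
    }
    callback(null, address, family)
  })
}

Since the request above sets maxRedirects: 0, each manually followed redirect goes back through the same guarded request path rather than bypassing the check.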

View File

@@ -693,7 +693,7 @@ export function refreshAwsAuth(awsAuthRefresh: string): Promise<boolean> {
'AWS auth refresh timed out after 3 minutes. Run your auth command manually in a separate terminal.',
)
: chalk.red(
'Error running awsAuthRefresh (in settings or ~/.openclaude.json):',
'Error running awsAuthRefresh (in settings or ~/.claude.json):',
)
// biome-ignore lint/suspicious/noConsole:: intentional console output
console.error(message)
@@ -771,7 +771,7 @@ async function getAwsCredsFromCredentialExport(): Promise<{
}
} catch (e) {
const message = chalk.red(
'Error getting AWS credentials from awsCredentialExport (in settings or ~/.openclaude.json):',
'Error getting AWS credentials from awsCredentialExport (in settings or ~/.claude.json):',
)
if (e instanceof Error) {
// biome-ignore lint/suspicious/noConsole:: intentional console output
@@ -961,7 +961,7 @@ export function refreshGcpAuth(gcpAuthRefresh: string): Promise<boolean> {
'GCP auth refresh timed out after 3 minutes. Run your auth command manually in a separate terminal.',
)
: chalk.red(
'Error running gcpAuthRefresh (in settings or ~/.openclaude.json):',
'Error running gcpAuthRefresh (in settings or ~/.claude.json):',
)
// biome-ignore lint/suspicious/noConsole:: intentional console output
console.error(message)
@@ -1959,7 +1959,7 @@ export async function validateForceLoginOrg(): Promise<OrgValidationResult> {
// Always fetch the authoritative org UUID from the profile endpoint.
// Even keychain-sourced tokens verify server-side: the cached org UUID
// in ~/.openclaude.json is user-writable and cannot be trusted.
// in ~/.claude.json is user-writable and cannot be trusted.
const { source } = getAuthTokenSource()
const isEnvVarToken =
source === 'CLAUDE_CODE_OAUTH_TOKEN' ||

View File

@@ -28,7 +28,7 @@ import { getSettingsForSource } from './settings/settings.js'
* is lazy-initialized) and ensure Node.js compatibility.
*
* This is safe to call before the trust dialog because we only read from
* user-controlled files (~/.claude/settings.json and ~/.openclaude.json),
* user-controlled files (~/.claude/settings.json and ~/.claude.json),
* not from project-level settings.
*/
export function applyExtraCACertsFromConfig(): void {
@@ -52,7 +52,7 @@ export function applyExtraCACertsFromConfig(): void {
* after the trust dialog. But we need the CA cert early to establish the TLS
* connection to an HTTPS proxy during init().
*
* We read from global config (~/.openclaude.json) and user settings
* We read from global config (~/.claude.json) and user settings
* (~/.claude/settings.json). These are user-controlled files that don't
* require trust approval.
*/

View File

@@ -355,7 +355,7 @@ exec ${command}
*
* Only positive detections are persisted. A negative result from the
* filesystem scan is not cached, because it may come from a machine that
* shares ~/.openclaude.json but has no local Chrome (e.g. a remote dev
* shares ~/.claude.json but has no local Chrome (e.g. a remote dev
* environment using the bridge), and caching it would permanently poison
* auto-enable for every session on every machine that reads that config.
*/

View File

@@ -918,7 +918,7 @@ let configCacheHits = 0
let configCacheMisses = 0
// Session-total count of actual disk writes to the global config file.
// Exposed for internal-only dev diagnostics (see inc-4552) so anomalous write
// rates surface in the UI before they corrupt ~/.openclaude.json.
// rates surface in the UI before they corrupt ~/.claude.json.
let globalConfigWriteCount = 0
export function getGlobalConfigWriteCount(): number {
@@ -1257,7 +1257,7 @@ function saveConfigWithLock<A extends object>(
const currentConfig = getConfig(file, createDefault)
if (file === getGlobalClaudeFile() && wouldLoseAuthState(currentConfig)) {
logForDebugging(
'saveConfigWithLock: re-read config is missing auth that cache has; refusing to write to avoid wiping ~/.openclaude.json. See GH #3117.',
'saveConfigWithLock: re-read config is missing auth that cache has; refusing to write to avoid wiping ~/.claude.json. See GH #3117.',
{ level: 'error' },
)
logEvent('tengu_config_auth_loss_prevented', {})

View File

@@ -190,16 +190,20 @@ export function getModelMaxOutputTokens(model: string): {
}
// OpenAI-compatible provider — use known output limits to avoid 400 errors
if (
const isOpenAICompatProvider =
isEnvTruthy(process.env.CLAUDE_CODE_USE_OPENAI) ||
isEnvTruthy(process.env.CLAUDE_CODE_USE_GEMINI) ||
isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB) ||
isEnvTruthy(process.env.CLAUDE_CODE_USE_MISTRAL)
) {
if (isOpenAICompatProvider) {
const openaiMax = getOpenAIMaxOutputTokens(model)
if (openaiMax !== undefined) {
return { default: openaiMax, upperLimit: openaiMax }
}
// Unknown 3P model — use conservative default to avoid vLLM/Ollama 400
// errors when the default 32k exceeds the model's max_model_len.
// Users can override with CLAUDE_CODE_MAX_OUTPUT_TOKENS.
return { default: 4_096, upperLimit: 16_384 }
}
const m = getCanonicalName(model)
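The effect of the conservative fallback is easiest to see as a small sketch mirroring the branch above (the unknown model name is made up; the known limit comes from the OPENAI_MAX_OUTPUT_TOKENS table further down):

function maxOutputTokensFor3P(
  knownLimits: Record<string, number>,
  model: string,
): { default: number; upperLimit: number } {
  const openaiMax = knownLimits[model]
  if (openaiMax !== undefined) {
    return { default: openaiMax, upperLimit: openaiMax }
  }
  // Unknown model: stay well under typical vLLM/Ollama max_model_len values.
  return { default: 4_096, upperLimit: 16_384 }
}

maxOutputTokensFor3P({ 'gemini-3-flash-preview': 65_536 }, 'gemini-3-flash-preview')
// => { default: 65_536, upperLimit: 65_536 }
maxOutputTokensFor3P({}, 'my-local-finetune:7b')
// => { default: 4_096, upperLimit: 16_384 }; CLAUDE_CODE_MAX_OUTPUT_TOKENS can override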

View File

@@ -253,7 +253,7 @@ async function resolveClaudePath(): Promise<string> {
* Check whether the OS-level protocol handler is already registered AND
* points at the expected `claude` binary. Reads the registration artifact
* directly (symlink target, .desktop Exec line, registry value) rather than
* a cached flag in ~/.openclaude.json, so:
* a cached flag in ~/.claude.json, so:
* - the check is per-machine (config can sync across machines; OS state can't)
* - stale paths self-heal (install-method change → re-register next session)
* - deleted artifacts self-heal
@@ -311,7 +311,7 @@ export async function ensureDeepLinkProtocolRegistered(): Promise<void> {
// EACCES/ENOSPC are deterministic — retrying next session won't help.
// Throttle to once per 24h so a read-only ~/.local/share/applications
// doesn't generate a failure event on every startup. Marker lives in
// ~/.claude (per-machine, not synced) rather than ~/.openclaude.json (can sync).
// ~/.claude (per-machine, not synced) rather than ~/.claude.json (can sync).
const failureMarkerPath = path.join(
getClaudeConfigHomeDir(),
'.deep-link-register-failed',

View File

@@ -1,62 +0,0 @@
import { afterEach, beforeEach, expect, test } from 'bun:test'
import { mkdtempSync, rmSync, writeFileSync } from 'fs'
import { tmpdir } from 'os'
import { join } from 'path'
const originalEnv = {
CLAUDE_CONFIG_DIR: process.env.CLAUDE_CONFIG_DIR,
CLAUDE_CODE_CUSTOM_OAUTH_URL: process.env.CLAUDE_CODE_CUSTOM_OAUTH_URL,
USER_TYPE: process.env.USER_TYPE,
}
let tempDir: string
beforeEach(() => {
tempDir = mkdtempSync(join(tmpdir(), 'openclaude-env-test-'))
process.env.CLAUDE_CONFIG_DIR = tempDir
delete process.env.CLAUDE_CODE_CUSTOM_OAUTH_URL
delete process.env.USER_TYPE
})
afterEach(() => {
rmSync(tempDir, { recursive: true, force: true })
if (originalEnv.CLAUDE_CONFIG_DIR === undefined) {
delete process.env.CLAUDE_CONFIG_DIR
} else {
process.env.CLAUDE_CONFIG_DIR = originalEnv.CLAUDE_CONFIG_DIR
}
if (originalEnv.CLAUDE_CODE_CUSTOM_OAUTH_URL === undefined) {
delete process.env.CLAUDE_CODE_CUSTOM_OAUTH_URL
} else {
process.env.CLAUDE_CODE_CUSTOM_OAUTH_URL = originalEnv.CLAUDE_CODE_CUSTOM_OAUTH_URL
}
if (originalEnv.USER_TYPE === undefined) {
delete process.env.USER_TYPE
} else {
process.env.USER_TYPE = originalEnv.USER_TYPE
}
})
async function importFreshEnvModule() {
return import(`./env.js?ts=${Date.now()}-${Math.random()}`)
}
// getGlobalClaudeFile — three migration branches
test('getGlobalClaudeFile: new install returns .openclaude.json when neither file exists', async () => {
const { getGlobalClaudeFile } = await importFreshEnvModule()
expect(getGlobalClaudeFile()).toBe(join(tempDir, '.openclaude.json'))
})
test('getGlobalClaudeFile: existing user keeps .claude.json when only legacy file exists', async () => {
writeFileSync(join(tempDir, '.claude.json'), '{}')
const { getGlobalClaudeFile } = await importFreshEnvModule()
expect(getGlobalClaudeFile()).toBe(join(tempDir, '.claude.json'))
})
test('getGlobalClaudeFile: migrated user uses .openclaude.json when both files exist', async () => {
writeFileSync(join(tempDir, '.claude.json'), '{}')
writeFileSync(join(tempDir, '.openclaude.json'), '{}')
const { getGlobalClaudeFile } = await importFreshEnvModule()
expect(getGlobalClaudeFile()).toBe(join(tempDir, '.openclaude.json'))
})

View File

@@ -21,21 +21,8 @@ export const getGlobalClaudeFile = memoize((): string => {
return join(getClaudeConfigHomeDir(), '.config.json')
}
const oauthSuffix = fileSuffixForOauthConfig()
const configDir = process.env.CLAUDE_CONFIG_DIR || homedir()
// Default to .openclaude.json. Fall back to .claude.json only if the new
// file doesn't exist yet and the legacy one does (same migration pattern
// as resolveClaudeConfigHomeDir for the config directory).
const newFilename = `.openclaude${oauthSuffix}.json`
const legacyFilename = `.claude${oauthSuffix}.json`
if (
!getFsImplementation().existsSync(join(configDir, newFilename)) &&
getFsImplementation().existsSync(join(configDir, legacyFilename))
) {
return join(configDir, legacyFilename)
}
return join(configDir, newFilename)
const filename = `.claude${fileSuffixForOauthConfig()}.json`
return join(process.env.CLAUDE_CONFIG_DIR || homedir(), filename)
})
const hasInternetAccess = memoize(async (): Promise<boolean> => {

View File

@@ -24,7 +24,7 @@ type CachedParse = { ok: true; value: unknown } | { ok: false }
// lodash memoize default resolver = first arg only).
// Skip caching above this size — the LRU stores the full string as the key,
// so a 200KB config file would pin ~10MB in #keyList across 50 slots. Large
// inputs like ~/.openclaude.json also change between reads (numStartups bumps on
// inputs like ~/.claude.json also change between reads (numStartups bumps on
// every CC startup), so the cache never hits anyway.
const PARSE_CACHE_MAX_KEY_BYTES = 8 * 1024

View File

@@ -44,10 +44,9 @@ function getCandidateLocalBinaryPaths(localInstallDir: string): string[] {
}
export function isManagedLocalInstallationPath(execPath: string): boolean {
const normalizedExecPath = execPath.replace(/\\+/g, '/')
return (
normalizedExecPath.includes('/.openclaude/local/node_modules/') ||
normalizedExecPath.includes('/.claude/local/node_modules/')
execPath.includes('/.openclaude/local/node_modules/') ||
execPath.includes('/.claude/local/node_modules/')
)
}
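The normalization matters on Windows, where execPath arrives with backslashes; a quick check with an illustrative path:

const winExecPath = 'C:\\Users\\dev\\.openclaude\\local\\node_modules\\.bin\\claude'

winExecPath.includes('/.openclaude/local/node_modules/')                      // => false
winExecPath.replace(/\\+/g, '/').includes('/.openclaude/local/node_modules/') // => true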

View File

@@ -131,7 +131,7 @@ export function applySafeConfigEnvironmentVariables(): void {
: null
}
// Global config (~/.openclaude.json) is user-controlled. In CCD mode,
// Global config (~/.claude.json) is user-controlled. In CCD mode,
// filterSettingsEnv strips keys that were in the spawn env snapshot so
// the desktop host's operational vars (OTEL, etc.) are not overridden.
Object.assign(process.env, filterSettingsEnv(getGlobalConfig().env))

View File

@@ -123,6 +123,7 @@ export const SAFE_ENV_VARS = new Set([
'ANTHROPIC_DEFAULT_SONNET_MODEL_DESCRIPTION',
'ANTHROPIC_DEFAULT_SONNET_MODEL_NAME',
'ANTHROPIC_DEFAULT_SONNET_MODEL_SUPPORTED_CAPABILITIES',
'ANTHROPIC_FOUNDRY_API_KEY',
'ANTHROPIC_MODEL',
'ANTHROPIC_SMALL_FAST_MODEL_AWS_REGION',
'ANTHROPIC_SMALL_FAST_MODEL',

View File

@@ -177,14 +177,18 @@ const OPENAI_CONTEXT_WINDOWS: Record<string, number> = {
'MiniMax-M2': 204_800,
// Google (via OpenRouter)
'google/gemini-2.0-flash':1_048_576,
'google/gemini-2.0-flash': 1_048_576,
'google/gemini-2.5-pro': 1_048_576,
'google/gemini-3-flash-preview': 1_048_576,
'google/gemini-3.1-pro-preview': 1_048_576,
// Google (native via CLAUDE_CODE_USE_GEMINI)
'gemini-2.0-flash': 1_048_576,
'gemini-2.5-pro': 1_048_576,
'gemini-2.5-flash': 1_048_576,
'gemini-3-flash-preview': 1_048_576,
'gemini-3.1-pro': 1_048_576,
'gemini-3.1-pro-preview': 1_048_576,
'gemini-3.1-flash-lite-preview': 1_048_576,
// Ollama local models
@@ -331,12 +335,16 @@ const OPENAI_MAX_OUTPUT_TOKENS: Record<string, number> = {
// Google (via OpenRouter)
'google/gemini-2.0-flash': 8_192,
'google/gemini-2.5-pro': 65_536,
'google/gemini-3-flash-preview': 65_536,
'google/gemini-3.1-pro-preview': 65_536,
// Google (native via CLAUDE_CODE_USE_GEMINI)
'gemini-2.0-flash': 8_192,
'gemini-2.5-pro': 65_536,
'gemini-2.5-flash': 65_536,
'gemini-3-flash-preview': 65_536,
'gemini-3.1-pro': 65_536,
'gemini-3.1-pro-preview': 65_536,
'gemini-3.1-flash-lite-preview': 65_536,
// Ollama local models (conservative safe defaults)

View File

@@ -107,60 +107,3 @@ test('official OpenAI base URLs now keep provider detection on openai for aliase
const { getAPIProvider } = await importFreshProvidersModule()
expect(getAPIProvider()).toBe('openai')
})
// isGithubNativeAnthropicMode
test('isGithubNativeAnthropicMode: false when CLAUDE_CODE_USE_GITHUB is not set', async () => {
clearProviderEnv()
process.env.OPENAI_MODEL = 'claude-sonnet-4-5'
const { isGithubNativeAnthropicMode } = await importFreshProvidersModule()
expect(isGithubNativeAnthropicMode()).toBe(false)
})
test('isGithubNativeAnthropicMode: true for bare claude- model via OPENAI_MODEL', async () => {
clearProviderEnv()
process.env.CLAUDE_CODE_USE_GITHUB = '1'
process.env.OPENAI_MODEL = 'claude-sonnet-4-5'
const { isGithubNativeAnthropicMode } = await importFreshProvidersModule()
expect(isGithubNativeAnthropicMode()).toBe(true)
})
test('isGithubNativeAnthropicMode: true for github:copilot:claude- compound format', async () => {
clearProviderEnv()
process.env.CLAUDE_CODE_USE_GITHUB = '1'
process.env.OPENAI_MODEL = 'github:copilot:claude-sonnet-4'
const { isGithubNativeAnthropicMode } = await importFreshProvidersModule()
expect(isGithubNativeAnthropicMode()).toBe(true)
})
test('isGithubNativeAnthropicMode: true when resolvedModel is a claude- model', async () => {
clearProviderEnv()
process.env.CLAUDE_CODE_USE_GITHUB = '1'
process.env.OPENAI_MODEL = 'github:copilot'
const { isGithubNativeAnthropicMode } = await importFreshProvidersModule()
expect(isGithubNativeAnthropicMode('claude-haiku-4-5')).toBe(true)
})
test('isGithubNativeAnthropicMode: false for generic github:copilot alias', async () => {
clearProviderEnv()
process.env.CLAUDE_CODE_USE_GITHUB = '1'
process.env.OPENAI_MODEL = 'github:copilot'
const { isGithubNativeAnthropicMode } = await importFreshProvidersModule()
expect(isGithubNativeAnthropicMode()).toBe(false)
})
test('isGithubNativeAnthropicMode: false for non-Claude model', async () => {
clearProviderEnv()
process.env.CLAUDE_CODE_USE_GITHUB = '1'
process.env.OPENAI_MODEL = 'gpt-4o'
const { isGithubNativeAnthropicMode } = await importFreshProvidersModule()
expect(isGithubNativeAnthropicMode()).toBe(false)
})
test('isGithubNativeAnthropicMode: false for github:copilot:gpt- model', async () => {
clearProviderEnv()
process.env.CLAUDE_CODE_USE_GITHUB = '1'
process.env.OPENAI_MODEL = 'github:copilot:gpt-4o'
const { isGithubNativeAnthropicMode } = await importFreshProvidersModule()
expect(isGithubNativeAnthropicMode()).toBe(false)
})

View File

@@ -45,24 +45,6 @@ export function getAPIProvider(): APIProvider {
export function usesAnthropicAccountFlow(): boolean {
return getAPIProvider() === 'firstParty'
}
/**
* Returns true when the GitHub provider should use Anthropic's native API
* format instead of the OpenAI-compatible shim.
*
* Enabled when CLAUDE_CODE_USE_GITHUB=1 and the model string contains "claude-"
* anywhere (handles bare names like "claude-sonnet-4" and compound formats like
* "github:copilot:claude-sonnet-4" or any future provider-prefixed variants).
*
* api.githubcopilot.com supports Anthropic native format for Claude models,
* enabling prompt caching via cache_control blocks which significantly reduces
* per-turn token costs by caching the system prompt and tool definitions.
*/
export function isGithubNativeAnthropicMode(resolvedModel?: string): boolean {
if (!isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB)) return false
const model = resolvedModel?.trim() || process.env.OPENAI_MODEL?.trim() || ''
return model.toLowerCase().includes('claude-')
}
function isCodexModel(): boolean {
return shouldUseCodexTransport(
process.env.OPENAI_MODEL || '',

View File

@@ -64,7 +64,6 @@ export const DANGEROUS_FILES = [
'.profile',
'.ripgreprc',
'.mcp.json',
'.openclaude.json',
'.claude.json',
] as const

View File

@@ -532,7 +532,6 @@ export async function gitPull(
): Promise<{ code: number; stderr: string }> {
logForDebugging(`git pull: cwd=${cwd} ref=${ref ?? 'default'}`)
const env = { ...process.env, ...GIT_NO_PROMPT_ENV }
const baseArgs = ['-c', 'core.hooksPath=/dev/null']
const credentialArgs = options?.disableCredentialHelper
? ['-c', 'credential.helper=']
: []
@@ -540,7 +539,7 @@ export async function gitPull(
if (ref) {
const fetchResult = await execFileNoThrowWithCwd(
gitExe(),
[...baseArgs, ...credentialArgs, 'fetch', 'origin', ref],
[...credentialArgs, 'fetch', 'origin', ref],
{ cwd, timeout: getPluginGitTimeoutMs(), stdin: 'ignore', env },
)
@@ -550,7 +549,7 @@ export async function gitPull(
const checkoutResult = await execFileNoThrowWithCwd(
gitExe(),
[...baseArgs, ...credentialArgs, 'checkout', ref],
[...credentialArgs, 'checkout', ref],
{ cwd, timeout: getPluginGitTimeoutMs(), stdin: 'ignore', env },
)
@@ -560,7 +559,7 @@ export async function gitPull(
const pullResult = await execFileNoThrowWithCwd(
gitExe(),
[...baseArgs, ...credentialArgs, 'pull', 'origin', ref],
[...credentialArgs, 'pull', 'origin', ref],
{ cwd, timeout: getPluginGitTimeoutMs(), stdin: 'ignore', env },
)
if (pullResult.code !== 0) {
@@ -572,7 +571,7 @@ export async function gitPull(
const result = await execFileNoThrowWithCwd(
gitExe(),
[...baseArgs, ...credentialArgs, 'pull', 'origin', 'HEAD'],
[...credentialArgs, 'pull', 'origin', 'HEAD'],
{ cwd, timeout: getPluginGitTimeoutMs(), stdin: 'ignore', env },
)
if (result.code !== 0) {
@@ -626,8 +625,6 @@ async function gitSubmoduleUpdate(
[
'-c',
'core.sshCommand=ssh -o BatchMode=yes -o StrictHostKeyChecking=yes',
'-c',
'core.hooksPath=/dev/null',
...credentialArgs,
'submodule',
'update',
@@ -813,8 +810,6 @@ export async function gitClone(
const args = [
'-c',
'core.sshCommand=ssh -o BatchMode=yes -o StrictHostKeyChecking=yes',
'-c',
'core.hooksPath=/dev/null',
'clone',
'--depth',
'1',
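With baseArgs prepended, every plugin git invocation carries the hooksPath override ahead of the subcommand; the assembled argv for the fetch step looks roughly like this (ref value illustrative):

const baseArgs = ['-c', 'core.hooksPath=/dev/null']
const credentialArgs = ['-c', 'credential.helper=']
// git -c core.hooksPath=/dev/null -c credential.helper= fetch origin v1.2.3
const fetchArgs = [...baseArgs, ...credentialArgs, 'fetch', 'origin', 'v1.2.3']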

View File

@@ -1,9 +1,9 @@
import { afterEach, expect, mock, test } from 'bun:test'
async function loadProviderDiscoveryModule() {
// @ts-expect-error cache-busting query string for Bun module mocks
return import(`./providerDiscovery.js?ts=${Date.now()}-${Math.random()}`)
}
import {
getLocalOpenAICompatibleProviderLabel,
listOpenAICompatibleModels,
} from './providerDiscovery.js'
const originalFetch = globalThis.fetch
const originalEnv = {
@@ -16,8 +16,6 @@ afterEach(() => {
})
test('lists models from a local openai-compatible /models endpoint', async () => {
const { listOpenAICompatibleModels } = await loadProviderDiscoveryModule()
globalThis.fetch = mock((input, init) => {
const url = typeof input === 'string' ? input : input.url
expect(url).toBe('http://localhost:1234/v1/models')
@@ -49,8 +47,6 @@ test('lists models from a local openai-compatible /models endpoint', async () =>
})
test('returns null when a local openai-compatible /models request fails', async () => {
const { listOpenAICompatibleModels } = await loadProviderDiscoveryModule()
globalThis.fetch = mock(() =>
Promise.resolve(new Response('not available', { status: 503 })),
) as typeof globalThis.fetch
@@ -60,19 +56,13 @@ test('returns null when a local openai-compatible /models request fails', async
).resolves.toBeNull()
})
test('detects LM Studio from the default localhost port', async () => {
const { getLocalOpenAICompatibleProviderLabel } =
await loadProviderDiscoveryModule()
test('detects LM Studio from the default localhost port', () => {
expect(getLocalOpenAICompatibleProviderLabel('http://localhost:1234/v1')).toBe(
'LM Studio',
)
})
test('detects common local openai-compatible providers by hostname', async () => {
const { getLocalOpenAICompatibleProviderLabel } =
await loadProviderDiscoveryModule()
test('detects common local openai-compatible providers by hostname', () => {
expect(
getLocalOpenAICompatibleProviderLabel('http://localai.local:8080/v1'),
).toBe('LocalAI')
@@ -81,212 +71,8 @@ test('detects common local openai-compatible providers by hostname', async () =>
).toBe('vLLM')
})
test('falls back to a generic local openai-compatible label', async () => {
const { getLocalOpenAICompatibleProviderLabel } =
await loadProviderDiscoveryModule()
test('falls back to a generic local openai-compatible label', () => {
expect(
getLocalOpenAICompatibleProviderLabel('http://127.0.0.1:8080/v1'),
).toBe('Local OpenAI-compatible')
})
test('ollama generation readiness reports unreachable when tags endpoint is down', async () => {
const { probeOllamaGenerationReadiness } = await loadProviderDiscoveryModule()
const calledUrls: string[] = []
globalThis.fetch = mock(input => {
const url = typeof input === 'string' ? input : input.url
calledUrls.push(url)
return Promise.resolve(new Response('not available', { status: 503 }))
}) as typeof globalThis.fetch
await expect(
probeOllamaGenerationReadiness({
baseUrl: 'http://localhost:11434',
}),
).resolves.toMatchObject({
state: 'unreachable',
models: [],
})
expect(calledUrls).toEqual([
'http://localhost:11434/api/tags',
])
})
test('ollama generation readiness reports no models when server is reachable', async () => {
const { probeOllamaGenerationReadiness } = await loadProviderDiscoveryModule()
const calledUrls: string[] = []
globalThis.fetch = mock(input => {
const url = typeof input === 'string' ? input : input.url
calledUrls.push(url)
return Promise.resolve(
new Response(JSON.stringify({ models: [] }), {
status: 200,
headers: { 'Content-Type': 'application/json' },
}),
)
}) as typeof globalThis.fetch
await expect(
probeOllamaGenerationReadiness({
baseUrl: 'http://localhost:11434',
}),
).resolves.toMatchObject({
state: 'no_models',
models: [],
})
expect(calledUrls).toEqual([
'http://localhost:11434/api/tags',
])
})
test('ollama generation readiness reports generation_failed when requested model is missing', async () => {
const { probeOllamaGenerationReadiness } = await loadProviderDiscoveryModule()
const calledUrls: string[] = []
globalThis.fetch = mock(input => {
const url = typeof input === 'string' ? input : input.url
calledUrls.push(url)
return Promise.resolve(
new Response(
JSON.stringify({
models: [{ name: 'llama3.1:8b', size: 1024 }],
}),
{
status: 200,
headers: { 'Content-Type': 'application/json' },
},
),
)
}) as typeof globalThis.fetch
await expect(
probeOllamaGenerationReadiness({
baseUrl: 'http://localhost:11434',
model: 'qwen2.5-coder:7b',
}),
).resolves.toMatchObject({
state: 'generation_failed',
probeModel: 'qwen2.5-coder:7b',
detail: 'requested model not installed: qwen2.5-coder:7b',
})
expect(calledUrls).toEqual(['http://localhost:11434/api/tags'])
})
test('ollama generation readiness reports generation failures when chat probe fails', async () => {
const { probeOllamaGenerationReadiness } = await loadProviderDiscoveryModule()
globalThis.fetch = mock(input => {
const url = typeof input === 'string' ? input : input.url
if (url.endsWith('/api/tags')) {
return Promise.resolve(
new Response(
JSON.stringify({
models: [{ name: 'qwen2.5-coder:7b', size: 42 }],
}),
{
status: 200,
headers: { 'Content-Type': 'application/json' },
},
),
)
}
return Promise.resolve(new Response('model not found', { status: 404 }))
}) as typeof globalThis.fetch
await expect(
probeOllamaGenerationReadiness({
baseUrl: 'http://localhost:11434',
model: 'qwen2.5-coder:7b',
}),
).resolves.toMatchObject({
state: 'generation_failed',
probeModel: 'qwen2.5-coder:7b',
})
})
test('ollama generation readiness reports generation_failed when chat probe returns invalid JSON', async () => {
const { probeOllamaGenerationReadiness } = await loadProviderDiscoveryModule()
globalThis.fetch = mock(input => {
const url = typeof input === 'string' ? input : input.url
if (url.endsWith('/api/tags')) {
return Promise.resolve(
new Response(
JSON.stringify({
models: [{ name: 'llama3.1:8b', size: 1024 }],
}),
{
status: 200,
headers: { 'Content-Type': 'application/json' },
},
),
)
}
return Promise.resolve(
new Response('<html>proxy error</html>', {
status: 200,
headers: { 'Content-Type': 'text/html' },
}),
)
}) as typeof globalThis.fetch
await expect(
probeOllamaGenerationReadiness({
baseUrl: 'http://localhost:11434',
}),
).resolves.toMatchObject({
state: 'generation_failed',
probeModel: 'llama3.1:8b',
detail: 'invalid JSON response',
})
})
test('ollama generation readiness reports ready when chat probe succeeds', async () => {
const { probeOllamaGenerationReadiness } = await loadProviderDiscoveryModule()
globalThis.fetch = mock(input => {
const url = typeof input === 'string' ? input : input.url
if (url.endsWith('/api/tags')) {
return Promise.resolve(
new Response(
JSON.stringify({
models: [{ name: 'llama3.1:8b', size: 1024 }],
}),
{
status: 200,
headers: { 'Content-Type': 'application/json' },
},
),
)
}
return Promise.resolve(
new Response(
JSON.stringify({
message: { role: 'assistant', content: 'OK' },
done: true,
}),
{
status: 200,
headers: { 'Content-Type': 'application/json' },
},
),
)
}) as typeof globalThis.fetch
await expect(
probeOllamaGenerationReadiness({
baseUrl: 'http://localhost:11434',
}),
).resolves.toMatchObject({
state: 'ready',
probeModel: 'llama3.1:8b',
})
})
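The loadProviderDiscoveryModule pattern used throughout these tests relies on the module loader treating each unique specifier as a separate module, so module-level state is rebuilt per test; the same trick in isolation:

// A unique query string defeats the ES module cache, so memoized values and
// env-derived constants inside the module are re-initialized on every call.
async function importFresh(modulePath: string): Promise<Record<string, unknown>> {
  return import(`${modulePath}?ts=${Date.now()}-${Math.random()}`)
}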

View File

@@ -4,13 +4,6 @@ import { DEFAULT_OPENAI_BASE_URL } from '../services/api/providerConfig.js'
export const DEFAULT_OLLAMA_BASE_URL = 'http://localhost:11434'
export const DEFAULT_ATOMIC_CHAT_BASE_URL = 'http://127.0.0.1:1337'
export type OllamaGenerationReadiness = {
state: 'ready' | 'unreachable' | 'no_models' | 'generation_failed'
models: OllamaModelDescriptor[]
probeModel?: string
detail?: string
}
function withTimeoutSignal(timeoutMs: number): {
signal: AbortSignal
clear: () => void
@@ -27,83 +20,6 @@ function trimTrailingSlash(value: string): string {
return value.replace(/\/+$/, '')
}
function compactDetail(value: string, maxLength = 180): string {
const compact = value.trim().replace(/\s+/g, ' ')
if (!compact) {
return ''
}
if (compact.length <= maxLength) {
return compact
}
return `${compact.slice(0, maxLength)}...`
}
type OllamaTagsPayload = {
models?: Array<{
name?: string
size?: number
details?: {
family?: string
families?: string[]
parameter_size?: string
quantization_level?: string
}
}>
}
function normalizeOllamaModels(
payload: OllamaTagsPayload,
): OllamaModelDescriptor[] {
return (payload.models ?? [])
.filter(model => Boolean(model.name))
.map(model => ({
name: model.name!,
sizeBytes: typeof model.size === 'number' ? model.size : null,
family: model.details?.family ?? null,
families: model.details?.families ?? [],
parameterSize: model.details?.parameter_size ?? null,
quantizationLevel: model.details?.quantization_level ?? null,
}))
}
async function fetchOllamaModelsProbe(
baseUrl?: string,
timeoutMs = 5000,
): Promise<{
reachable: boolean
models: OllamaModelDescriptor[]
}> {
const { signal, clear } = withTimeoutSignal(timeoutMs)
try {
const response = await fetch(`${getOllamaApiBaseUrl(baseUrl)}/api/tags`, {
method: 'GET',
signal,
})
if (!response.ok) {
return {
reachable: false,
models: [],
}
}
const payload = (await response.json().catch(() => ({}))) as OllamaTagsPayload
return {
reachable: true,
models: normalizeOllamaModels(payload),
}
} catch {
return {
reachable: false,
models: [],
}
} finally {
clear()
}
}
export function getOllamaApiBaseUrl(baseUrl?: string): string {
const parsed = new URL(
baseUrl || process.env.OLLAMA_BASE_URL || DEFAULT_OLLAMA_BASE_URL,
@@ -205,15 +121,61 @@ export function getLocalOpenAICompatibleProviderLabel(baseUrl?: string): string
}
export async function hasLocalOllama(baseUrl?: string): Promise<boolean> {
const { reachable } = await fetchOllamaModelsProbe(baseUrl, 1200)
return reachable
const { signal, clear } = withTimeoutSignal(1200)
try {
const response = await fetch(`${getOllamaApiBaseUrl(baseUrl)}/api/tags`, {
method: 'GET',
signal,
})
return response.ok
} catch {
return false
} finally {
clear()
}
}
export async function listOllamaModels(
baseUrl?: string,
): Promise<OllamaModelDescriptor[]> {
const { models } = await fetchOllamaModelsProbe(baseUrl, 5000)
return models
const { signal, clear } = withTimeoutSignal(5000)
try {
const response = await fetch(`${getOllamaApiBaseUrl(baseUrl)}/api/tags`, {
method: 'GET',
signal,
})
if (!response.ok) {
return []
}
const data = (await response.json()) as {
models?: Array<{
name?: string
size?: number
details?: {
family?: string
families?: string[]
parameter_size?: string
quantization_level?: string
}
}>
}
return (data.models ?? [])
.filter(model => Boolean(model.name))
.map(model => ({
name: model.name!,
sizeBytes: typeof model.size === 'number' ? model.size : null,
family: model.details?.family ?? null,
families: model.details?.families ?? [],
parameterSize: model.details?.parameter_size ?? null,
quantizationLevel: model.details?.quantization_level ?? null,
}))
} catch {
return []
} finally {
clear()
}
}
export async function listOpenAICompatibleModels(options?: {
@@ -332,106 +294,3 @@ export async function benchmarkOllamaModel(
clear()
}
}
export async function probeOllamaGenerationReadiness(options?: {
baseUrl?: string
model?: string
timeoutMs?: number
}): Promise<OllamaGenerationReadiness> {
const timeoutMs = options?.timeoutMs ?? 8000
const { reachable, models } = await fetchOllamaModelsProbe(
options?.baseUrl,
timeoutMs,
)
if (!reachable) {
return {
state: 'unreachable',
models: [],
}
}
if (models.length === 0) {
return {
state: 'no_models',
models: [],
}
}
const requestedModel = options?.model?.trim() || undefined
if (requestedModel && !models.some(model => model.name === requestedModel)) {
return {
state: 'generation_failed',
models,
probeModel: requestedModel,
detail: `requested model not installed: ${requestedModel}`,
}
}
const probeModel = requestedModel ?? models[0]!.name
const { signal, clear } = withTimeoutSignal(timeoutMs)
try {
const response = await fetch(`${getOllamaApiBaseUrl(options?.baseUrl)}/api/chat`, {
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
signal,
body: JSON.stringify({
model: probeModel,
stream: false,
messages: [{ role: 'user', content: 'Reply with OK.' }],
options: {
temperature: 0,
num_predict: 8,
},
}),
})
if (!response.ok) {
const responseBody = await response.text().catch(() => '')
const detailSuffix = compactDetail(responseBody)
return {
state: 'generation_failed',
models,
probeModel,
detail: detailSuffix
? `status ${response.status}: ${detailSuffix}`
: `status ${response.status}`,
}
}
try {
await response.json()
} catch {
return {
state: 'generation_failed',
models,
probeModel,
detail: 'invalid JSON response',
}
}
return {
state: 'ready',
models,
probeModel,
}
} catch (error) {
const detail =
error instanceof Error
? error.name === 'AbortError'
? 'request timed out'
: error.message
: String(error)
return {
state: 'generation_failed',
models,
probeModel,
detail,
}
} finally {
clear()
}
}
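A minimal sketch of how a caller can branch on the readiness states above, assuming the export and the OllamaGenerationReadiness shape shown in this file (the warning strings are illustrative):

import { probeOllamaGenerationReadiness } from './providerDiscovery.js'

const readiness = await probeOllamaGenerationReadiness({
  baseUrl: 'http://localhost:11434',
  model: process.env.OPENAI_MODEL,
})

switch (readiness.state) {
  case 'ready':
    break // safe to route traffic at readiness.probeModel
  case 'unreachable':
    console.warn('Ollama is not reachable on the configured base URL')
    break
  case 'no_models':
    console.warn('Ollama is reachable but no models are installed')
    break
  case 'generation_failed':
    console.warn(`Probe with ${readiness.probeModel} failed: ${readiness.detail ?? 'unknown error'}`)
    break
}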

View File

@@ -456,19 +456,10 @@ const checkDependencies = memoize((): SandboxDependencyCheck => {
})
})
/**
* Read sandbox.enabled only from trusted settings sources.
* projectSettings is intentionally excluded — a malicious repo could
* otherwise disable the sandbox via .claude/settings.json.
*/
function getSandboxEnabledSetting(): boolean {
try {
return !!(
getSettingsForSource('userSettings')?.sandbox?.enabled ||
getSettingsForSource('localSettings')?.sandbox?.enabled ||
getSettingsForSource('flagSettings')?.sandbox?.enabled ||
getSettingsForSource('policySettings')?.sandbox?.enabled
)
const settings = getSettings_DEPRECATED()
return settings?.sandbox?.enabled ?? false
} catch (error) {
logForDebugging(`Failed to get settings for sandbox check: ${error}`)
return false

View File

@@ -300,9 +300,9 @@ export function getRelativeSettingsFilePathForSource(
): string {
switch (source) {
case 'projectSettings':
return '.openclaude/settings.json'
return join('.openclaude', 'settings.json')
case 'localSettings':
return '.openclaude/settings.local.json'
return join('.openclaude', 'settings.local.json')
}
}

View File

@@ -207,10 +207,6 @@ export function createPermissionRequest(params: {
}
/**
* @deprecated Use sendPermissionRequestViaMailbox() instead. This file-based
* approach writes to an unauthenticated directory where any local process can
* forge requests. Retained for backward compatibility but no longer called.
*
* Write a permission request to the pending directory with file locking
* Called by worker agents when they need permission approval from the leader
*
@@ -254,10 +250,6 @@ export async function writePermissionRequest(
}
/**
* @deprecated No longer called — permission requests are sent via mailbox.
* The pending directory is an unauthenticated channel. Retained for backward
* compatibility.
*
* Read all pending permission requests for a team
* Called by the team leader to see what requests need attention
*/
@@ -320,11 +312,6 @@ export async function readPendingPermissions(
}
/**
* @deprecated No longer called — permission responses are delivered via mailbox
* (processMailboxPermissionResponse). The resolved directory is an unauthenticated
* channel where any local process can forge approvals. Retained for backward
* compatibility.
*
* Read a resolved permission request by ID
* Called by workers to check if their request has been resolved
*
@@ -365,10 +352,6 @@ export async function readResolvedPermission(
}
/**
* @deprecated Use sendPermissionResponseViaMailbox() instead. This file-based
* approach writes to an unauthenticated directory where any local process can
* forge approvals. Retained for backward compatibility but no longer called.
*
* Resolve a permission request
* Called by the team leader (or worker in self-resolution cases)
*
@@ -553,10 +536,6 @@ export type PermissionResponse = {
}
/**
* @deprecated Use processMailboxPermissionResponse() via useInboxPoller instead.
* File-based polling reads from an unauthenticated directory where any local
* process can forge approval files. Retained for backward compatibility.
*
* Poll for a permission response (worker-side convenience function)
* Converts the resolved request into a simpler response format
*
@@ -585,9 +564,6 @@ export async function pollForResponse(
}
/**
* @deprecated File-based response cleanup is no longer needed — responses are
* delivered via mailbox. Retained for backward compatibility.
*
* Remove a worker's response after processing
* This is an alias for deleteResolvedPermission for backward compatibility
*/
@@ -625,9 +601,6 @@ export function isSwarmWorker(): boolean {
}
/**
* @deprecated File-based resolved permissions are no longer written. Responses
* are delivered via mailbox. Retained for backward compatibility.
*
* Delete a resolved permission file
* Called after a worker has processed the resolution
*/
@@ -662,8 +635,8 @@ export async function deleteResolvedPermission(
}
/**
* @deprecated Alias for writePermissionRequest, which is itself deprecated.
* Use sendPermissionRequestViaMailbox() instead.
* Submit a permission request (alias for writePermissionRequest)
* Provided for backward compatibility with worker integration code
*/
export const submitPermissionRequest = writePermissionRequest

View File

@@ -1,38 +0,0 @@
import { describe, expect, test } from 'bun:test'
import { redactUrlForDisplay } from './urlRedaction.ts'
describe('redactUrlForDisplay', () => {
test('redacts credentials and sensitive query params for valid URLs', () => {
const redacted = redactUrlForDisplay(
'http://user:pass@localhost:11434/v1?api_key=secret&foo=bar',
)
expect(redacted).toBe(
'http://redacted:redacted@localhost:11434/v1?api_key=redacted&foo=bar',
)
})
test('redacts token-like query parameter names', () => {
const redacted = redactUrlForDisplay(
'https://example.com/v1?x_access_token=abc123&model=qwen2.5-coder',
)
expect(redacted).toBe(
'https://example.com/v1?x_access_token=redacted&model=qwen2.5-coder',
)
})
test('falls back to regex redaction for malformed URLs', () => {
const redacted = redactUrlForDisplay(
'//user:pass@localhost:11434?token=abc&mode=test',
)
expect(redacted).toBe('//redacted@localhost:11434?token=redacted&mode=test')
})
test('keeps non-sensitive URLs unchanged', () => {
const url = 'http://localhost:11434/v1?model=llama3.1:8b'
expect(redactUrlForDisplay(url)).toBe(url)
})
})

View File

@@ -1,48 +0,0 @@
const SENSITIVE_URL_QUERY_PARAM_TOKENS = [
'api_key',
'apikey',
'key',
'token',
'access_token',
'refresh_token',
'signature',
'sig',
'secret',
'password',
'passwd',
'pwd',
'auth',
'authorization',
]
function shouldRedactUrlQueryParam(name: string): boolean {
const lower = name.toLowerCase()
return SENSITIVE_URL_QUERY_PARAM_TOKENS.some(token => lower.includes(token))
}
export function redactUrlForDisplay(rawUrl: string): string {
try {
const parsed = new URL(rawUrl)
if (parsed.username) {
parsed.username = 'redacted'
}
if (parsed.password) {
parsed.password = 'redacted'
}
for (const key of parsed.searchParams.keys()) {
if (shouldRedactUrlQueryParam(key)) {
parsed.searchParams.set(key, 'redacted')
}
}
return parsed.toString()
} catch {
return rawUrl
.replace(/\/\/[^/@\s]+(?::[^/@\s]*)?@/g, '//redacted@')
.replace(
/([?&](?:token|access_token|refresh_token|api_key|apikey|key|password|passwd|pwd|auth|authorization|signature|sig|secret)=)[^&#]*/gi,
'$1redacted',
)
}
}
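A short usage sketch, matching the expectations in the test above:

import { redactUrlForDisplay } from './urlRedaction.js'

const baseUrl = 'http://user:pass@localhost:11434/v1?api_key=secret&foo=bar'
console.log(`probing ${redactUrlForDisplay(baseUrl)}`)
// => probing http://redacted:redacted@localhost:11434/v1?api_key=redacted&foo=bar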