From 8c6a10517fc6bf8a846ae30535bc38e7b3259dd0 Mon Sep 17 00:00:00 2001 From: gnanam1990 Date: Thu, 2 Apr 2026 09:05:00 +0530 Subject: [PATCH 01/35] fix: show correct version in startup screen MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit StartupScreen.ts was reading the version via globalThis['MACRO_DISPLAY_VERSION'] which is never populated — the Bun bundler inlines it as MACRO.DISPLAY_VERSION (dot notation), not as a globalThis key. Result: startup screen always showed the hardcoded fallback 'v0.1.4' regardless of the installed version. Fix: use MACRO.DISPLAY_VERSION ?? MACRO.VERSION directly, consistent with cli.tsx, main.tsx, and logoV2Utils.ts. Fixes #95 Co-Authored-By: Claude Sonnet 4.6 --- src/components/StartupScreen.ts | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/components/StartupScreen.ts b/src/components/StartupScreen.ts index 602b3d08..6fd347a4 100644 --- a/src/components/StartupScreen.ts +++ b/src/components/StartupScreen.ts @@ -5,6 +5,8 @@ * Addresses: https://github.com/Gitlawb/openclaude/issues/55 */ +declare const MACRO: { VERSION: string; DISPLAY_VERSION?: string } + const ESC = '\x1b[' const RESET = `${ESC}0m` const DIM = `${ESC}2m` @@ -172,7 +174,7 @@ export function printStartupScreen(): void { out.push(boxRow(sRow, W, sLen)) out.push(`${rgb(...BORDER)}\u255a${'\u2550'.repeat(W - 2)}\u255d${RESET}`) - out.push(` ${DIM}${rgb(...DIMCOL)}openclaude v${(globalThis as Record)['MACRO_DISPLAY_VERSION'] ?? '0.1.4'}${RESET}`) + out.push(` ${DIM}${rgb(...DIMCOL)}openclaude v${MACRO.DISPLAY_VERSION ?? MACRO.VERSION}${RESET}`) out.push('') process.stdout.write(out.join('\n') + '\n') From 47b19c9a00f4c45d70b96c83204133fc2d7d0130 Mon Sep 17 00:00:00 2001 From: gnanam1990 Date: Thu, 2 Apr 2026 09:11:12 +0530 Subject: [PATCH 02/35] fix: style version number in startup screen accent orange Apply the existing ACCENT colour (rgb 240 148 100) to the version string so it stands out against the dim label, matching the warm orange used throughout the startup screen for stars and status text. Requested in #95. Co-Authored-By: Claude Sonnet 4.6 --- src/components/StartupScreen.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/components/StartupScreen.ts b/src/components/StartupScreen.ts index 6fd347a4..ded4f457 100644 --- a/src/components/StartupScreen.ts +++ b/src/components/StartupScreen.ts @@ -174,7 +174,7 @@ export function printStartupScreen(): void { out.push(boxRow(sRow, W, sLen)) out.push(`${rgb(...BORDER)}\u255a${'\u2550'.repeat(W - 2)}\u255d${RESET}`) - out.push(` ${DIM}${rgb(...DIMCOL)}openclaude v${MACRO.DISPLAY_VERSION ?? MACRO.VERSION}${RESET}`) + out.push(` ${DIM}${rgb(...DIMCOL)}openclaude ${RESET}${rgb(...ACCENT)}v${MACRO.DISPLAY_VERSION ?? MACRO.VERSION}${RESET}`) out.push('') process.stdout.write(out.join('\n') + '\n') From f3ebd7d256e7454658cc65b5f29d3b8d0226d80d Mon Sep 17 00:00:00 2001 From: Alex Date: Thu, 2 Apr 2026 12:01:01 +0800 Subject: [PATCH 03/35] fix: convert max_tokens to max_completion_tokens for Azure OpenAI Azure OpenAI API rejects the max_tokens parameter and requires max_completion_tokens instead. This change ensures the conversion is robust by validating that max_tokens is a positive number before using it, preventing edge cases like null or "null" string values from being incorrectly sent. 
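For illustration (placeholder deployment name; the actual guard is in the diff
below), the shim now maps the parameter like this:

    // caller-side, Anthropic-style params
    const params = { model: 'my-deployment', max_tokens: 4096, stream: false }
    // body sent to Azure OpenAI:
    //   { model: 'my-deployment', max_completion_tokens: 4096, stream: false }
    //
    // max_tokens values that are not positive numbers (null, 0, the string "null")
    // are simply omitted rather than forwarded as max_completion_tokens.
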
Co-Authored-By: Claude Opus 4.6 --- src/services/api/openaiShim.ts | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/src/services/api/openaiShim.ts b/src/services/api/openaiShim.ts index 9b77d07e..9a500490 100644 --- a/src/services/api/openaiShim.ts +++ b/src/services/api/openaiShim.ts @@ -656,10 +656,20 @@ class OpenAIShimMessages { messages: openaiMessages, stream: params.stream ?? false, } - if (params.max_tokens !== undefined) { - body.max_completion_tokens = params.max_tokens - } else if ((params as Record).max_completion_tokens !== undefined) { - body.max_completion_tokens = (params as Record).max_completion_tokens + // Convert max_tokens to max_completion_tokens for OpenAI API compatibility. + // Azure OpenAI requires max_completion_tokens and does not accept max_tokens. + // Ensure max_tokens is a valid positive number before using it. + const maxTokensValue = typeof params.max_tokens === 'number' && params.max_tokens > 0 + ? params.max_tokens + : undefined + const maxCompletionTokensValue = typeof (params as Record).max_completion_tokens === 'number' + ? (params as Record).max_completion_tokens as number + : undefined + + if (maxTokensValue !== undefined) { + body.max_completion_tokens = maxTokensValue + } else if (maxCompletionTokensValue !== undefined) { + body.max_completion_tokens = maxCompletionTokensValue } if (params.stream) { From 5f75f67a270bbfcfa6ae452a0cac5cc0e5c2b6da Mon Sep 17 00:00:00 2001 From: Mikey <5217366+BrainSlugs83@users.noreply.github.com> Date: Wed, 1 Apr 2026 21:29:42 -0700 Subject: [PATCH 04/35] security: pin all dependencies to exact versions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Removes caret (^) ranges from all 74 dependencies in package.json, locking each to the exact version resolved in bun.lock. Motivation: the axios supply chain attack of March 31 2026 demonstrated that caret ranges are a live attack vector. axios@^1.14.0 would have resolved to the trojanized 1.14.1 (bundled plain-crypto-js RAT, C2 sfrclak.com). Both 1.14.1 and 0.30.4 were unpublished within 24h. Key pins: axios ^1.14.0 → 1.14.0 (trojanized 1.14.1 blocked) undici ^7.3.0 → 7.24.6 (7 CVEs between 7.3 and 7.24) yaml ^2.7.0 → 2.8.3 (CVE-2026-33532 fix) ajv ^8.17.0 → 8.18.0 (ReDoS fix) lodash-es ^4.17.21 → 4.17.23 (prototype pollution fix) zod ^3.24.0 → 3.25.76 (large range locked) All 74 deps verified: integrity hashes match npm registry, no known supply chain incidents, no postinstall scripts in lockfile. 
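To keep the pins from regressing, a check along these lines can run in CI
(illustrative sketch only; assumes Bun or Node and the repo root as cwd):

    // check-exact-pins.ts: illustrative sketch; fails the build if any
    // dependency specifier in package.json is a range instead of an exact pin.
    import { readFileSync } from 'fs'

    const pkg = JSON.parse(readFileSync('package.json', 'utf-8'))
    const offenders: string[] = []

    for (const section of ['dependencies', 'devDependencies']) {
      for (const [name, spec] of Object.entries<string>(pkg[section] ?? {})) {
        // Exact pins look like 1.2.3 or 1.2.3-beta.1; ^ ~ * x > < || mean a range.
        if (!/^\d+\.\d+\.\d+(-[0-9A-Za-z.-]+)?$/.test(spec)) {
          offenders.push(`${section}: ${name}@${spec}`)
        }
      }
    }

    if (offenders.length > 0) {
      console.error(`Non-exact dependency specifiers found:\n${offenders.join('\n')}`)
      process.exit(1)
    }
    console.log('All dependency specifiers are exact pins')
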
--- bun.lock | 148 +++++++++++++++++++++++++-------------------------- package.json | 148 +++++++++++++++++++++++++-------------------------- 2 files changed, 148 insertions(+), 148 deletions(-) diff --git a/bun.lock b/bun.lock index 7051fc49..ce4a898c 100644 --- a/bun.lock +++ b/bun.lock @@ -5,82 +5,82 @@ "": { "name": "openclaude", "dependencies": { - "@alcalzone/ansi-tokenize": "^0.3.0", - "@anthropic-ai/bedrock-sdk": "^0.26.0", - "@anthropic-ai/foundry-sdk": "^0.2.0", - "@anthropic-ai/sandbox-runtime": "^0.0.46", - "@anthropic-ai/sdk": "^0.81.0", - "@anthropic-ai/vertex-sdk": "^0.14.0", - "@commander-js/extra-typings": "^12.0.0", - "@growthbook/growthbook": "^1.3.0", - "@modelcontextprotocol/sdk": "^1.12.0", - "@opentelemetry/api": "^1.9.1", - "@opentelemetry/api-logs": "^0.214.0", - "@opentelemetry/core": "^2.6.1", - "@opentelemetry/exporter-logs-otlp-http": "^0.214.0", - "@opentelemetry/exporter-trace-otlp-grpc": "^0.57.0", - "@opentelemetry/resources": "^2.6.1", - "@opentelemetry/sdk-logs": "^0.214.0", - "@opentelemetry/sdk-metrics": "^2.6.1", - "@opentelemetry/sdk-trace-base": "^2.6.1", - "@opentelemetry/sdk-trace-node": "^2.6.1", - "@opentelemetry/semantic-conventions": "^1.40.0", - "ajv": "^8.17.0", - "auto-bind": "^5.0.1", - "axios": "^1.14.0", - "bidi-js": "^1.0.3", - "chalk": "^5.4.0", - "chokidar": "^4.0.0", - "cli-boxes": "^3.0.0", - "cli-highlight": "^2.1.0", - "code-excerpt": "^4.0.0", - "commander": "^12.0.0", - "diff": "^7.0.0", - "emoji-regex": "^10.4.0", - "env-paths": "^3.0.0", - "execa": "^9.5.0", - "fflate": "^0.8.2", - "figures": "^6.1.0", - "fuse.js": "^7.1.0", - "get-east-asian-width": "^1.3.0", - "google-auth-library": "^9.15.0", - "https-proxy-agent": "^7.0.6", - "ignore": "^7.0.0", - "indent-string": "^5.0.0", - "jsonc-parser": "^3.3.1", - "lodash-es": "^4.17.21", - "lru-cache": "^11.0.0", - "marked": "^15.0.0", - "p-map": "^7.0.3", - "picomatch": "^4.0.0", - "proper-lockfile": "^4.1.2", - "qrcode": "^1.5.4", - "react": "^19.2.4", - "react-compiler-runtime": "^1.0.0", - "react-reconciler": "^0.33.0", - "semver": "^7.6.3", - "shell-quote": "^1.8.2", - "signal-exit": "^4.1.0", - "stack-utils": "^2.0.6", - "strip-ansi": "^7.1.0", - "supports-hyperlinks": "^3.1.0", - "tree-kill": "^1.2.2", - "turndown": "^7.2.0", - "type-fest": "^4.30.0", - "undici": "^7.3.0", - "usehooks-ts": "^3.1.1", - "vscode-languageserver-protocol": "^3.17.5", - "wrap-ansi": "^9.0.0", - "ws": "^8.18.0", - "xss": "^1.0.15", - "yaml": "^2.7.0", - "zod": "^3.24.0", + "@alcalzone/ansi-tokenize": "0.3.0", + "@anthropic-ai/bedrock-sdk": "0.26.4", + "@anthropic-ai/foundry-sdk": "0.2.3", + "@anthropic-ai/sandbox-runtime": "0.0.46", + "@anthropic-ai/sdk": "0.81.0", + "@anthropic-ai/vertex-sdk": "0.14.4", + "@commander-js/extra-typings": "12.1.0", + "@growthbook/growthbook": "1.6.5", + "@modelcontextprotocol/sdk": "1.29.0", + "@opentelemetry/api": "1.9.1", + "@opentelemetry/api-logs": "0.214.0", + "@opentelemetry/core": "2.6.1", + "@opentelemetry/exporter-logs-otlp-http": "0.214.0", + "@opentelemetry/exporter-trace-otlp-grpc": "0.57.2", + "@opentelemetry/resources": "2.6.1", + "@opentelemetry/sdk-logs": "0.214.0", + "@opentelemetry/sdk-metrics": "2.6.1", + "@opentelemetry/sdk-trace-base": "2.6.1", + "@opentelemetry/sdk-trace-node": "2.6.1", + "@opentelemetry/semantic-conventions": "1.40.0", + "ajv": "8.18.0", + "auto-bind": "5.0.1", + "axios": "1.14.0", + "bidi-js": "1.0.3", + "chalk": "5.6.2", + "chokidar": "4.0.3", + "cli-boxes": "3.0.0", + "cli-highlight": "2.1.11", + "code-excerpt": "4.0.0", + 
"commander": "12.1.0", + "diff": "7.0.0", + "emoji-regex": "10.6.0", + "env-paths": "3.0.0", + "execa": "9.6.1", + "fflate": "0.8.2", + "figures": "6.1.0", + "fuse.js": "7.1.0", + "get-east-asian-width": "1.5.0", + "google-auth-library": "9.15.1", + "https-proxy-agent": "7.0.6", + "ignore": "7.0.5", + "indent-string": "5.0.0", + "jsonc-parser": "3.3.1", + "lodash-es": "4.17.23", + "lru-cache": "11.2.7", + "marked": "15.0.12", + "p-map": "7.0.4", + "picomatch": "4.0.4", + "proper-lockfile": "4.1.2", + "qrcode": "1.5.4", + "react": "19.2.4", + "react-compiler-runtime": "1.0.0", + "react-reconciler": "0.33.0", + "semver": "7.7.4", + "shell-quote": "1.8.3", + "signal-exit": "4.1.0", + "stack-utils": "2.0.6", + "strip-ansi": "7.2.0", + "supports-hyperlinks": "3.2.0", + "tree-kill": "1.2.2", + "turndown": "7.2.2", + "type-fest": "4.41.0", + "undici": "7.24.6", + "usehooks-ts": "3.1.1", + "vscode-languageserver-protocol": "3.17.5", + "wrap-ansi": "9.0.2", + "ws": "8.20.0", + "xss": "1.0.15", + "yaml": "2.8.3", + "zod": "3.25.76", }, "devDependencies": { - "@types/bun": "^1.2.0", - "@types/node": "^25.5.0", - "@types/react": "^19.2.14", - "typescript": "^5.7.0", + "@types/bun": "1.3.11", + "@types/node": "25.5.0", + "@types/react": "19.2.14", + "typescript": "5.9.3", }, }, }, diff --git a/package.json b/package.json index c70b2bed..5f5351b8 100644 --- a/package.json +++ b/package.json @@ -42,82 +42,82 @@ "prepack": "npm run build" }, "dependencies": { - "@alcalzone/ansi-tokenize": "^0.3.0", - "@anthropic-ai/bedrock-sdk": "^0.26.0", - "@anthropic-ai/foundry-sdk": "^0.2.0", - "@anthropic-ai/sandbox-runtime": "^0.0.46", - "@anthropic-ai/sdk": "^0.81.0", - "@anthropic-ai/vertex-sdk": "^0.14.0", - "@commander-js/extra-typings": "^12.0.0", - "@growthbook/growthbook": "^1.3.0", - "@modelcontextprotocol/sdk": "^1.12.0", - "@opentelemetry/api": "^1.9.1", - "@opentelemetry/api-logs": "^0.214.0", - "@opentelemetry/core": "^2.6.1", - "@opentelemetry/exporter-logs-otlp-http": "^0.214.0", - "@opentelemetry/exporter-trace-otlp-grpc": "^0.57.0", - "@opentelemetry/resources": "^2.6.1", - "@opentelemetry/sdk-logs": "^0.214.0", - "@opentelemetry/sdk-metrics": "^2.6.1", - "@opentelemetry/sdk-trace-base": "^2.6.1", - "@opentelemetry/sdk-trace-node": "^2.6.1", - "@opentelemetry/semantic-conventions": "^1.40.0", - "ajv": "^8.17.0", - "auto-bind": "^5.0.1", - "axios": "^1.14.0", - "bidi-js": "^1.0.3", - "chalk": "^5.4.0", - "chokidar": "^4.0.0", - "cli-boxes": "^3.0.0", - "cli-highlight": "^2.1.0", - "code-excerpt": "^4.0.0", - "commander": "^12.0.0", - "diff": "^7.0.0", - "emoji-regex": "^10.4.0", - "env-paths": "^3.0.0", - "execa": "^9.5.0", - "fflate": "^0.8.2", - "figures": "^6.1.0", - "fuse.js": "^7.1.0", - "get-east-asian-width": "^1.3.0", - "google-auth-library": "^9.15.0", - "https-proxy-agent": "^7.0.6", - "ignore": "^7.0.0", - "indent-string": "^5.0.0", - "jsonc-parser": "^3.3.1", - "lodash-es": "^4.17.21", - "lru-cache": "^11.0.0", - "marked": "^15.0.0", - "p-map": "^7.0.3", - "picomatch": "^4.0.0", - "proper-lockfile": "^4.1.2", - "qrcode": "^1.5.4", - "react": "^19.2.4", - "react-compiler-runtime": "^1.0.0", - "react-reconciler": "^0.33.0", - "semver": "^7.6.3", - "shell-quote": "^1.8.2", - "signal-exit": "^4.1.0", - "stack-utils": "^2.0.6", - "strip-ansi": "^7.1.0", - "supports-hyperlinks": "^3.1.0", - "tree-kill": "^1.2.2", - "turndown": "^7.2.0", - "type-fest": "^4.30.0", - "undici": "^7.3.0", - "usehooks-ts": "^3.1.1", - "vscode-languageserver-protocol": "^3.17.5", - "wrap-ansi": "^9.0.0", - "ws": 
"^8.18.0", - "xss": "^1.0.15", - "yaml": "^2.7.0", - "zod": "^3.24.0" + "@alcalzone/ansi-tokenize": "0.3.0", + "@anthropic-ai/bedrock-sdk": "0.26.4", + "@anthropic-ai/foundry-sdk": "0.2.3", + "@anthropic-ai/sandbox-runtime": "0.0.46", + "@anthropic-ai/sdk": "0.81.0", + "@anthropic-ai/vertex-sdk": "0.14.4", + "@commander-js/extra-typings": "12.1.0", + "@growthbook/growthbook": "1.6.5", + "@modelcontextprotocol/sdk": "1.29.0", + "@opentelemetry/api": "1.9.1", + "@opentelemetry/api-logs": "0.214.0", + "@opentelemetry/core": "2.6.1", + "@opentelemetry/exporter-logs-otlp-http": "0.214.0", + "@opentelemetry/exporter-trace-otlp-grpc": "0.57.2", + "@opentelemetry/resources": "2.6.1", + "@opentelemetry/sdk-logs": "0.214.0", + "@opentelemetry/sdk-metrics": "2.6.1", + "@opentelemetry/sdk-trace-base": "2.6.1", + "@opentelemetry/sdk-trace-node": "2.6.1", + "@opentelemetry/semantic-conventions": "1.40.0", + "ajv": "8.18.0", + "auto-bind": "5.0.1", + "axios": "1.14.0", + "bidi-js": "1.0.3", + "chalk": "5.6.2", + "chokidar": "4.0.3", + "cli-boxes": "3.0.0", + "cli-highlight": "2.1.11", + "code-excerpt": "4.0.0", + "commander": "12.1.0", + "diff": "7.0.0", + "emoji-regex": "10.6.0", + "env-paths": "3.0.0", + "execa": "9.6.1", + "fflate": "0.8.2", + "figures": "6.1.0", + "fuse.js": "7.1.0", + "get-east-asian-width": "1.5.0", + "google-auth-library": "9.15.1", + "https-proxy-agent": "7.0.6", + "ignore": "7.0.5", + "indent-string": "5.0.0", + "jsonc-parser": "3.3.1", + "lodash-es": "4.17.23", + "lru-cache": "11.2.7", + "marked": "15.0.12", + "p-map": "7.0.4", + "picomatch": "4.0.4", + "proper-lockfile": "4.1.2", + "qrcode": "1.5.4", + "react": "19.2.4", + "react-compiler-runtime": "1.0.0", + "react-reconciler": "0.33.0", + "semver": "7.7.4", + "shell-quote": "1.8.3", + "signal-exit": "4.1.0", + "stack-utils": "2.0.6", + "strip-ansi": "7.2.0", + "supports-hyperlinks": "3.2.0", + "tree-kill": "1.2.2", + "turndown": "7.2.2", + "type-fest": "4.41.0", + "undici": "7.24.6", + "usehooks-ts": "3.1.1", + "vscode-languageserver-protocol": "3.17.5", + "wrap-ansi": "9.0.2", + "ws": "8.20.0", + "xss": "1.0.15", + "yaml": "2.8.3", + "zod": "3.25.76" }, "devDependencies": { - "@types/bun": "^1.2.0", - "@types/node": "^25.5.0", - "@types/react": "^19.2.14", - "typescript": "^5.7.0" + "@types/bun": "1.3.11", + "@types/node": "25.5.0", + "@types/react": "19.2.14", + "typescript": "5.9.3" }, "engines": { "node": ">=20.0.0" From ffbc1f8f6e37d3a0f84a76d6ebecf7301676f7e5 Mon Sep 17 00:00:00 2001 From: Vasanthdev2004 Date: Thu, 2 Apr 2026 10:05:16 +0530 Subject: [PATCH 05/35] fix: support CSI-u printable input on Windows --- src/ink/parse-keypress.test.ts | 49 ++++++++++++++++++++++++++++++++++ src/ink/parse-keypress.ts | 28 ++++++++++++++++++- 2 files changed, 76 insertions(+), 1 deletion(-) create mode 100644 src/ink/parse-keypress.test.ts diff --git a/src/ink/parse-keypress.test.ts b/src/ink/parse-keypress.test.ts new file mode 100644 index 00000000..23035286 --- /dev/null +++ b/src/ink/parse-keypress.test.ts @@ -0,0 +1,49 @@ +import { expect, test } from 'bun:test' + +import { + INITIAL_STATE, + parseMultipleKeypresses, + type ParsedKey, +} from './parse-keypress.ts' +import { InputEvent } from './events/input-event.ts' + +function parseInputEvent(sequence: string): InputEvent { + const [items] = parseMultipleKeypresses(INITIAL_STATE, sequence) + + expect(items).toHaveLength(1) + + const item = items[0] + expect(item?.kind).toBe('key') + + return new InputEvent(item as ParsedKey) +} + +test('treats CSI-u modifier 0 as 
unmodified printable input', () => { + const event = parseInputEvent('\x1b[47;0u') + + expect(event.input).toBe('/') + expect(event.key.ctrl).toBe(false) + expect(event.key.meta).toBe(false) + expect(event.key.shift).toBe(false) + expect(event.key.super).toBe(false) +}) + +test('preserves printable Unicode CSI-u input', () => { + const event = parseInputEvent('\x1b[231u') + + expect(event.input).toBe('ç') + expect(event.key.ctrl).toBe(false) + expect(event.key.meta).toBe(false) + expect(event.key.shift).toBe(false) + expect(event.key.super).toBe(false) +}) + +test('preserves printable Unicode CSI-u input with explicit modifier 0', () => { + const event = parseInputEvent('\x1b[231;0u') + + expect(event.input).toBe('ç') + expect(event.key.ctrl).toBe(false) + expect(event.key.meta).toBe(false) + expect(event.key.shift).toBe(false) + expect(event.key.super).toBe(false) +}) diff --git a/src/ink/parse-keypress.ts b/src/ink/parse-keypress.ts index a7e43adc..ac4162fd 100644 --- a/src/ink/parse-keypress.ts +++ b/src/ink/parse-keypress.ts @@ -468,7 +468,10 @@ function decodeModifier(modifier: number): { ctrl: boolean super: boolean } { - const m = modifier - 1 + // Some Windows VT stacks use 0 instead of 1 for an unmodified CSI-u key. + // Clamp to the protocol default so plain printable keys don't look like + // ctrl+meta+shift+super all at once. + const m = Math.max(modifier, 1) - 1 return { shift: !!(m & 1), meta: !!(m & 2), @@ -477,6 +480,14 @@ function decodeModifier(modifier: number): { } } +function isPrivateUseCodepoint(codepoint: number): boolean { + return ( + (codepoint >= 0xe000 && codepoint <= 0xf8ff) || + (codepoint >= 0xf0000 && codepoint <= 0xffffd) || + (codepoint >= 0x100000 && codepoint <= 0x10fffd) + ) +} + /** * Map keycode to key name for modifyOtherKeys/CSI u sequences. * Handles both ASCII keycodes and Kitty keyboard protocol functional keys. @@ -536,6 +547,21 @@ function keycodeToName(keycode: number): string | undefined { if (keycode >= 32 && keycode <= 126) { return String.fromCharCode(keycode).toLowerCase() } + + // CSI-u can carry printable Unicode codepoints directly on some + // Windows terminals and keyboard layouts. Keep kitty's private-use + // functional key range excluded so special keys still stay non-text. 
+ if ( + keycode > 0x1f && + keycode !== 0x7f && + (keycode < 0x80 || keycode > 0x9f) && + keycode <= 0x10ffff && + (keycode < 0xd800 || keycode > 0xdfff) && + !isPrivateUseCodepoint(keycode) + ) { + return String.fromCodePoint(keycode) + } + return undefined } } From 4918caa22b9a7f52d5b0dc18b4c6718dd23e775f Mon Sep 17 00:00:00 2001 From: Dark Yagami Date: Thu, 2 Apr 2026 10:18:27 +0530 Subject: [PATCH 06/35] Update resume command in gracefulShutdown message --- src/utils/gracefulShutdown.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/utils/gracefulShutdown.ts b/src/utils/gracefulShutdown.ts index 03e6233e..4e003000 100644 --- a/src/utils/gracefulShutdown.ts +++ b/src/utils/gracefulShutdown.ts @@ -173,7 +173,7 @@ function printResumeHint(): void { writeSync( 1, chalk.dim( - `\nResume this session with:\nclaude --resume ${resumeArg}\n`, + `\nResume this session with:\nopenclaude --resume ${resumeArg}\n`, ), ) resumeHintPrinted = true From 2bade922ef92915891d94ad80627676a37b33956 Mon Sep 17 00:00:00 2001 From: Vasanthdev2004 Date: Thu, 2 Apr 2026 10:19:36 +0530 Subject: [PATCH 07/35] fix: add clearer ripgrep install guidance --- README.md | 4 +++ src/utils/ripgrep.test.ts | 27 ++++++++++++++++++ src/utils/ripgrep.ts | 60 +++++++++++++++++++++++++++++++++++++-- 3 files changed, 88 insertions(+), 3 deletions(-) create mode 100644 src/utils/ripgrep.test.ts diff --git a/README.md b/README.md index 5c94ed80..b7c058a9 100644 --- a/README.md +++ b/README.md @@ -16,6 +16,10 @@ All of Claude Code's tools work — bash, file read/write/edit, grep, glob, agen npm install -g @gitlawb/openclaude ``` +If you install via npm and later see `ripgrep not found`, install ripgrep +system-wide and confirm `rg --version` works in the same terminal before +starting OpenClaude. + ### Option B: From source (requires Bun) Use Bun `1.3.11` or newer for source builds on Windows. Older Bun versions such as `1.3.4` can fail with a large batch of unresolved module errors during `bun run build`. 
diff --git a/src/utils/ripgrep.test.ts b/src/utils/ripgrep.test.ts new file mode 100644 index 00000000..6a17d753 --- /dev/null +++ b/src/utils/ripgrep.test.ts @@ -0,0 +1,27 @@ +import { expect, test } from 'bun:test' + +import { wrapRipgrepUnavailableError } from './ripgrep.ts' + +test('wrapRipgrepUnavailableError explains missing packaged fallback', () => { + const error = wrapRipgrepUnavailableError( + { code: 'ENOENT', message: 'spawn rg ENOENT' }, + { mode: 'builtin', command: 'C:\\fake\\vendor\\ripgrep\\rg.exe' }, + 'win32', + ) + + expect(error.name).toBe('RipgrepUnavailableError') + expect(error.code).toBe('ENOENT') + expect(error.message).toContain('packaged ripgrep fallback') + expect(error.message).toContain('winget install BurntSushi.ripgrep.MSVC') +}) + +test('wrapRipgrepUnavailableError explains missing system ripgrep', () => { + const error = wrapRipgrepUnavailableError( + { code: 'ENOENT', message: 'spawn rg ENOENT' }, + { mode: 'system', command: 'rg' }, + 'linux', + ) + + expect(error.message).toContain('system ripgrep binary was not found on PATH') + expect(error.message).toContain('apt install ripgrep') +}) diff --git a/src/utils/ripgrep.ts b/src/utils/ripgrep.ts index 683da051..4bd95894 100644 --- a/src/utils/ripgrep.ts +++ b/src/utils/ripgrep.ts @@ -28,6 +28,8 @@ type RipgrepConfig = { argv0?: string } +type RipgrepErrorLike = Pick + const getRipgrepConfig = memoize((): RipgrepConfig => { const userWantsSystemRipgrep = isEnvDefinedFalsy( process.env.USE_BUILTIN_RIPGREP, @@ -105,6 +107,52 @@ export class RipgrepTimeoutError extends Error { } } +export class RipgrepUnavailableError extends Error { + code?: string | number + + constructor( + message: string, + public readonly config: Pick, + code?: string | number, + ) { + super(message) + this.name = 'RipgrepUnavailableError' + this.code = code + } +} + +function getRipgrepInstallHint(platform = process.platform): string { + switch (platform) { + case 'win32': + return 'Install ripgrep and confirm `rg --version` works in the same terminal. Windows: `winget install BurntSushi.ripgrep.MSVC` or `choco install ripgrep`.' + case 'darwin': + return 'Install ripgrep and confirm `rg --version` works in the same terminal. macOS: `brew install ripgrep`.' + default: + return 'Install ripgrep and confirm `rg --version` works in the same terminal. Linux: use your distro package manager, for example `apt install ripgrep`.' + } +} + +export function wrapRipgrepUnavailableError( + error: RipgrepErrorLike, + config = getRipgrepConfig(), + platform = process.platform, +): RipgrepUnavailableError { + const modeExplanation = + config.mode === 'builtin' + ? 'This install could not locate its packaged ripgrep fallback.' + : config.mode === 'system' + ? 'A working system ripgrep binary was not found on PATH.' + : 'The embedded ripgrep binary could not be started.' + + const originalMessage = error.message ? ` Original error: ${error.message}` : '' + + return new RipgrepUnavailableError( + `ripgrep (rg) is required for file search but could not be started. ${modeExplanation} ${getRipgrepInstallHint(platform)}${originalMessage}`, + config, + error.code, + ) +} + function ripGrepRaw( args: string[], target: string, @@ -275,7 +323,9 @@ async function ripGrepFileCount( child.on('error', err => { if (settled) return settled = true - reject(err) + reject( + err.code === 'ENOENT' ? 
wrapRipgrepUnavailableError(err) : err, + ) }) }) } @@ -337,7 +387,9 @@ export async function ripGrepStream( child.on('error', err => { if (settled) return settled = true - reject(err) + reject( + err.code === 'ENOENT' ? wrapRipgrepUnavailableError(err) : err, + ) }) }) } @@ -383,7 +435,9 @@ export async function ripGrep( // These should be surfaced to the user rather than silently returning empty results const CRITICAL_ERROR_CODES = ['ENOENT', 'EACCES', 'EPERM'] if (CRITICAL_ERROR_CODES.includes(error.code as string)) { - reject(error) + reject( + error.code === 'ENOENT' ? wrapRipgrepUnavailableError(error) : error, + ) return } From 0746802b6a08c00053a28b3519f7946995b239ad Mon Sep 17 00:00:00 2001 From: Mikey <5217366+BrainSlugs83@users.noreply.github.com> Date: Wed, 1 Apr 2026 21:29:12 -0700 Subject: [PATCH 08/35] security: kill GrowthBook phone-home and auto-updater at build time MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Adds a Bun build plugin that replaces analytics/telemetry modules with no-op stubs at compile time. Primary targets (NOT killed by PR #94 or the feature() shim): - GrowthBook: phones home to api.anthropic.com on every launch, sending account UUID, org UUID, email, device ID, subscription type. Refreshes every 6 hours. Now returns defaults without making any network call. - Auto-updater: contacts storage.googleapis.com and npm registry on launch to check for new versions. Now returns null/no-op. Defense-in-depth (already gated by PR #94 or feature flags, but now the code itself is replaced with empty functions): - Datadog, 1P event logging, BigQuery metrics, Perfetto tracing, session tracing, plugin fetch telemetry, transcript sharing. Deliberately NOT stubbed: - Plugin marketplace (downloads.claude.ai) — needed for /plugin - User-configurable OTel (CLAUDE_CODE_ENABLE_TELEMETRY) — opt-in Implementation: separate plugin file (scripts/no-telemetry-plugin.ts) with a 2-line hook in build.ts. The plugin file does not exist upstream so it cannot cause merge conflicts. --- scripts/build.ts | 2 + scripts/no-telemetry-plugin.ts | 221 +++++++++++++++++++++++++++++++++ 2 files changed, 223 insertions(+) create mode 100644 scripts/no-telemetry-plugin.ts diff --git a/scripts/build.ts b/scripts/build.ts index 0a00f2c9..137fadb6 100644 --- a/scripts/build.ts +++ b/scripts/build.ts @@ -9,6 +9,7 @@ */ import { readFileSync } from 'fs' +import { noTelemetryPlugin } from './no-telemetry-plugin' const pkg = JSON.parse(readFileSync('./package.json', 'utf-8')) const version = pkg.version @@ -64,6 +65,7 @@ const result = await Bun.build({ 'MACRO.NATIVE_PACKAGE_URL': 'undefined', }, plugins: [ + noTelemetryPlugin, { name: 'bun-bundle-shim', setup(build) { diff --git a/scripts/no-telemetry-plugin.ts b/scripts/no-telemetry-plugin.ts new file mode 100644 index 00000000..dc83b50f --- /dev/null +++ b/scripts/no-telemetry-plugin.ts @@ -0,0 +1,221 @@ +/** + * No-Telemetry Build Plugin for OpenClaude + * + * Replaces all analytics, telemetry, and phone-home modules with no-op stubs + * at compile time. Zero runtime cost, zero network calls to Anthropic. + * + * This file is NOT tracked upstream — merge conflicts are impossible. + * Only build.ts needs a one-line import + one-line array entry. 
+ * + * Kills: + * - GrowthBook remote feature flags (api.anthropic.com) + * - Datadog event intake + * - 1P event logging (api.anthropic.com/api/event_logging/batch) + * - BigQuery metrics exporter (api.anthropic.com/api/claude_code/metrics) + * - Perfetto / OpenTelemetry session tracing + * - Auto-updater (storage.googleapis.com, npm registry) + * - Plugin fetch telemetry + * - Transcript / feedback sharing + */ + +import type { BunPlugin } from 'bun' + +// Module path (relative to src/, without extension) → stub source +const stubs: Record = { + + // ─── Analytics core ───────────────────────────────────────────── + + 'services/analytics/index': ` +export function stripProtoFields(metadata) { return metadata; } +export function attachAnalyticsSink() {} +export function logEvent() {} +export async function logEventAsync() {} +export function _resetForTesting() {} +`, + + 'services/analytics/growthbook': ` +const noop = () => {}; +export function onGrowthBookRefresh() { return noop; } +export function hasGrowthBookEnvOverride() { return false; } +export function getAllGrowthBookFeatures() { return {}; } +export function getGrowthBookConfigOverrides() { return {}; } +export function setGrowthBookConfigOverride() {} +export function clearGrowthBookConfigOverrides() {} +export function getApiBaseUrlHost() { return undefined; } +export const initializeGrowthBook = async () => null; +export async function getFeatureValue_DEPRECATED(feature, defaultValue) { return defaultValue; } +export function getFeatureValue_CACHED_MAY_BE_STALE(feature, defaultValue) { return defaultValue; } +export function getFeatureValue_CACHED_WITH_REFRESH(feature, defaultValue) { return defaultValue; } +export function checkStatsigFeatureGate_CACHED_MAY_BE_STALE() { return false; } +export async function checkSecurityRestrictionGate() { return false; } +export async function checkGate_CACHED_OR_BLOCKING() { return false; } +export function refreshGrowthBookAfterAuthChange() {} +export function resetGrowthBook() {} +export async function refreshGrowthBookFeatures() {} +export function setupPeriodicGrowthBookRefresh() {} +export function stopPeriodicGrowthBookRefresh() {} +export async function getDynamicConfig_BLOCKS_ON_INIT(configName, defaultValue) { return defaultValue; } +export function getDynamicConfig_CACHED_MAY_BE_STALE(configName, defaultValue) { return defaultValue; } +`, + + 'services/analytics/sink': ` +export function initializeAnalyticsGates() {} +export function initializeAnalyticsSink() {} +`, + + 'services/analytics/config': ` +export function isAnalyticsDisabled() { return true; } +export function isFeedbackSurveyDisabled() { return true; } +`, + + 'services/analytics/datadog': ` +export const initializeDatadog = async () => false; +export async function shutdownDatadog() {} +export async function trackDatadogEvent() {} +`, + + 'services/analytics/firstPartyEventLogger': ` +export function getEventSamplingConfig() { return {}; } +export function shouldSampleEvent() { return null; } +export async function shutdown1PEventLogging() {} +export function is1PEventLoggingEnabled() { return false; } +export function logEventTo1P() {} +export function logGrowthBookExperimentTo1P() {} +export function initialize1PEventLogging() {} +export async function reinitialize1PEventLoggingIfConfigChanged() {} +`, + + 'services/analytics/firstPartyEventLoggingExporter': ` +export class FirstPartyEventLoggingExporter { + constructor() {} + async export(logs, resultCallback) { resultCallback({ code: 0 }); } + async 
getQueuedEventCount() { return 0; } + async shutdown() {} + async forceFlush() {} +} +`, + + 'services/analytics/metadata': ` +export function sanitizeToolNameForAnalytics(toolName) { return toolName; } +export function isToolDetailsLoggingEnabled() { return false; } +export function isAnalyticsToolDetailsLoggingEnabled() { return false; } +export function mcpToolDetailsForAnalytics() { return {}; } +export function extractMcpToolDetails() { return undefined; } +export function extractSkillName() { return undefined; } +export function extractToolInputForTelemetry() { return undefined; } +export function getFileExtensionForAnalytics() { return undefined; } +export function getFileExtensionsFromBashCommand() { return undefined; } +export async function getEventMetadata() { return {}; } +export function to1PEventFormat() { return {}; } +`, + + // ─── Telemetry subsystems ─────────────────────────────────────── + + 'utils/telemetry/bigqueryExporter': ` +export class BigQueryMetricsExporter { + constructor() {} + async export(metrics, resultCallback) { resultCallback({ code: 0 }); } + async shutdown() {} + async forceFlush() {} + selectAggregationTemporality() { return 0; } +} +`, + + 'utils/telemetry/perfettoTracing': ` +export function initializePerfettoTracing() {} +export function isPerfettoTracingEnabled() { return false; } +export function registerAgent() {} +export function unregisterAgent() {} +export function startLLMRequestPerfettoSpan() { return ''; } +export function endLLMRequestPerfettoSpan() {} +export function startToolPerfettoSpan() { return ''; } +export function endToolPerfettoSpan() {} +export function startUserInputPerfettoSpan() { return ''; } +export function endUserInputPerfettoSpan() {} +export function emitPerfettoInstant() {} +export function emitPerfettoCounter() {} +export function startInteractionPerfettoSpan() { return ''; } +export function endInteractionPerfettoSpan() {} +export function getPerfettoEvents() { return []; } +export function resetPerfettoTracer() {} +export async function triggerPeriodicWriteForTesting() {} +export function evictStaleSpansForTesting() {} +export const MAX_EVENTS_FOR_TESTING = 0; +export function evictOldestEventsForTesting() {} +`, + + 'utils/telemetry/sessionTracing': ` +const noopSpan = { + end() {}, setAttribute() {}, setStatus() {}, + recordException() {}, addEvent() {}, isRecording() { return false; }, +}; +export function isBetaTracingEnabled() { return false; } +export function isEnhancedTelemetryEnabled() { return false; } +export function startInteractionSpan() { return noopSpan; } +export function endInteractionSpan() {} +export function startLLMRequestSpan() { return noopSpan; } +export function endLLMRequestSpan() {} +export function startToolSpan() { return noopSpan; } +export function startToolBlockedOnUserSpan() { return noopSpan; } +export function endToolBlockedOnUserSpan() {} +export function startToolExecutionSpan() { return noopSpan; } +export function endToolExecutionSpan() {} +export function endToolSpan() {} +export function addToolContentEvent() {} +export function getCurrentSpan() { return null; } +export async function executeInSpan(spanName, fn) { return fn(noopSpan); } +export function startHookSpan() { return noopSpan; } +export function endHookSpan() {} +`, + + // ─── Auto-updater (phones home to GCS + npm) ────────────────── + + 'utils/autoUpdater': ` +export async function assertMinVersion() {} +export async function getMaxVersion() { return undefined; } +export async function getMaxVersionMessage() 
{ return undefined; } +export function shouldSkipVersion() { return true; } +export function getLockFilePath() { return '/tmp/openclaude-update.lock'; } +export async function checkGlobalInstallPermissions() { return { hasPermissions: false, npmPrefix: null }; } +export async function getLatestVersion() { return null; } +export async function getNpmDistTags() { return { latest: null, stable: null }; } +export async function getLatestVersionFromGcs() { return null; } +export async function getGcsDistTags() { return { latest: null, stable: null }; } +export async function getVersionHistory() { return []; } +export async function installGlobalPackage() { return 'success'; } +`, + + // ─── Plugin fetch telemetry (not the marketplace itself) ─────── + + 'utils/plugins/fetchTelemetry': ` +export function logPluginFetch() {} +export function classifyFetchError() { return 'disabled'; } +`, + + // ─── Transcript / feedback sharing ───────────────────────────── + + 'components/FeedbackSurvey/submitTranscriptShare': ` +export async function submitTranscriptShare() { return { success: false }; } +`, +} + +export const noTelemetryPlugin: BunPlugin = { + name: 'no-telemetry', + setup(build) { + for (const [modulePath, contents] of Object.entries(stubs)) { + // Build regex that matches the resolved file path on any OS + // e.g. "services/analytics/growthbook" → /services[/\\]analytics[/\\]growthbook\.(ts|js)$/ + const escaped = modulePath + .replace(/\//g, '[/\\\\]') + .replace(/\./g, '\\.') + const filter = new RegExp(`${escaped}\\.(ts|js)$`) + + build.onLoad({ filter }, () => ({ + contents, + loader: 'js', + })) + } + + console.log(` 🔇 no-telemetry: stubbed ${Object.keys(stubs).length} modules`) + }, +} From 9590066b5b03e772f33b27ad33b7581fd5a487c3 Mon Sep 17 00:00:00 2001 From: Raj Rasane Date: Thu, 2 Apr 2026 10:18:52 +0530 Subject: [PATCH 09/35] fix: gracefully handle Docker/remote Ollama in system-check When Ollama runs inside Docker or a remote container, the native 'ollama ps' command is unavailable on the host. Instead of hard-failing and blocking CLI startup, downgrade to a pass() with a warning when the HTTP ping has already confirmed the server is reachable. --- scripts/system-check.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/system-check.ts b/scripts/system-check.ts index e129685a..2e12da5a 100644 --- a/scripts/system-check.ts +++ b/scripts/system-check.ts @@ -289,7 +289,7 @@ function checkOllamaProcessorMode(): CheckResult { if (result.status !== 0) { const detail = (result.stderr || result.stdout || 'Unable to run ollama ps').trim() - return fail('Ollama processor mode', detail) + return pass('Ollama processor mode', `Native CLI check failed (${detail}). Assuming valid Docker/remote backend since HTTP ping passed.`) } const output = (result.stdout || '').trim() From 310f1d344ad10667f9738d3e459092cb9d52f1dd Mon Sep 17 00:00:00 2001 From: Raj Rasane Date: Thu, 2 Apr 2026 10:26:06 +0530 Subject: [PATCH 10/35] fix: provide local session title fallback for 3P providers When using non-Anthropic providers (Ollama, Gemini, Codex), the underlying call to queryHaiku for session title generation fails. Previously, this caused the catch block to return null, leaving the terminal tab permanently stuck on 'Claude Code'. Now, when the API call fails, we gracefully derive a title locally from the user's first message (first 7 words, sentence-cased), ensuring users still see a meaningful session title in their terminal tab. 
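For example (illustrative prompts; behaviour matches the new localFallbackTitle
helper in the diff below):

    localFallbackTitle('help me refactor the auth middleware to use sessions')
    // → 'Help me refactor the auth middleware to'  (first 7 words, sentence-cased)

    localFallbackTitle('ok')
    // → null  (too short to make a useful terminal title)
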
--- src/utils/sessionTitle.ts | 25 ++++++++++++++++++++++++- 1 file changed, 24 insertions(+), 1 deletion(-) diff --git a/src/utils/sessionTitle.ts b/src/utils/sessionTitle.ts index 5a722c88..72ae8054 100644 --- a/src/utils/sessionTitle.ts +++ b/src/utils/sessionTitle.ts @@ -124,6 +124,29 @@ export async function generateSessionTitle( level: 'error', }) logEvent('tengu_session_title_generated', { success: false }) - return null + + // Fallback: derive a title locally from the user's first message. + // This ensures 3P providers (Ollama, Gemini, OpenAI) still get + // meaningful terminal titles when the Haiku API call is unavailable. + return localFallbackTitle(trimmed) } } + +/** + * Fallback local title generator for when the Haiku API is unavailable + * (e.g. when using third-party providers without an Anthropic API key). + */ +function localFallbackTitle(text: string): string | null { + const words = text.split(/\s+/).slice(0, 7) + if (words.length === 0) return null + + // Create a sentence-case string + let fallback = words.join(' ') + if (fallback.length > 50) { + fallback = fallback.substring(0, 49) + '…' + } + + if (fallback.length <= 3) return null + + return fallback.charAt(0).toUpperCase() + fallback.slice(1) +} From 302d9d4e44925c029703b0dd32a93932d8b33aba Mon Sep 17 00:00:00 2001 From: Raj Rasane Date: Thu, 2 Apr 2026 10:33:56 +0530 Subject: [PATCH 11/35] fix: enable session title generation for non-firstParty providers --- src/screens/REPL.tsx | 618 +++++++++++++++++++++---------------------- 1 file changed, 309 insertions(+), 309 deletions(-) diff --git a/src/screens/REPL.tsx b/src/screens/REPL.tsx index 9fdd3b11..ef1513aa 100644 --- a/src/screens/REPL.tsx +++ b/src/screens/REPL.tsx @@ -97,8 +97,8 @@ import { logError } from '../utils/log.js'; /* eslint-disable custom-rules/no-process-env-top-level, @typescript-eslint/no-require-imports */ const useVoiceIntegration: typeof import('../hooks/useVoiceIntegration.js').useVoiceIntegration = feature('VOICE_MODE') ? require('../hooks/useVoiceIntegration.js').useVoiceIntegration : () => ({ stripTrailing: () => 0, - handleKeyEvent: () => {}, - resetAnchor: () => {} + handleKeyEvent: () => { }, + resetAnchor: () => { } }); const VoiceKeybindingHandler: typeof import('../hooks/useVoiceIntegration.js').VoiceKeybindingHandler = feature('VOICE_MODE') ? require('../hooks/useVoiceIntegration.js').VoiceKeybindingHandler : () => null; // Frustration detection is ant-only (dogfooding). Conditional require so external @@ -106,11 +106,11 @@ const VoiceKeybindingHandler: typeof import('../hooks/useVoiceIntegration.js').V // on every messages change, plus the GrowthBook fetch). const useFrustrationDetection: typeof import('../components/FeedbackSurvey/useFrustrationDetection.js').useFrustrationDetection = "external" === 'ant' ? require('../components/FeedbackSurvey/useFrustrationDetection.js').useFrustrationDetection : () => ({ state: 'closed', - handleTranscriptSelect: () => {} + handleTranscriptSelect: () => { } }); // Ant-only org warning. Conditional require so the org UUID list is // eliminated from external builds (one UUID is on excluded-strings). -const useAntOrgWarningNotification: typeof import('../hooks/notifs/useAntOrgWarningNotification.js').useAntOrgWarningNotification = "external" === 'ant' ? 
require('../hooks/notifs/useAntOrgWarningNotification.js').useAntOrgWarningNotification : () => {}; +const useAntOrgWarningNotification: typeof import('../hooks/notifs/useAntOrgWarningNotification.js').useAntOrgWarningNotification = "external" === 'ant' ? require('../hooks/notifs/useAntOrgWarningNotification.js').useAntOrgWarningNotification : () => { }; // Dead code elimination: conditional import for coordinator mode const getCoordinatorUserContext: (mcpClients: ReadonlyArray<{ name: string; @@ -192,7 +192,7 @@ import { useInboxPoller } from '../hooks/useInboxPoller.js'; // Dead code elimination: conditional import for loop mode /* eslint-disable @typescript-eslint/no-require-imports */ const proactiveModule = feature('PROACTIVE') || feature('KAIROS') ? require('../proactive/index.js') : null; -const PROACTIVE_NO_OP_SUBSCRIBE = (_cb: () => void) => () => {}; +const PROACTIVE_NO_OP_SUBSCRIBE = (_cb: () => void) => () => { }; const PROACTIVE_FALSE = () => false; const SUGGEST_BG_PR_NOOP = (_p: string, _n: string): boolean => false; const useProactive = feature('PROACTIVE') || feature('KAIROS') ? require('../proactive/useProactive.js').useProactive : null; @@ -297,7 +297,7 @@ const EMPTY_MCP_CLIENTS: MCPServerConnection[] = []; // Stable stub for useAssistantHistory's non-KAIROS branch — avoids a new // function identity each render, which would break composedOnScroll's memo. const HISTORY_STUB = { - maybeLoadOlder: (_: ScrollBoxHandle) => {} + maybeLoadOlder: (_: ScrollBoxHandle) => { } }; // Window after a user-initiated scroll during which type-into-empty does NOT // repin to bottom. Josh Rosen's workflow: Claude emits long output → scroll @@ -448,28 +448,28 @@ function TranscriptSearchBar({ const off = cursorOffset; const cursorChar = off < query.length ? query[off] : ' '; return - / - {query.slice(0, off)} - {cursorChar} - {off < query.length && {query.slice(off + 1)}} - - {indexStatus === 'building' ? indexing… : indexStatus ? indexed in {indexStatus.ms}ms : count === 0 && query ? no matches : count > 0 ? - // Engine-counted (indexOf on extractSearchText). May drift from - // render-count for ghost/phantom messages — badge is a rough - // location hint. scanElement gives exact per-message positions - // but counting ALL would cost ~1-3ms × matched-messages. - - {current}/{count} - {' '} - : null} - ; + // applySearchHighlight scans the whole screen buffer. The query + // text rendered here IS on screen — /foo matches its own 'foo' in + // the bar. With no content matches that's the ONLY visible match → + // gets CURRENT → underlined. noSelect makes searchHighlight.ts:76 + // skip these cells (same exclusion as gutters). You can't text- + // select the bar either; it's transient chrome, fine. + noSelect> + / + {query.slice(0, off)} + {cursorChar} + {off < query.length && {query.slice(off + 1)}} + + {indexStatus === 'building' ? indexing… : indexStatus ? indexed in {indexStatus.ms}ms : count === 0 && query ? no matches : count > 0 ? + // Engine-counted (indexOf on extractSearchText). May drift from + // render-count for ghost/phantom messages — badge is a rough + // location hint. scanElement gives exact per-message positions + // but counting ALL would cost ~1-3ms × matched-messages. 
+ + {current}/{count} + {' '} + : null} + ; } const TITLE_ANIMATION_FRAMES = ['⠂', '⠐']; const TITLE_STATIC_PREFIX = '✳'; @@ -605,8 +605,8 @@ export function REPL({ const moreRightEnabled = useMemo(() => "external" === 'ant' && isEnvTruthy(process.env.CLAUDE_MORERIGHT), []); const disableVirtualScroll = useMemo(() => isEnvTruthy(process.env.CLAUDE_CODE_DISABLE_VIRTUAL_SCROLL), []); const disableMessageActions = feature('MESSAGE_ACTIONS') ? - // biome-ignore lint/correctness/useHookAtTopLevel: feature() is a compile-time constant - useMemo(() => isEnvTruthy(process.env.CLAUDE_CODE_DISABLE_MESSAGE_ACTIONS), []) : false; + // biome-ignore lint/correctness/useHookAtTopLevel: feature() is a compile-time constant + useMemo(() => isEnvTruthy(process.env.CLAUDE_CODE_DISABLE_MESSAGE_ACTIONS), []) : false; // Agent definition is state so /resume can update it mid-session const [mainThreadAgentDefinition, setMainThreadAgentDefinition] = useState(initialMainThreadAgentDefinition); @@ -865,11 +865,11 @@ export function REPL({ // Ref for the bridge result callback — set after useReplBridge initializes, // read in the onQuery finally block to notify mobile clients that a turn ended. - const sendBridgeResultRef = useRef<() => void>(() => {}); + const sendBridgeResultRef = useRef<() => void>(() => { }); // Ref for the synchronous restore callback — set after restoreMessageSync is // defined, read in the onQuery finally block for auto-restore on interrupt. - const restoreMessageSyncRef = useRef<(m: UserMessage) => void>(() => {}); + const restoreMessageSyncRef = useRef<(m: UserMessage) => void>(() => { }); // Ref to the fullscreen layout's scroll box for keyboard scrolling. // Null when fullscreen mode is disabled (ref never attached). @@ -1246,8 +1246,8 @@ export function REPL({ const cursorNavRef = useRef(null); // Memoized so Messages' React.memo holds. const unseenDivider = useMemo(() => computeUnseenDivider(messages, dividerIndex), - // eslint-disable-next-line react-hooks/exhaustive-deps -- length change covers appends; useUnseenDivider's count-drop guard clears dividerIndex on replace/rewind - [dividerIndex, messages.length]); + // eslint-disable-next-line react-hooks/exhaustive-deps -- length change covers appends; useUnseenDivider's count-drop guard clears dividerIndex on replace/rewind + [dividerIndex, messages.length]); // Re-pin scroll to bottom and clear the unseen-messages baseline. Called // on any user-driven return-to-live action (submit, type-into-empty, // overlay appear/dismiss). @@ -1276,13 +1276,13 @@ export function REPL({ const { maybeLoadOlder } = feature('KAIROS') ? - // biome-ignore lint/correctness/useHookAtTopLevel: feature() is a compile-time constant - useAssistantHistory({ - config: remoteSessionConfig, - setMessages, - scrollRef, - onPrepend: shiftDivider - }) : HISTORY_STUB; + // biome-ignore lint/correctness/useHookAtTopLevel: feature() is a compile-time constant + useAssistantHistory({ + config: remoteSessionConfig, + setMessages, + scrollRef, + onPrepend: shiftDivider + }) : HISTORY_STUB; // Compose useUnseenDivider's callbacks with the lazy-load trigger. 
const composedOnScroll = useCallback((sticky: boolean, handle: ScrollBoxHandle) => { lastUserScrollTsRef.current = Date.now(); @@ -1593,12 +1593,12 @@ export function REPL({ swarmStartTimeRef.current = null; swarmBudgetInfoRef.current = undefined; setMessages(prev => [...prev, createTurnDurationMessage(totalMs, deferredBudget, - // Count only what recordTranscript will persist — ephemeral - // progress ticks and non-ant attachments are filtered by - // isLoggableMessage and never reach disk. Using raw prev.length - // would make checkResumeConsistency report false delta<0 for - // every turn that ran a progress-emitting tool. - count(prev, isLoggableMessage))]); + // Count only what recordTranscript will persist — ephemeral + // progress ticks and non-ant attachments are filtered by + // isLoggableMessage and never reach disk. Using raw prev.length + // would make checkResumeConsistency report false delta<0 for + // every turn that ran a progress-emitting tool. + count(prev, isLoggableMessage))]); } }, [hasRunningTeammates, setMessages]); @@ -1665,19 +1665,19 @@ export function REPL({ setToolJSX }); const showSpinner = (!toolJSX || toolJSX.showSpinner === true) && toolUseConfirmQueue.length === 0 && promptQueue.length === 0 && ( - // Show spinner during input processing, API call, while teammates are running, - // or while pending task notifications are queued (prevents spinner bounce between consecutive notifications) - isLoading || userInputOnProcessing || hasRunningTeammates || - // Keep spinner visible while task notifications are queued for processing. - // Without this, the spinner briefly disappears between consecutive notifications - // (e.g., multiple background agents completing in rapid succession) because - // isLoading goes false momentarily between processing each one. - getCommandQueueLength() > 0) && - // Hide spinner when waiting for leader to approve permission request - !pendingWorkerRequest && !onlySleepToolActive && ( - // Hide spinner when streaming text is visible (the text IS the feedback), - // but keep it when isBriefOnly suppresses the streaming text display - !visibleStreamingText || isBriefOnly); + // Show spinner during input processing, API call, while teammates are running, + // or while pending task notifications are queued (prevents spinner bounce between consecutive notifications) + isLoading || userInputOnProcessing || hasRunningTeammates || + // Keep spinner visible while task notifications are queued for processing. + // Without this, the spinner briefly disappears between consecutive notifications + // (e.g., multiple background agents completing in rapid succession) because + // isLoading goes false momentarily between processing each one. 
+ getCommandQueueLength() > 0) && + // Hide spinner when waiting for leader to approve permission request + !pendingWorkerRequest && !onlySleepToolActive && ( + // Hide spinner when streaming text is visible (the text IS the feedback), + // but keep it when isBriefOnly suppresses the streaming text display + !visibleStreamingText || isBriefOnly); // Check if any permission or ask question prompt is currently visible // This is used to prevent the survey from opening while prompts are active @@ -2323,9 +2323,9 @@ export function REPL({ addNotification({ key: 'sandbox-unavailable', jsx: <> - sandbox disabled - · /sandbox - , + sandbox disabled + · /sandbox + , priority: 'medium' }); }, [addNotification]); @@ -2676,7 +2676,7 @@ export function REPL({ // useDeferredHookMessages) and attachment messages (appended by // processTextPrompt) — both pushed length past 1 on turn one, so the // title silently fell through to the "Claude Code" default. - if (getAPIProvider() === 'firstParty' && !titleDisabled && !sessionTitle && !agentTitle && !haikuTitleAttemptedRef.current) { + if (!titleDisabled && !sessionTitle && !agentTitle && !haikuTitleAttemptedRef.current) { const firstUserMessage = newMessages.find(m => m.type === 'user' && !m.isMeta); const text = firstUserMessage?.type === 'user' ? getContentText(firstUserMessage.message.content) : null; // Skip synthetic breadcrumbs — slash-command output, prompt-skill @@ -2686,7 +2686,7 @@ export function REPL({ if (text && !text.startsWith(`<${LOCAL_COMMAND_STDOUT_TAG}>`) && !text.startsWith(`<${COMMAND_MESSAGE_TAG}>`) && !text.startsWith(`<${COMMAND_NAME_TAG}>`) && !text.startsWith(`<${BASH_INPUT_TAG}>`)) { haikuTitleAttemptedRef.current = true; void generateSessionTitle(text, new AbortController().signal).then(title => { - if (title) setHaikuTitle(title);else haikuTitleAttemptedRef.current = false; + if (title) setHaikuTitle(title); else haikuTitleAttemptedRef.current = false; }, () => { haikuTitleAttemptedRef.current = false; }); @@ -2760,11 +2760,11 @@ export function REPL({ }); } queryCheckpoint('query_context_loading_start'); - const [,, defaultSystemPrompt, baseUserContext, systemContext] = await Promise.all([ - // IMPORTANT: do this after setMessages() above, to avoid UI jank - checkAndDisableBypassPermissionsIfNeeded(toolPermissionContext, setAppState), - // Gated on TRANSCRIPT_CLASSIFIER so GrowthBook kill switch runs wherever auto mode is built in - feature('TRANSCRIPT_CLASSIFIER') ? checkAndDisableAutoModeIfNeeded(toolPermissionContext, setAppState, store.getState().fastMode) : undefined, getSystemPrompt(freshTools, mainLoopModelParam, Array.from(toolPermissionContext.additionalWorkingDirectories.keys()), freshMcpClients), getUserContext(), getSystemContext()]); + const [, , defaultSystemPrompt, baseUserContext, systemContext] = await Promise.all([ + // IMPORTANT: do this after setMessages() above, to avoid UI jank + checkAndDisableBypassPermissionsIfNeeded(toolPermissionContext, setAppState), + // Gated on TRANSCRIPT_CLASSIFIER so GrowthBook kill switch runs wherever auto mode is built in + feature('TRANSCRIPT_CLASSIFIER') ? checkAndDisableAutoModeIfNeeded(toolPermissionContext, setAppState, store.getState().fastMode) : undefined, getSystemPrompt(freshTools, mainLoopModelParam, Array.from(toolPermissionContext.additionalWorkingDirectories.keys()), freshMcpClients), getUserContext(), getSystemContext()]); const userContext = { ...baseUserContext, ...getCoordinatorUserContext(freshMcpClients, isScratchpadEnabled() ? 
getScratchpadDir() : undefined), @@ -3110,9 +3110,9 @@ export function REPL({ if (typeof content === 'string' && !initialMsg.message.planContent) { // Route through onSubmit for proper processing including UserPromptSubmit hooks void onSubmit(content, { - setCursorOffset: () => {}, - clearBuffer: () => {}, - resetHistory: () => {} + setCursorOffset: () => { }, + clearBuffer: () => { }, + resetHistory: () => { } }); } else { // Plan messages or complex content (images, etc.) - send directly to model @@ -3121,10 +3121,10 @@ export function REPL({ const newAbortController = createAbortController(); setAbortController(newAbortController); void onQuery([initialMsg.message], newAbortController, true, - // shouldQuery - [], - // additionalAllowedTools - mainLoopModel); + // shouldQuery + [], + // additionalAllowedTools + mainLoopModel); } // Reset ref after a delay to allow new initial messages @@ -3526,18 +3526,18 @@ export function REPL({ setStashedPrompt(undefined); } }, [queryGuard, - // isLoading is read at the !isLoading checks above for input-clearing - // and submitCount gating. It's derived from isQueryActive || isExternalLoading, - // so including it here ensures the closure captures the fresh value. - isLoading, isExternalLoading, inputMode, commands, setInputValue, setInputMode, setPastedContents, setSubmitCount, setIDESelection, setToolJSX, getToolUseContext, - // messages is read via messagesRef.current inside the callback to - // keep onSubmit stable across message updates (see L2384/L2400/L2662). - // Without this, each setMessages call (~30× per turn) recreates - // onSubmit, pinning the REPL render scope (1776B) + that render's - // messages array in downstream closures (PromptInput, handleAutoRunIssue). - // Heap analysis showed ~9 REPL scopes and ~15 messages array versions - // accumulating after #20174/#20175, all traced to this dep. - mainLoopModel, pastedContents, ideSelection, setUserInputOnProcessing, setAbortController, addNotification, onQuery, stashedPrompt, setStashedPrompt, setAppState, onBeforeQuery, canUseTool, remoteSession, setMessages, awaitPendingHooks, repinScroll]); + // isLoading is read at the !isLoading checks above for input-clearing + // and submitCount gating. It's derived from isQueryActive || isExternalLoading, + // so including it here ensures the closure captures the fresh value. + isLoading, isExternalLoading, inputMode, commands, setInputValue, setInputMode, setPastedContents, setSubmitCount, setIDESelection, setToolJSX, getToolUseContext, + // messages is read via messagesRef.current inside the callback to + // keep onSubmit stable across message updates (see L2384/L2400/L2662). + // Without this, each setMessages call (~30× per turn) recreates + // onSubmit, pinning the REPL render scope (1776B) + that render's + // messages array in downstream closures (PromptInput, handleAutoRunIssue). + // Heap analysis showed ~9 REPL scopes and ~15 messages array versions + // accumulating after #20174/#20175, all traced to this dep. 
+ mainLoopModel, pastedContents, ideSelection, setUserInputOnProcessing, setAbortController, addNotification, onQuery, stashedPrompt, setStashedPrompt, setAppState, onBeforeQuery, canUseTool, remoteSession, setMessages, awaitPendingHooks, repinScroll]); // Callback for when user submits input while viewing a teammate's transcript const onAgentSubmit = useCallback(async (input: string, task: InProcessTeammateTaskState | LocalAgentTaskState, helpers: PromptInputHelpers) => { @@ -3558,8 +3558,8 @@ export function REPL({ addNotification({ key: `resume-agent-failed-${task.id}`, jsx: - Failed to resume agent: {errorMessage(err)} - , + Failed to resume agent: {errorMessage(err)} + , priority: 'low' }); }); @@ -3577,9 +3577,9 @@ export function REPL({ const command = autoRunIssueReason ? getAutoRunCommand(autoRunIssueReason) : '/issue'; setAutoRunIssueReason(null); // Clear the state onSubmit(command, { - setCursorOffset: () => {}, - clearBuffer: () => {}, - resetHistory: () => {} + setCursorOffset: () => { }, + clearBuffer: () => { }, + resetHistory: () => { } }).catch(err => { logForDebugging(`Auto-run ${command} failed: ${errorMessage(err)}`); }); @@ -3592,9 +3592,9 @@ export function REPL({ const handleSurveyRequestFeedback = useCallback(() => { const command = "external" === 'ant' ? '/issue' : '/feedback'; onSubmit(command, { - setCursorOffset: () => {}, - clearBuffer: () => {}, - resetHistory: () => {} + setCursorOffset: () => { }, + clearBuffer: () => { }, + resetHistory: () => { } }).catch(err => { logForDebugging(`Survey feedback request failed: ${err instanceof Error ? err.message : String(err)}`); }); @@ -3609,9 +3609,9 @@ export function REPL({ onSubmitRef.current = onSubmit; const handleOpenRateLimitOptions = useCallback(() => { void onSubmitRef.current('/rate-limit-options', { - setCursorOffset: () => {}, - clearBuffer: () => {}, - resetHistory: () => {} + setCursorOffset: () => { }, + clearBuffer: () => { }, + resetHistory: () => { } }); }, []); const handleExit = useCallback(async () => { @@ -3628,14 +3628,14 @@ export function REPL({ } const showWorktree = getCurrentWorktreeSession() !== null; if (showWorktree) { - setExitFlow( {}} onCancel={() => { + setExitFlow( { }} onCancel={() => { setExitFlow(null); setIsExiting(false); }} />); return; } const exitMod = await exit.load(); - const exitFlowResult = await exitMod.call(() => {}); + const exitFlowResult = await exitMod.call(() => { }); setExitFlow(exitFlowResult); // If call() returned without killing the process (bg session detach), // clear isExiting so the UI is usable on reattach. No-op on the normal @@ -3749,18 +3749,18 @@ export function REPL({ }; const messageActionCaps: MessageActionCaps = { copy: text => - // setClipboard RETURNS OSC 52 — caller must stdout.write (tmux side-effects load-buffer, but that's tmux-only). - void setClipboard(text).then(raw => { - if (raw) process.stdout.write(raw); - addNotification({ - // Same key as text-selection copy — repeated copies replace toast, don't queue. - key: 'selection-copied', - text: 'copied', - color: 'success', - priority: 'immediate', - timeoutMs: 2000 - }); - }), + // setClipboard RETURNS OSC 52 — caller must stdout.write (tmux side-effects load-buffer, but that's tmux-only). + void setClipboard(text).then(raw => { + if (raw) process.stdout.write(raw); + addNotification({ + // Same key as text-selection copy — repeated copies replace toast, don't queue. 
+ key: 'selection-copied', + text: 'copied', + color: 'success', + priority: 'immediate', + timeoutMs: 2000 + }); + }), edit: async msg => { // Same skip-confirm check as /rewind: lossless → direct, else confirm dialog. const rawIdx = findRawIndex(msg.uuid); @@ -3856,14 +3856,14 @@ export function REPL({ const executeQueuedInput = useCallback(async (queuedCommands: QueuedCommand[]) => { await handlePromptSubmit({ helpers: { - setCursorOffset: () => {}, - clearBuffer: () => {}, - resetHistory: () => {} + setCursorOffset: () => { }, + clearBuffer: () => { }, + resetHistory: () => { } }, queryGuard, commands, - onInputChange: () => {}, - setPastedContents: () => {}, + onInputChange: () => { }, + setPastedContents: () => { }, setToolJSX, getToolUseContext, messages, @@ -3924,8 +3924,8 @@ export function REPL({ // User hasn't interacted since response ended, check other conditions const idleTimeSinceResponse = Date.now() - lastQueryCompletionTime; if (!isLoading && !toolJSX && - // Use ref to get current dialog state, avoiding stale closure - focusedInputDialogRef.current === undefined && idleTimeSinceResponse >= getGlobalConfig().messageIdleNotifThresholdMs) { + // Use ref to get current dialog state, avoiding stale closure + focusedInputDialogRef.current === undefined && idleTimeSinceResponse >= getGlobalConfig().messageIdleNotifThresholdMs) { void sendNotification({ message: 'Claude is waiting for your input', notificationType: 'idle_prompt' @@ -3957,13 +3957,13 @@ export function REPL({ addNotif({ key: 'idle-return-hint', jsx: mode === 'hint_v2' ? <> - new task? - /clear - to save - {formattedTokens} tokens - : - new task? /clear to save {formattedTokens} tokens - , + new task? + /clear + to save + {formattedTokens} tokens + : + new task? /clear to save {formattedTokens} tokens + , priority: 'medium', // Persist until submit — the hint fires at T+75min idle, user may // not return for hours. removeNotification in useEffect cleanup @@ -4015,17 +4015,17 @@ export function REPL({ // Voice input integration (VOICE_MODE builds only) const voice = feature('VOICE_MODE') ? - // biome-ignore lint/correctness/useHookAtTopLevel: feature() is a compile-time constant - useVoiceIntegration({ - setInputValueRaw, - inputValueRef, - insertTextRef - }) : { - stripTrailing: () => 0, - handleKeyEvent: () => {}, - resetAnchor: () => {}, - interimRange: null - }; + // biome-ignore lint/correctness/useHookAtTopLevel: feature() is a compile-time constant + useVoiceIntegration({ + setInputValueRaw, + inputValueRef, + insertTextRef + }) : { + stripTrailing: () => 0, + handleKeyEvent: () => { }, + resetAnchor: () => { }, + interimRange: null + }; useInboxPoller({ enabled: isAgentSwarmsEnabled(), isLoading, @@ -4228,11 +4228,11 @@ export function REPL({ event.stopImmediatePropagation(); } }, - // Search needs virtual scroll (jumpRef drives VirtualMessageList). [ - // kills it, so !dumpMode — after [ there's nothing to jump in. - { - isActive: screen === 'transcript' && virtualScrollActive && !searchOpen && !dumpMode - }); + // Search needs virtual scroll (jumpRef drives VirtualMessageList). [ + // kills it, so !dumpMode — after [ there's nothing to jump in. + { + isActive: screen === 'transcript' && virtualScrollActive && !searchOpen && !dumpMode + }); const { setQuery: setHighlight, scanElement, @@ -4323,12 +4323,12 @@ export function REPL({ })(); } }, - // !searchOpen: typing 'v' or '[' in the search bar is search input, not - // a command. 
No !dumpMode here — v should work after [ (the [ handler - // guards itself inline). - { - isActive: screen === 'transcript' && virtualScrollActive && !searchOpen - }); + // !searchOpen: typing 'v' or '[' in the search bar is search input, not + // a command. No !dumpMode here — v should work after [ (the [ handler + // guards itself inline). + { + isActive: screen === 'transcript' && virtualScrollActive && !searchOpen + }); // Fresh `less` per transcript entry. Prevents stale highlights matching // unrelated normal-mode text (overlay is alt-screen-global) and avoids @@ -4396,78 +4396,78 @@ export function REPL({ const transcriptScrollRef = isFullscreenEnvEnabled() && !disableVirtualScroll && !dumpMode ? scrollRef : undefined; const transcriptMessagesElement = ; const transcriptToolJSX = toolJSX && - {toolJSX.jsx} - ; + {toolJSX.jsx} + ; const transcriptReturn = - - - {feature('VOICE_MODE') ? : null} - - {transcriptScrollRef ? - // ScrollKeybindingHandler must mount before CancelRequestHandler so - // ctrl+c-with-selection copies instead of cancelling the active task. - // Its raw useInput handler only stops propagation when a selection - // exists — without one, ctrl+c falls through to CancelRequestHandler. - jumpRef.current?.disarmSearch()} /> : null} - - {transcriptScrollRef ? - {transcriptMessagesElement} - {transcriptToolJSX} - - } bottom={searchOpen ? { - // Enter — commit. 0-match guard: junk query shouldn't - // persist (badge hidden, n/N dead anyway). - setSearchQuery(searchCount > 0 ? q : ''); - setSearchOpen(false); - // onCancel path: bar unmounts before its useEffect([query]) - // can fire with ''. Without this, searchCount stays stale - // (n guard at :4956 passes) and VML's matches[] too - // (nextMatch walks the old array). Phantom nav, no - // highlight. onExit (Enter, q non-empty) still commits. - if (!q) { - setSearchCount(0); - setSearchCurrent(0); + + + {feature('VOICE_MODE') ? : null} + + {transcriptScrollRef ? + // ScrollKeybindingHandler must mount before CancelRequestHandler so + // ctrl+c-with-selection copies instead of cancelling the active task. + // Its raw useInput handler only stops propagation when a selection + // exists — without one, ctrl+c falls through to CancelRequestHandler. + jumpRef.current?.disarmSearch()} /> : null} + + {transcriptScrollRef ? + {transcriptMessagesElement} + {transcriptToolJSX} + + } bottom={searchOpen ? { + // Enter — commit. 0-match guard: junk query shouldn't + // persist (badge hidden, n/N dead anyway). + setSearchQuery(searchCount > 0 ? q : ''); + setSearchOpen(false); + // onCancel path: bar unmounts before its useEffect([query]) + // can fire with ''. Without this, searchCount stays stale + // (n guard at :4956 passes) and VML's matches[] too + // (nextMatch walks the old array). Phantom nav, no + // highlight. onExit (Enter, q non-empty) still commits. + if (!q) { + setSearchCount(0); + setSearchCurrent(0); + jumpRef.current?.setSearchQuery(''); + } + }} onCancel={() => { + // Esc/ctrl+c/ctrl+g — undo. Bar's effect last fired + // with whatever was typed. searchQuery (REPL state) + // is unchanged since / (onClose = commit, didn't run). + // Two VML calls: '' restores anchor (0-match else- + // branch), then searchQuery re-scans from anchor's + // nearest. Both synchronous — one React batch. + // setHighlight explicit: REPL's sync-effect dep is + // searchQuery (unchanged), wouldn't re-fire. + setSearchOpen(false); jumpRef.current?.setSearchQuery(''); - } - }} onCancel={() => { - // Esc/ctrl+c/ctrl+g — undo. 
Bar's effect last fired - // with whatever was typed. searchQuery (REPL state) - // is unchanged since / (onClose = commit, didn't run). - // Two VML calls: '' restores anchor (0-match else- - // branch), then searchQuery re-scans from anchor's - // nearest. Both synchronous — one React batch. - // setHighlight explicit: REPL's sync-effect dep is - // searchQuery (unchanged), wouldn't re-fire. - setSearchOpen(false); - jumpRef.current?.setSearchQuery(''); - jumpRef.current?.setSearchQuery(searchQuery); - setHighlight(searchQuery); - }} setHighlight={setHighlight} /> : 0 ? { - current: searchCurrent, - count: searchCount - } : undefined} />} /> : <> - {transcriptMessagesElement} - {transcriptToolJSX} - - - } - ; + jumpRef.current?.setSearchQuery(searchQuery); + setHighlight(searchQuery); + }} setHighlight={setHighlight} /> : 0 ? { + current: searchCurrent, + count: searchCount + } : undefined} />} /> : <> + {transcriptMessagesElement} + {transcriptToolJSX} + + + } + ; // The virtual-scroll branch (FullscreenLayout above) needs // 's constraint — without it, // ScrollBox's flexGrow has no ceiling, viewport = content height, @@ -4478,8 +4478,8 @@ export function REPL({ // unwrapped — it wants native terminal scrollback. if (transcriptScrollRef) { return - {transcriptReturn} - ; + {transcriptReturn} + ; } return transcriptReturn; } @@ -4541,11 +4541,11 @@ export function REPL({ // early return above wraps its virtual-scroll branch the same way; only // the 30-cap dump branch stays unwrapped for native terminal scrollback. const mainReturn = - - - {feature('VOICE_MODE') ? : null} - - {/* ScrollKeybindingHandler must mount before CancelRequestHandler so + + + {feature('VOICE_MODE') ? : null} + + {/* ScrollKeybindingHandler must mount before CancelRequestHandler so ctrl+c-with-selection copies instead of cancelling the active task. Its raw useInput handler only stops propagation when a selection exists — without one, ctrl+c falls through to CancelRequestHandler. @@ -4553,40 +4553,40 @@ export function REPL({ the modal's inner ScrollBox is not keyboard-driven. onScroll stays suppressed while a modal is showing so scroll doesn't stamp divider/pill state. */} - - {feature('MESSAGE_ACTIONS') && isFullscreenEnvEnabled() && !disableMessageActions ? : null} - - - : undefined} modal={centeredModal} modalScrollRef={modalScrollRef} dividerYRef={dividerYRef} hidePill={!!viewedAgentTask} hideSticky={!!viewedTeammateTask} newMessageCount={unseenDivider?.count ?? 0} onPillClick={() => { + + {feature('MESSAGE_ACTIONS') && isFullscreenEnvEnabled() && !disableMessageActions ? : null} + + + : undefined} modal={centeredModal} modalScrollRef={modalScrollRef} dividerYRef={dividerYRef} hidePill={!!viewedAgentTask} hideSticky={!!viewedTeammateTask} newMessageCount={unseenDivider?.count ?? 0} onPillClick={() => { setCursor(null); jumpToNew(scrollRef.current); }} scrollable={<> - - - - {/* Hide the processing placeholder while a modal is showing — + + + + {/* Hide the processing placeholder while a modal is showing — it would sit at the last visible transcript row right above the ▔ divider, showing "❯ /config" as redundant clutter (the modal IS the /config UI). Outside modals it stays so the user sees their input echoed while Claude processes. */} - {!disabled && placeholderText && !centeredModal && } - {toolJSX && !(toolJSX.isLocalJSXCommand && toolJSX.isImmediate) && !toolJsxCentered && - {toolJSX.jsx} - } - {"external" === 'ant' && } - {feature('WEB_BROWSER_TOOL') ? 
WebBrowserPanelModule && : null} - - {showSpinner && 0} leaderIsIdle={!isLoading} />} - {!showSpinner && !isLoading && !userInputOnProcessing && !hasRunningTeammates && isBriefOnly && !viewedAgentTask && } - {isFullscreenEnvEnabled() && } - } bottom={ - {feature('BUDDY') && companionNarrow && isFullscreenEnvEnabled() && companionVisible ? : null} - - {permissionStickyFooter} - {/* Immediate local-jsx commands (/btw, /sandbox, /assistant, + {toolJSX && !(toolJSX.isLocalJSXCommand && toolJSX.isImmediate) && !toolJsxCentered && + {toolJSX.jsx} + } + {"external" === 'ant' && } + {feature('WEB_BROWSER_TOOL') ? WebBrowserPanelModule && : null} + + {showSpinner && 0} leaderIsIdle={!isLoading} />} + {!showSpinner && !isLoading && !userInputOnProcessing && !hasRunningTeammates && isBriefOnly && !viewedAgentTask && } + {isFullscreenEnvEnabled() && } + } bottom={ + {feature('BUDDY') && companionNarrow && isFullscreenEnvEnabled() && companionVisible ? : null} + + {permissionStickyFooter} + {/* Immediate local-jsx commands (/btw, /sandbox, /assistant, /issue) render here, NOT inside scrollable. They stay mounted while the main conversation streams behind them, so ScrollBox relayouts on each new message would drag them around. bottom @@ -4595,13 +4595,13 @@ export function REPL({ stays in scrollable: the main loop is paused so no jiggle, and their tall content (DiffDetailView renders up to 400 lines with no internal scroll) needs the outer ScrollBox. */} - {toolJSX?.isLocalJSXCommand && toolJSX.isImmediate && !toolJsxCentered && - {toolJSX.jsx} - } - {!showSpinner && !toolJSX?.isLocalJSXCommand && showExpandedTodos && tasksV2 && tasksV2.length > 0 && - - } - {focusedInputDialog === 'sandbox-permission' && + {toolJSX.jsx} + } + {!showSpinner && !toolJSX?.isLocalJSXCommand && showExpandedTodos && tasksV2 && tasksV2.length > 0 && + + } + {focusedInputDialog === 'sandbox-permission' && { @@ -4650,7 +4650,7 @@ export function REPL({ sandboxBridgeCleanupRef.current.delete(approvedHost); } }} />} - {focusedInputDialog === 'prompt' && { + {focusedInputDialog === 'prompt' && { const item = promptQueue[0]; if (!item) return; item.resolve({ @@ -4664,12 +4664,12 @@ export function REPL({ item.reject(new Error('Prompt cancelled by user')); setPromptQueue(([, ...tail]) => tail); }} />} - {/* Show pending indicator on worker while waiting for leader approval */} - {pendingWorkerRequest && } - {/* Show pending indicator for sandbox permission on worker side */} - {pendingSandboxRequest && } - {/* Worker sandbox permission requests from swarm workers */} - {focusedInputDialog === 'worker-sandbox-permission' && } + {/* Show pending indicator for sandbox permission on worker side */} + {pendingSandboxRequest && } + {/* Worker sandbox permission requests from swarm workers */} + {focusedInputDialog === 'worker-sandbox-permission' && } - {focusedInputDialog === 'elicitation' && { + {focusedInputDialog === 'elicitation' && { const currentRequest = elicitation.queue[0]; if (!currentRequest) return; // Call respond callback to resolve Promise @@ -4742,7 +4742,7 @@ export function REPL({ })); currentRequest?.onWaitingDismiss?.(action); }} />} - {focusedInputDialog === 'cost' && { + {focusedInputDialog === 'cost' && { setShowCostDialog(false); setHaveShownCostDialog(true); saveGlobalConfig(current => ({ @@ -4751,7 +4751,7 @@ export function REPL({ })); logEvent('tengu_cost_threshold_acknowledged', {}); }} />} - {focusedInputDialog === 'idle-return' && idleReturnPending && { + {focusedInputDialog === 'idle-return' && 
idleReturnPending && { const pending = idleReturnPending; setIdleReturnPending(null); logEvent('tengu_idle_return_action', { @@ -4793,13 +4793,13 @@ export function REPL({ } skipIdleCheckRef.current = true; void onSubmitRef.current(pending.input, { - setCursorOffset: () => {}, - clearBuffer: () => {}, - resetHistory: () => {} + setCursorOffset: () => { }, + clearBuffer: () => { }, + resetHistory: () => { } }); }} />} - {focusedInputDialog === 'ide-onboarding' && setShowIdeOnboarding(false)} installationStatus={ideInstallationStatus} />} - {"external" === 'ant' && focusedInputDialog === 'model-switch' && AntModelSwitchCallout && { + {focusedInputDialog === 'ide-onboarding' && setShowIdeOnboarding(false)} installationStatus={ideInstallationStatus} />} + {"external" === 'ant' && focusedInputDialog === 'model-switch' && AntModelSwitchCallout && { setShowModelSwitchCallout(false); if (selection === 'switch' && modelAlias) { setAppState(prev => ({ @@ -4809,8 +4809,8 @@ export function REPL({ })); } }} />} - {"external" === 'ant' && focusedInputDialog === 'undercover-callout' && UndercoverAutoCallout && setShowUndercoverCallout(false)} />} - {focusedInputDialog === 'effort-callout' && { + {"external" === 'ant' && focusedInputDialog === 'undercover-callout' && UndercoverAutoCallout && setShowUndercoverCallout(false)} />} + {focusedInputDialog === 'effort-callout' && { setShowEffortCallout(false); if (selection !== 'dismiss') { setAppState(prev => ({ @@ -4819,7 +4819,7 @@ export function REPL({ })); } }} />} - {focusedInputDialog === 'remote-callout' && { + {focusedInputDialog === 'remote-callout' && { setAppState(prev => { if (!prev.showRemoteCallout) return prev; return { @@ -4834,17 +4834,17 @@ export function REPL({ }); }} />} - {exitFlow} + {exitFlow} - {focusedInputDialog === 'plugin-hint' && hintRecommendation && } + {focusedInputDialog === 'plugin-hint' && hintRecommendation && } - {focusedInputDialog === 'lsp-recommendation' && lspRecommendation && } + {focusedInputDialog === 'lsp-recommendation' && lspRecommendation && } - {focusedInputDialog === 'desktop-upsell' && setShowDesktopUpsellStartup(false)} />} + {focusedInputDialog === 'desktop-upsell' && setShowDesktopUpsellStartup(false)} />} - {feature('ULTRAPLAN') ? focusedInputDialog === 'ultraplan-choice' && ultraplanPendingChoice && store.getState()} setConversationId={setConversationId} /> : null} + {feature('ULTRAPLAN') ? focusedInputDialog === 'ultraplan-choice' && ultraplanPendingChoice && store.getState()} setConversationId={setConversationId} /> : null} - {feature('ULTRAPLAN') ? focusedInputDialog === 'ultraplan-launch' && ultraplanLaunchPending && { + {feature('ULTRAPLAN') ? focusedInputDialog === 'ultraplan-launch' && ultraplanLaunchPending && { const blurb = ultraplanLaunchPending.blurb; setAppState(prev => prev.ultraplanLaunchPending ? { ...prev, @@ -4884,26 +4884,26 @@ export function REPL({ }).then(appendStdout).catch(logError); }} /> : null} - {mrRender()} + {mrRender()} - {!toolJSX?.shouldHidePromptInput && !focusedInputDialog && !isExiting && !disabled && !cursor && <> - {autoRunIssueReason && } - {postCompactSurvey.state !== 'closed' ? : memorySurvey.state !== 'closed' ? 
: } - {/* Frustration-triggered transcript sharing prompt */} - {frustrationDetection.state !== 'closed' && {}} handleTranscriptSelect={frustrationDetection.handleTranscriptSelect} inputValue={inputValue} setInputValue={setInputValue} />} - {/* Skill improvement survey - appears when improvements detected (ant-only) */} - {"external" === 'ant' && skillImprovementSurvey.suggestion && } - {showIssueFlagBanner && } - {} - - - } - {cursor && - // inputValue is REPL state; typed text survives the round-trip. - } - {focusedInputDialog === 'message-selector' && { + {!toolJSX?.shouldHidePromptInput && !focusedInputDialog && !isExiting && !disabled && !cursor && <> + {autoRunIssueReason && } + {postCompactSurvey.state !== 'closed' ? : memorySurvey.state !== 'closed' ? : } + {/* Frustration-triggered transcript sharing prompt */} + {frustrationDetection.state !== 'closed' && { }} handleTranscriptSelect={frustrationDetection.handleTranscriptSelect} inputValue={inputValue} setInputValue={setInputValue} />} + {/* Skill improvement survey - appears when improvements detected (ant-only) */} + {"external" === 'ant' && skillImprovementSurvey.suggestion && } + {showIssueFlagBanner && } + { } + + + } + {cursor && + // inputValue is REPL state; typed text survives the round-trip. + } + {focusedInputDialog === 'message-selector' && { await fileHistoryRewind((updater: (prev: FileHistoryState) => FileHistoryState) => { setAppState(prev => ({ ...prev, @@ -4985,16 +4985,16 @@ export function REPL({ setIsMessageSelectorVisible(false); setMessageSelectorPreselect(undefined); }} />} - {"external" === 'ant' && } - - {feature('BUDDY') && !(companionNarrow && isFullscreenEnvEnabled()) && companionVisible ? : null} - } /> - - ; + {"external" === 'ant' && } + + {feature('BUDDY') && !(companionNarrow && isFullscreenEnvEnabled()) && companionVisible ? : null} + } /> + + ; if (isFullscreenEnvEnabled()) { return - {mainReturn} - ; + {mainReturn} + ; } return mainReturn; } From 63546dcd9c3f80b1042897360b069033d30d7aff Mon Sep 17 00:00:00 2001 From: Raj Rasane Date: Thu, 2 Apr 2026 10:38:22 +0530 Subject: [PATCH 12/35] chore: rename default terminal title to Open Claude --- src/screens/REPL.tsx | 2 +- src/services/mcp/client.ts | 118 ++++++++++++++++++------------------- src/services/notifier.ts | 2 +- 3 files changed, 61 insertions(+), 61 deletions(-) diff --git a/src/screens/REPL.tsx b/src/screens/REPL.tsx index ef1513aa..65df5ca4 100644 --- a/src/screens/REPL.tsx +++ b/src/screens/REPL.tsx @@ -1127,7 +1127,7 @@ export function REPL({ // session from mid-conversation context. const haikuTitleAttemptedRef = useRef((initialMessages?.length ?? 0) > 0); const agentTitle = mainThreadAgentDefinition?.agentType; - const terminalTitle = sessionTitle ?? agentTitle ?? haikuTitle ?? 'Claude Code'; + const terminalTitle = sessionTitle ?? agentTitle ?? haikuTitle ?? 'Open Claude'; const isWaitingForApproval = toolUseConfirmQueue.length > 0 || promptQueue.length > 0 || pendingWorkerRequest || pendingSandboxRequest; // Local-jsx commands (like /plugin, /config) show user-facing dialogs that // wait for input. Require jsx != null — if the flag is stuck true but jsx diff --git a/src/services/mcp/client.ts b/src/services/mcp/client.ts index 0b3afc6a..b053dbb6 100644 --- a/src/services/mcp/client.ts +++ b/src/services/mcp/client.ts @@ -116,8 +116,8 @@ import { getLoggingSafeMcpBaseUrl } from './utils.js' /* eslint-disable @typescript-eslint/no-require-imports */ const fetchMcpSkillsForClient = feature('MCP_SKILLS') ? 
( - require('../../skills/mcpSkills.js') as typeof import('../../skills/mcpSkills.js') - ).fetchMcpSkillsForClient + require('../../skills/mcpSkills.js') as typeof import('../../skills/mcpSkills.js') + ).fetchMcpSkillsForClient : null import { UnauthorizedError } from '@modelcontextprotocol/sdk/client/auth.js' @@ -240,12 +240,12 @@ const claudeInChromeToolRendering = // GrowthBook tengu_malort_pedway (see gates.ts). const computerUseWrapper = feature('CHICAGO_MCP') ? (): typeof import('../../utils/computerUse/wrapper.js') => - require('../../utils/computerUse/wrapper.js') + require('../../utils/computerUse/wrapper.js') : undefined const isComputerUseMCPServer = feature('CHICAGO_MCP') ? ( - require('../../utils/computerUse/common.js') as typeof import('../../utils/computerUse/common.js') - ).isComputerUseMCPServer + require('../../utils/computerUse/common.js') as typeof import('../../utils/computerUse/common.js') + ).isComputerUseMCPServer : undefined import { mkdir, readFile, unlink, writeFile } from 'fs/promises' @@ -326,9 +326,9 @@ function mcpBaseUrlAnalytics(serverRef: ScopedMcpServerConfig): { const url = getLoggingSafeMcpBaseUrl(serverRef) return url ? { - mcpServerBaseUrl: - url as AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS, - } + mcpServerBaseUrl: + url as AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS, + } : {} } @@ -683,20 +683,20 @@ export const connectToServer = memoize( const transportOptions: SSEClientTransportOptions = proxyOptions.dispatcher ? { - eventSourceInit: { - fetch: async (url: string | URL, init?: RequestInit) => { - // eslint-disable-next-line eslint-plugin-n/no-unsupported-features/node-builtins - return fetch(url, { - ...init, - ...proxyOptions, - headers: { - 'User-Agent': getMCPUserAgent(), - ...init?.headers, - }, - }) - }, + eventSourceInit: { + fetch: async (url: string | URL, init?: RequestInit) => { + // eslint-disable-next-line eslint-plugin-n/no-unsupported-features/node-builtins + return fetch(url, { + ...init, + ...proxyOptions, + headers: { + 'User-Agent': getMCPUserAgent(), + ...init?.headers, + }, + }) }, - } + }, + } : {} transport = new SSEClientTransport( @@ -832,8 +832,8 @@ export const connectToServer = memoize( 'User-Agent': getMCPUserAgent(), ...(sessionIngressToken && !hasOAuthTokens && { - Authorization: `Bearer ${sessionIngressToken}`, - }), + Authorization: `Bearer ${sessionIngressToken}`, + }), ...combinedHeaders, }, }, @@ -842,10 +842,10 @@ export const connectToServer = memoize( // Redact sensitive headers before logging const headersForLogging = transportOptions.requestInit?.headers ? mapValues( - transportOptions.requestInit.headers as Record, - (value, key) => - key.toLowerCase() === 'authorization' ? '[REDACTED]' : value, - ) + transportOptions.requestInit.headers as Record, + (value, key) => + key.toLowerCase() === 'authorization' ? '[REDACTED]' : value, + ) : undefined logMCPDebug( @@ -985,7 +985,7 @@ export const connectToServer = memoize( const client = new Client( { name: 'claude-code', - title: 'Claude Code', + title: 'Open Claude', version: MACRO.VERSION ?? 
'unknown', description: "Anthropic's agentic coding tool", websiteUrl: PRODUCT_URL, @@ -1054,9 +1054,9 @@ export const connectToServer = memoize( `Connection timeout triggered after ${elapsed}ms (limit: ${getConnectionTimeoutMs()}ms)`, ) if (inProcessServer) { - inProcessServer.close().catch(() => {}) + inProcessServer.close().catch(() => { }) } - transport.close().catch(() => {}) + transport.close().catch(() => { }) reject( new TelemetrySafeError_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS( `MCP server "${name}" connection timed out after ${getConnectionTimeoutMs()}ms`, @@ -1145,9 +1145,9 @@ export const connectToServer = memoize( }) } if (inProcessServer) { - inProcessServer.close().catch(() => {}) + inProcessServer.close().catch(() => { }) } - transport.close().catch(() => {}) + transport.close().catch(() => { }) if (stderrOutput) { logMCPError(name, `Server stderr: ${stderrOutput}`) } @@ -1627,7 +1627,7 @@ export const connectToServer = memoize( logMCPError(name, `Connection failed: ${errorMessage(error)}`) if (inProcessServer) { - inProcessServer.close().catch(() => {}) + inProcessServer.close().catch(() => { }) } return { name, @@ -1779,8 +1779,8 @@ export const fetchToolsForClient = memoizeWithLRU( searchHint: typeof tool._meta?.['anthropic/searchHint'] === 'string' ? tool._meta['anthropic/searchHint'] - .replace(/\s+/g, ' ') - .trim() || undefined + .replace(/\s+/g, ' ') + .trim() || undefined : undefined, alwaysLoad: tool._meta?.['anthropic/alwaysLoad'] === true, async description() { @@ -1871,11 +1871,11 @@ export const fetchToolsForClient = memoizeWithLRU( onProgress: onProgress && toolUseId ? progressData => { - onProgress({ - toolUseID: toolUseId, - data: progressData, - }) - } + onProgress({ + toolUseID: toolUseId, + data: progressData, + }) + } : undefined, handleElicitation: context.handleElicitation, }) @@ -1975,14 +1975,14 @@ export const fetchToolsForClient = memoizeWithLRU( return `${client.name} - ${displayName} (MCP)` }, ...(isClaudeInChromeMCPServer(client.name) && - (client.config.type === 'stdio' || !client.config.type) + (client.config.type === 'stdio' || !client.config.type) ? claudeInChromeToolRendering().getClaudeInChromeMCPToolOverrides( - tool.name, - ) + tool.name, + ) : {}), ...(feature('CHICAGO_MCP') && - (client.config.type === 'stdio' || !client.config.type) && - isComputerUseMCPServer!(client.name) + (client.config.type === 'stdio' || !client.config.type) && + isComputerUseMCPServer!(client.name) ? computerUseWrapper!().getComputerUseMCPToolOverrides(tool.name) : {}), } @@ -2876,9 +2876,9 @@ export async function callMCPToolWithUrlElicitationRetry({ const errorData = error.data const rawElicitations = errorData != null && - typeof errorData === 'object' && - 'elicitations' in errorData && - Array.isArray(errorData.elicitations) + typeof errorData === 'object' && + 'elicitations' in errorData && + Array.isArray(errorData.elicitations) ? (errorData.elicitations as unknown[]) : [] @@ -3101,16 +3101,16 @@ async function callMCPTool({ timeout: timeoutMs, onprogress: onProgress ? 
sdkProgress => { - onProgress({ - type: 'mcp_progress', - status: 'progress', - serverName: name, - toolName: tool, - progress: sdkProgress.progress, - total: sdkProgress.total, - progressMessage: sdkProgress.message, - }) - } + onProgress({ + type: 'mcp_progress', + status: 'progress', + serverName: name, + toolName: tool, + progress: sdkProgress.progress, + total: sdkProgress.total, + progressMessage: sdkProgress.message, + }) + } : undefined, }, ), @@ -3280,7 +3280,7 @@ export async function setupSdkMcpClients( const client = new Client( { name: 'claude-code', - title: 'Claude Code', + title: 'Open Claude', version: MACRO.VERSION ?? 'unknown', description: "Anthropic's agentic coding tool", websiteUrl: PRODUCT_URL, diff --git a/src/services/notifier.ts b/src/services/notifier.ts index 330e16a0..b2136a4e 100644 --- a/src/services/notifier.ts +++ b/src/services/notifier.ts @@ -35,7 +35,7 @@ export async function sendNotification( }) } -const DEFAULT_TITLE = 'Claude Code' +const DEFAULT_TITLE = 'Open Claude' async function sendToChannel( channel: string, From f340b199c83f960ab292e6785bb78828fe45f4cc Mon Sep 17 00:00:00 2001 From: Raj Rasane Date: Thu, 2 Apr 2026 10:40:27 +0530 Subject: [PATCH 13/35] refactor: simplify session title fallback to static 'Open Claude' --- src/utils/sessionTitle.ts | 25 +++---------------------- 1 file changed, 3 insertions(+), 22 deletions(-) diff --git a/src/utils/sessionTitle.ts b/src/utils/sessionTitle.ts index 72ae8054..141833b4 100644 --- a/src/utils/sessionTitle.ts +++ b/src/utils/sessionTitle.ts @@ -125,28 +125,9 @@ export async function generateSessionTitle( }) logEvent('tengu_session_title_generated', { success: false }) - // Fallback: derive a title locally from the user's first message. - // This ensures 3P providers (Ollama, Gemini, OpenAI) still get - // meaningful terminal titles when the Haiku API call is unavailable. - return localFallbackTitle(trimmed) + // Fallback: When using 3P providers without a compatible schema, + // default to the application name. + return 'Open Claude' } } -/** - * Fallback local title generator for when the Haiku API is unavailable - * (e.g. when using third-party providers without an Anthropic API key). - */ -function localFallbackTitle(text: string): string | null { - const words = text.split(/\s+/).slice(0, 7) - if (words.length === 0) return null - - // Create a sentence-case string - let fallback = words.join(' ') - if (fallback.length > 50) { - fallback = fallback.substring(0, 49) + '…' - } - - if (fallback.length <= 3) return null - - return fallback.charAt(0).toUpperCase() + fallback.slice(1) -} From 25c5987276baf5049339a3193d04be76cd65b015 Mon Sep 17 00:00:00 2001 From: Rithul Kamesh Date: Thu, 2 Apr 2026 11:25:28 +0530 Subject: [PATCH 14/35] feat: add support for GitHub Models provider - Introduced environment variable CLAUDE_CODE_USE_GITHUB to enable GitHub Models. - Added checks for GITHUB_TOKEN or GH_TOKEN for authentication. - Updated base URL handling to include GitHub Models default. - Enhanced provider detection and error handling for GitHub Models. - Updated relevant functions and components to accommodate the new provider. 
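For reference, a minimal environment to exercise the new provider (values are
illustrative; the /onboard-github flow added below stores the token in secure
storage and writes the same keys to user settings, so manual exports are not
required):

    CLAUDE_CODE_USE_GITHUB=1
    GITHUB_TOKEN=<PAT with Models access>               # GH_TOKEN is also accepted
    OPENAI_MODEL=github:copilot                         # optional; resolves to openai/gpt-4.1
    OPENAI_BASE_URL=https://models.github.ai/inference  # optional; this is the default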
--- docs/github-models-pr-draft.md | 24 ++ scripts/system-check.ts | 79 +++++- src/commands.ts | 2 + src/commands/onboard-github/index.ts | 11 + .../onboard-github/onboard-github.tsx | 228 ++++++++++++++++++ src/components/StartupScreen.ts | 8 + src/entrypoints/cli.tsx | 37 ++- src/main.tsx | 6 +- src/services/api/client.ts | 5 +- src/services/api/openaiShim.ts | 117 +++++++-- .../api/providerConfig.github.test.ts | 41 ++++ src/services/api/providerConfig.ts | 26 +- src/services/github/deviceFlow.test.ts | 94 ++++++++ src/services/github/deviceFlow.ts | 174 +++++++++++++ src/utils/auth.ts | 8 +- src/utils/context.ts | 8 +- .../githubModelsCredentials.hydrate.test.ts | 66 +++++ src/utils/githubModelsCredentials.test.ts | 47 ++++ src/utils/githubModelsCredentials.ts | 73 ++++++ src/utils/managedEnvConstants.ts | 2 + src/utils/model/providers.test.ts | 12 + src/utils/model/providers.ts | 29 ++- src/utils/providerProfile.ts | 2 + src/utils/swarm/spawnUtils.ts | 12 + 24 files changed, 1069 insertions(+), 42 deletions(-) create mode 100644 docs/github-models-pr-draft.md create mode 100644 src/commands/onboard-github/index.ts create mode 100644 src/commands/onboard-github/onboard-github.tsx create mode 100644 src/services/api/providerConfig.github.test.ts create mode 100644 src/services/github/deviceFlow.test.ts create mode 100644 src/services/github/deviceFlow.ts create mode 100644 src/utils/githubModelsCredentials.hydrate.test.ts create mode 100644 src/utils/githubModelsCredentials.test.ts create mode 100644 src/utils/githubModelsCredentials.ts diff --git a/docs/github-models-pr-draft.md b/docs/github-models-pr-draft.md new file mode 100644 index 00000000..21fa7fa8 --- /dev/null +++ b/docs/github-models-pr-draft.md @@ -0,0 +1,24 @@ +# GitHub Models + onboard — PR draft (paste into GitHub) + +**Title:** `feat: GitHub Models provider + interactive onboard (keychain-backed)` + +**Body:** + +## Summary + +- Adds GitHub Models (`models.github.ai`) as an OpenAI-compatible backend via `CLAUDE_CODE_USE_GITHUB` (see existing shim changes). +- Adds `/onboard-github`: interactive Ink flow for GitHub Device Login or PAT, stores token in OS-backed secure storage (macOS Keychain when available, else `~/.claude/.credentials.json`), and writes user settings `env` so no `export GITHUB_TOKEN` is required. +- Applies user settings before provider env validation and hydrates `GITHUB_TOKEN` from secure storage when the GitHub provider flag is on. + +## How to test + +1. Run `openclaude` and execute `/onboard-github` (or launch via command registration). +2. Complete device flow or paste a PAT with Models access. +3. Restart CLI; confirm `CLAUDE_CODE_USE_GITHUB=1` in `~/.claude/settings.json` (or merged file) and that inference works without exporting `GITHUB_TOKEN`. +4. `bun test` (new suites) + `bun run build`. + +## Notes / follow-ups + +- Device flow OAuth app client ID is configurable via `GITHUB_DEVICE_FLOW_CLIENT_ID`; verify scope list against current GitHub Models documentation. +- `/logout` currently deletes all secure storage; GitHub token is cleared too — document or narrow in a follow-up. +- Linux: secure storage is plaintext with chmod 600 today; libsecret is still TODO in `secureStorage`. 
diff --git a/scripts/system-check.ts b/scripts/system-check.ts index e129685a..6626149a 100644 --- a/scripts/system-check.ts +++ b/scripts/system-check.ts @@ -93,11 +93,15 @@ function isLocalBaseUrl(baseUrl: string): boolean { } const GEMINI_DEFAULT_BASE_URL = 'https://generativelanguage.googleapis.com/v1beta/openai' +const GITHUB_MODELS_DEFAULT_BASE = 'https://models.github.ai/inference' function currentBaseUrl(): string { if (isTruthy(process.env.CLAUDE_CODE_USE_GEMINI)) { return process.env.GEMINI_BASE_URL ?? GEMINI_DEFAULT_BASE_URL } + if (isTruthy(process.env.CLAUDE_CODE_USE_GITHUB)) { + return process.env.OPENAI_BASE_URL ?? GITHUB_MODELS_DEFAULT_BASE + } return process.env.OPENAI_BASE_URL ?? 'https://api.openai.com/v1' } @@ -126,15 +130,47 @@ function checkGeminiEnv(): CheckResult[] { return results } +function checkGithubEnv(): CheckResult[] { + const results: CheckResult[] = [] + const baseUrl = process.env.OPENAI_BASE_URL ?? GITHUB_MODELS_DEFAULT_BASE + results.push(pass('Provider mode', 'GitHub Models provider enabled.')) + + const token = process.env.GITHUB_TOKEN ?? process.env.GH_TOKEN + if (!token?.trim()) { + results.push(fail('GITHUB_TOKEN', 'Missing. Set GITHUB_TOKEN or GH_TOKEN.')) + } else { + results.push(pass('GITHUB_TOKEN', 'Configured.')) + } + + if (!process.env.OPENAI_MODEL) { + results.push( + pass( + 'OPENAI_MODEL', + 'Not set. Default github:copilot → openai/gpt-4.1 at runtime.', + ), + ) + } else { + results.push(pass('OPENAI_MODEL', process.env.OPENAI_MODEL)) + } + + results.push(pass('OPENAI_BASE_URL', baseUrl)) + return results +} + function checkOpenAIEnv(): CheckResult[] { const results: CheckResult[] = [] const useGemini = isTruthy(process.env.CLAUDE_CODE_USE_GEMINI) + const useGithub = isTruthy(process.env.CLAUDE_CODE_USE_GITHUB) const useOpenAI = isTruthy(process.env.CLAUDE_CODE_USE_OPENAI) if (useGemini) { return checkGeminiEnv() } + if (useGithub && !useOpenAI) { + return checkGithubEnv() + } + if (!useOpenAI) { results.push(pass('Provider mode', 'Anthropic login flow enabled (CLAUDE_CODE_USE_OPENAI is off).')) return results @@ -181,10 +217,19 @@ function checkOpenAIEnv(): CheckResult[] { } const key = process.env.OPENAI_API_KEY + const githubToken = process.env.GITHUB_TOKEN ?? 
process.env.GH_TOKEN if (key === 'SUA_CHAVE') { results.push(fail('OPENAI_API_KEY', 'Placeholder value detected: SUA_CHAVE.')) - } else if (!key && !isLocalBaseUrl(request.baseUrl)) { + } else if ( + !key && + !isLocalBaseUrl(request.baseUrl) && + !(useGithub && githubToken?.trim()) + ) { results.push(fail('OPENAI_API_KEY', 'Missing key for non-local provider URL.')) + } else if (!key && useGithub && githubToken?.trim()) { + results.push( + pass('OPENAI_API_KEY', 'Not set; GITHUB_TOKEN/GH_TOKEN will be used for GitHub Models.'), + ) } else if (!key) { results.push(pass('OPENAI_API_KEY', 'Not set (allowed for local providers like Ollama/LM Studio).')) } else { @@ -197,11 +242,19 @@ function checkOpenAIEnv(): CheckResult[] { async function checkBaseUrlReachability(): Promise { const useGemini = isTruthy(process.env.CLAUDE_CODE_USE_GEMINI) const useOpenAI = isTruthy(process.env.CLAUDE_CODE_USE_OPENAI) + const useGithub = isTruthy(process.env.CLAUDE_CODE_USE_GITHUB) - if (!useGemini && !useOpenAI) { + if (!useGemini && !useOpenAI && !useGithub) { return pass('Provider reachability', 'Skipped (OpenAI-compatible mode disabled).') } + if (useGithub) { + return pass( + 'Provider reachability', + 'Skipped for GitHub Models (inference endpoint differs from OpenAI /models probe).', + ) + } + const geminiBaseUrl = 'https://generativelanguage.googleapis.com/v1beta/openai' const resolvedBaseUrl = useGemini ? (process.env.GEMINI_BASE_URL ?? geminiBaseUrl) @@ -272,7 +325,11 @@ async function checkBaseUrlReachability(): Promise { } function checkOllamaProcessorMode(): CheckResult { - if (!isTruthy(process.env.CLAUDE_CODE_USE_OPENAI) || isTruthy(process.env.CLAUDE_CODE_USE_GEMINI)) { + if ( + !isTruthy(process.env.CLAUDE_CODE_USE_OPENAI) || + isTruthy(process.env.CLAUDE_CODE_USE_GEMINI) || + isTruthy(process.env.CLAUDE_CODE_USE_GITHUB) + ) { return pass('Ollama processor mode', 'Skipped (OpenAI-compatible mode disabled).') } @@ -319,6 +376,22 @@ function serializeSafeEnvSummary(): Record { GEMINI_API_KEY_SET: Boolean(process.env.GEMINI_API_KEY ?? process.env.GOOGLE_API_KEY), } } + if ( + isTruthy(process.env.CLAUDE_CODE_USE_GITHUB) && + !isTruthy(process.env.CLAUDE_CODE_USE_OPENAI) + ) { + return { + CLAUDE_CODE_USE_GITHUB: true, + OPENAI_MODEL: + process.env.OPENAI_MODEL ?? + '(unset, default: github:copilot → openai/gpt-4.1)', + OPENAI_BASE_URL: + process.env.OPENAI_BASE_URL ?? GITHUB_MODELS_DEFAULT_BASE, + GITHUB_TOKEN_SET: Boolean( + process.env.GITHUB_TOKEN ?? 
process.env.GH_TOKEN, + ), + } + } const request = resolveProviderRequest({ model: process.env.OPENAI_MODEL, baseUrl: process.env.OPENAI_BASE_URL, diff --git a/src/commands.ts b/src/commands.ts index 10f03b22..fe15aa2b 100644 --- a/src/commands.ts +++ b/src/commands.ts @@ -19,6 +19,7 @@ import cost from './commands/cost/index.js' import diff from './commands/diff/index.js' import ctx_viz from './commands/ctx_viz/index.js' import doctor from './commands/doctor/index.js' +import onboardGithub from './commands/onboard-github/index.js' import memory from './commands/memory/index.js' import help from './commands/help/index.js' import ide from './commands/ide/index.js' @@ -288,6 +289,7 @@ const COMMANDS = memoize((): Command[] => [ memory, mobile, model, + onboardGithub, outputStyle, remoteEnv, plugin, diff --git a/src/commands/onboard-github/index.ts b/src/commands/onboard-github/index.ts new file mode 100644 index 00000000..91d67247 --- /dev/null +++ b/src/commands/onboard-github/index.ts @@ -0,0 +1,11 @@ +import type { Command } from '../../commands.js' + +const onboardGithub: Command = { + name: 'onboard-github', + description: + 'Interactive setup for GitHub Models: device login or PAT, saved to secure storage', + type: 'local-jsx', + load: () => import('./onboard-github.js'), +} + +export default onboardGithub diff --git a/src/commands/onboard-github/onboard-github.tsx b/src/commands/onboard-github/onboard-github.tsx new file mode 100644 index 00000000..26088392 --- /dev/null +++ b/src/commands/onboard-github/onboard-github.tsx @@ -0,0 +1,228 @@ +import * as React from 'react' +import { useCallback, useState } from 'react' +import { Select } from '../../components/CustomSelect/select.js' +import { Spinner } from '../../components/Spinner.js' +import TextInput from '../../components/TextInput.js' +import { Box, Text } from '../../ink.js' +import { + openVerificationUri, + pollAccessToken, + requestDeviceCode, +} from '../../services/github/deviceFlow.js' +import type { LocalJSXCommandCall } from '../../types/command.js' +import { + hydrateGithubModelsTokenFromSecureStorage, + saveGithubModelsToken, +} from '../../utils/githubModelsCredentials.js' +import { updateSettingsForSource } from '../../utils/settings/settings.js' + +const DEFAULT_MODEL = 'github:copilot' + +type Step = + | 'menu' + | 'device-busy' + | 'pat' + | 'error' + +function mergeUserSettingsEnv(model: string): { ok: boolean; detail?: string } { + const { error } = updateSettingsForSource('userSettings', { + env: { + CLAUDE_CODE_USE_GITHUB: '1', + OPENAI_MODEL: model, + }, + }) + if (error) { + return { ok: false, detail: error.message } + } + return { ok: true } +} + +function OnboardGithub(props: { + onDone: Parameters[0] + onChangeAPIKey: () => void +}): React.ReactNode { + const { onDone, onChangeAPIKey } = props + const [step, setStep] = useState('menu') + const [errorMsg, setErrorMsg] = useState(null) + const [deviceHint, setDeviceHint] = useState<{ + user_code: string + verification_uri: string + } | null>(null) + const [patDraft, setPatDraft] = useState('') + + const finalize = useCallback( + async (token: string, model: string = DEFAULT_MODEL) => { + const saved = saveGithubModelsToken(token) + if (!saved.success) { + setErrorMsg(saved.warning ?? 'Could not save token to secure storage.') + setStep('error') + return + } + const merged = mergeUserSettingsEnv(model.trim() || DEFAULT_MODEL) + if (!merged.ok) { + setErrorMsg( + `Token saved, but settings were not updated: ${merged.detail ?? 'unknown error'}. 
` + + `Add env CLAUDE_CODE_USE_GITHUB=1 and OPENAI_MODEL to ~/.claude/settings.json manually.`, + ) + setStep('error') + return + } + process.env.CLAUDE_CODE_USE_GITHUB = '1' + process.env.OPENAI_MODEL = model.trim() || DEFAULT_MODEL + hydrateGithubModelsTokenFromSecureStorage() + onChangeAPIKey() + onDone( + 'GitHub Models onboard complete. Token stored in secure storage; user settings updated. Restart if the model does not switch.', + { display: 'user' }, + ) + }, + [onChangeAPIKey, onDone], + ) + + const runDeviceFlow = useCallback(async () => { + setStep('device-busy') + setErrorMsg(null) + setDeviceHint(null) + try { + const device = await requestDeviceCode() + setDeviceHint({ + user_code: device.user_code, + verification_uri: device.verification_uri, + }) + await openVerificationUri(device.verification_uri) + const token = await pollAccessToken(device.device_code, { + initialInterval: device.interval, + timeoutSeconds: device.expires_in, + }) + await finalize(token, DEFAULT_MODEL) + } catch (e) { + setErrorMsg(e instanceof Error ? e.message : String(e)) + setStep('error') + } + }, [finalize]) + + if (step === 'error' && errorMsg) { + const options = [ + { + label: 'Back to menu', + value: 'back' as const, + }, + { + label: 'Exit', + value: 'exit' as const, + }, + ] + return ( + + {errorMsg} + { + if (v === 'cancel') { + onDone('GitHub onboard cancelled', { display: 'system' }) + return + } + if (v === 'pat') { + setStep('pat') + return + } + void runDeviceFlow() + }} + /> + + ) +} + +export const call: LocalJSXCommandCall = async (onDone, context) => { + return ( + + ) +} diff --git a/src/components/StartupScreen.ts b/src/components/StartupScreen.ts index ded4f457..b20d26c1 100644 --- a/src/components/StartupScreen.ts +++ b/src/components/StartupScreen.ts @@ -80,6 +80,7 @@ const LOGO_CLAUDE = [ function detectProvider(): { name: string; model: string; baseUrl: string; isLocal: boolean } { const useGemini = process.env.CLAUDE_CODE_USE_GEMINI === '1' || process.env.CLAUDE_CODE_USE_GEMINI === 'true' + const useGithub = process.env.CLAUDE_CODE_USE_GITHUB === '1' || process.env.CLAUDE_CODE_USE_GITHUB === 'true' const useOpenAI = process.env.CLAUDE_CODE_USE_OPENAI === '1' || process.env.CLAUDE_CODE_USE_OPENAI === 'true' if (useGemini) { @@ -88,6 +89,13 @@ function detectProvider(): { name: string; model: string; baseUrl: string; isLoc return { name: 'Google Gemini', model, baseUrl, isLocal: false } } + if (useGithub) { + const model = process.env.OPENAI_MODEL || 'github:copilot' + const baseUrl = + process.env.OPENAI_BASE_URL || 'https://models.github.ai/inference' + return { name: 'GitHub Models', model, baseUrl, isLocal: false } + } + if (useOpenAI) { const model = process.env.OPENAI_MODEL || 'gpt-4o' const baseUrl = process.env.OPENAI_BASE_URL || 'https://api.openai.com/v1' diff --git a/src/entrypoints/cli.tsx b/src/entrypoints/cli.tsx index 71adb260..629dd7db 100644 --- a/src/entrypoints/cli.tsx +++ b/src/entrypoints/cli.tsx @@ -46,7 +46,22 @@ function isLocalProviderUrl(baseUrl: string | undefined): boolean { } function validateProviderEnvOrExit(): void { - if (!isEnvTruthy(process.env.CLAUDE_CODE_USE_OPENAI)) { + const useOpenAI = isEnvTruthy(process.env.CLAUDE_CODE_USE_OPENAI) + const useGithub = isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB) + + if (useGithub && !useOpenAI) { + const token = + (process.env.GITHUB_TOKEN?.trim() || process.env.GH_TOKEN?.trim()) ?? 
'' + if (!token) { + console.error( + 'GITHUB_TOKEN or GH_TOKEN is required when CLAUDE_CODE_USE_GITHUB=1.', + ) + process.exit(1) + } + return + } + + if (!useOpenAI) { return } @@ -77,8 +92,15 @@ function validateProviderEnvOrExit(): void { } if (!process.env.OPENAI_API_KEY && !isLocalProviderUrl(request.baseUrl)) { - console.error('OPENAI_API_KEY is required when CLAUDE_CODE_USE_OPENAI=1 and OPENAI_BASE_URL is not local.') - process.exit(1) + const hasGithubToken = !!( + process.env.GITHUB_TOKEN?.trim() || process.env.GH_TOKEN?.trim() + ) + if (!(useGithub && hasGithubToken)) { + console.error( + 'OPENAI_API_KEY is required when CLAUDE_CODE_USE_OPENAI=1 and OPENAI_BASE_URL is not local. When CLAUDE_CODE_USE_GITHUB=1, GITHUB_TOKEN or GH_TOKEN may be used instead.', + ) + process.exit(1) + } } } @@ -98,6 +120,15 @@ async function main(): Promise { return; } + { + const { enableConfigs } = await import('../utils/config.js') + enableConfigs() + const { applySafeConfigEnvironmentVariables } = await import('../utils/managedEnv.js') + applySafeConfigEnvironmentVariables() + const { hydrateGithubModelsTokenFromSecureStorage } = await import('../utils/githubModelsCredentials.js') + hydrateGithubModelsTokenFromSecureStorage() + } + validateProviderEnvOrExit() // Print the gradient startup screen before the Ink UI loads diff --git a/src/main.tsx b/src/main.tsx index 07a3a3d2..a08f5899 100644 --- a/src/main.tsx +++ b/src/main.tsx @@ -2313,7 +2313,11 @@ async function run(): Promise { errors } = getSettingsWithErrors(); const nonMcpErrors = errors.filter(e => !e.mcpErrorMetadata); - if (nonMcpErrors.length > 0 && !isEnvTruthy(process.env.CLAUDE_CODE_USE_OPENAI)) { + if ( + nonMcpErrors.length > 0 && + !isEnvTruthy(process.env.CLAUDE_CODE_USE_OPENAI) && + !isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB) + ) { await launchInvalidSettingsDialog(root, { settingsErrors: nonMcpErrors, onExit: () => gracefulShutdownSync(1) diff --git a/src/services/api/client.ts b/src/services/api/client.ts index 493f4d73..ee50e35c 100644 --- a/src/services/api/client.ts +++ b/src/services/api/client.ts @@ -154,7 +154,10 @@ export async function getAnthropicClient({ fetch: resolvedFetch, }), } - if (isEnvTruthy(process.env.CLAUDE_CODE_USE_OPENAI)) { + if ( + isEnvTruthy(process.env.CLAUDE_CODE_USE_OPENAI) || + isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB) + ) { const { createOpenAIShimClient } = await import('./openaiShim.js') return createOpenAIShimClient({ defaultHeaders, diff --git a/src/services/api/openaiShim.ts b/src/services/api/openaiShim.ts index 9b77d07e..f13d2f15 100644 --- a/src/services/api/openaiShim.ts +++ b/src/services/api/openaiShim.ts @@ -14,8 +14,15 @@ * OPENAI_BASE_URL=http://... 
— base URL (default: https://api.openai.com/v1) * OPENAI_MODEL=gpt-4o — default model override * CODEX_API_KEY / ~/.codex/auth.json — Codex auth for codexplan/codexspark + * + * GitHub Models (models.github.ai), OpenAI-compatible: + * CLAUDE_CODE_USE_GITHUB=1 — enable GitHub inference (no need for USE_OPENAI) + * GITHUB_TOKEN or GH_TOKEN — PAT with models access (mapped to Bearer auth) + * OPENAI_MODEL — optional; use github:copilot or openai/gpt-4.1 style IDs */ +import { isEnvTruthy } from '../../utils/envUtils.js' +import { hydrateGithubModelsTokenFromSecureStorage } from '../../utils/githubModelsCredentials.js' import { codexStreamToAnthropic, collectCodexCompletedResponse, @@ -30,6 +37,25 @@ import { resolveProviderRequest, } from './providerConfig.js' +const GITHUB_MODELS_DEFAULT_BASE = 'https://models.github.ai/inference' +const GITHUB_API_VERSION = '2022-11-28' +const GITHUB_429_MAX_RETRIES = 3 +const GITHUB_429_BASE_DELAY_SEC = 1 +const GITHUB_429_MAX_DELAY_SEC = 32 + +function isGithubModelsMode(): boolean { + return isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB) +} + +function formatRetryAfterHint(response: Response): string { + const ra = response.headers.get('retry-after') + return ra ? ` (Retry-After: ${ra})` : '' +} + +function sleepMs(ms: number): Promise { + return new Promise(resolve => setTimeout(resolve, ms)) +} + // --------------------------------------------------------------------------- // Types — minimal subset of Anthropic SDK types we need to produce // --------------------------------------------------------------------------- @@ -254,9 +280,7 @@ function normalizeSchemaForOpenAI( function convertTools( tools: Array<{ name: string; description?: string; input_schema?: Record }>, ): OpenAITool[] { - const isGemini = - process.env.CLAUDE_CODE_USE_GEMINI === '1' || - process.env.CLAUDE_CODE_USE_GEMINI === 'true' + const isGemini = isEnvTruthy(process.env.CLAUDE_CODE_USE_GEMINI) return tools .filter(t => t.name !== 'ToolSearchTool') // Not relevant for OpenAI @@ -666,6 +690,12 @@ class OpenAIShimMessages { body.stream_options = { include_usage: true } } + const isGithub = isGithubModelsMode() + if (isGithub && body.max_completion_tokens !== undefined) { + body.max_tokens = body.max_completion_tokens + delete body.max_completion_tokens + } + if (params.temperature !== undefined) body.temperature = params.temperature if (params.top_p !== undefined) body.top_p = params.top_p @@ -715,6 +745,11 @@ class OpenAIShimMessages { } } + if (isGithub) { + headers.Accept = 'application/vnd.github.v3+json' + headers['X-GitHub-Api-Version'] = GITHUB_API_VERSION + } + // Build the chat completions URL // Azure Cognitive Services / Azure OpenAI require a deployment-specific path // and an api-version query parameter. @@ -737,19 +772,42 @@ class OpenAIShimMessages { chatCompletionsUrl = `${request.baseUrl}/chat/completions` } - const response = await fetch(chatCompletionsUrl, { - method: 'POST', + const fetchInit = { + method: 'POST' as const, headers, body: JSON.stringify(body), signal: options?.signal, - }) - - if (!response.ok) { - const errorBody = await response.text().catch(() => 'unknown error') - throw new Error(`OpenAI API error ${response.status}: ${errorBody}`) } - return response + const maxAttempts = isGithub ? 
GITHUB_429_MAX_RETRIES : 1 + let response: Response | undefined + for (let attempt = 0; attempt < maxAttempts; attempt++) { + response = await fetch(chatCompletionsUrl, fetchInit) + if (response.ok) { + return response + } + if ( + isGithub && + response.status === 429 && + attempt < maxAttempts - 1 + ) { + await response.text().catch(() => {}) + const delaySec = Math.min( + GITHUB_429_BASE_DELAY_SEC * 2 ** attempt, + GITHUB_429_MAX_DELAY_SEC, + ) + await sleepMs(delaySec * 1000) + continue + } + const errorBody = await response.text().catch(() => 'unknown error') + const rateHint = + isGithub && response.status === 429 ? formatRetryAfterHint(response) : '' + throw new Error( + `OpenAI API error ${response.status}: ${errorBody}${rateHint}`, + ) + } + + throw new Error('OpenAI shim: request loop exited unexpectedly') } private _convertNonStreamingResponse( @@ -759,7 +817,10 @@ class OpenAIShimMessages { choices?: Array<{ message?: { role?: string - content?: string | null + content?: + | string + | null + | Array<{ type?: string; text?: string }> tool_calls?: Array<{ id: string function: { name: string; arguments: string } @@ -778,8 +839,25 @@ class OpenAIShimMessages { const choice = data.choices?.[0] const content: Array> = [] - if (choice?.message?.content) { - content.push({ type: 'text', text: choice.message.content }) + const rawContent = choice?.message?.content + if (typeof rawContent === 'string' && rawContent) { + content.push({ type: 'text', text: rawContent }) + } else if (Array.isArray(rawContent) && rawContent.length > 0) { + const parts: string[] = [] + for (const part of rawContent) { + if ( + part && + typeof part === 'object' && + part.type === 'text' && + typeof part.text === 'string' + ) { + parts.push(part.text) + } + } + const joined = parts.join('\n') + if (joined) { + content.push({ type: 'text', text: joined }) + } } if (choice?.message?.tool_calls) { @@ -838,12 +916,11 @@ export function createOpenAIShimClient(options: { maxRetries?: number timeout?: number }): unknown { + hydrateGithubModelsTokenFromSecureStorage() + // When Gemini provider is active, map Gemini env vars to OpenAI-compatible ones // so the existing providerConfig.ts infrastructure picks them up correctly. - if ( - process.env.CLAUDE_CODE_USE_GEMINI === '1' || - process.env.CLAUDE_CODE_USE_GEMINI === 'true' - ) { + if (isEnvTruthy(process.env.CLAUDE_CODE_USE_GEMINI)) { process.env.OPENAI_BASE_URL ??= process.env.GEMINI_BASE_URL ?? 'https://generativelanguage.googleapis.com/v1beta/openai' @@ -852,6 +929,10 @@ export function createOpenAIShimClient(options: { if (process.env.GEMINI_MODEL && !process.env.OPENAI_MODEL) { process.env.OPENAI_MODEL = process.env.GEMINI_MODEL } + } else if (isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB)) { + process.env.OPENAI_BASE_URL ??= GITHUB_MODELS_DEFAULT_BASE + process.env.OPENAI_API_KEY ??= + process.env.GITHUB_TOKEN ?? process.env.GH_TOKEN ?? 
'' } const beta = new OpenAIShimBeta({ diff --git a/src/services/api/providerConfig.github.test.ts b/src/services/api/providerConfig.github.test.ts new file mode 100644 index 00000000..6177a9c6 --- /dev/null +++ b/src/services/api/providerConfig.github.test.ts @@ -0,0 +1,41 @@ +import { afterEach, expect, test } from 'bun:test' + +import { + DEFAULT_GITHUB_MODELS_API_MODEL, + normalizeGithubModelsApiModel, + resolveProviderRequest, +} from './providerConfig.js' + +const originalUseGithub = process.env.CLAUDE_CODE_USE_GITHUB + +afterEach(() => { + if (originalUseGithub === undefined) { + delete process.env.CLAUDE_CODE_USE_GITHUB + } else { + process.env.CLAUDE_CODE_USE_GITHUB = originalUseGithub + } +}) + +test.each([ + ['copilot', DEFAULT_GITHUB_MODELS_API_MODEL], + ['github:copilot', DEFAULT_GITHUB_MODELS_API_MODEL], + ['', DEFAULT_GITHUB_MODELS_API_MODEL], + ['github:gpt-4o', 'gpt-4o'], + ['gpt-4o', 'gpt-4o'], + ['github:copilot?reasoning=high', DEFAULT_GITHUB_MODELS_API_MODEL], +] as const)('normalizeGithubModelsApiModel(%s) -> %s', (input, expected) => { + expect(normalizeGithubModelsApiModel(input)).toBe(expected) +}) + +test('resolveProviderRequest applies GitHub normalization when CLAUDE_CODE_USE_GITHUB=1', () => { + process.env.CLAUDE_CODE_USE_GITHUB = '1' + const r = resolveProviderRequest({ model: 'github:gpt-4o' }) + expect(r.resolvedModel).toBe('gpt-4o') + expect(r.transport).toBe('chat_completions') +}) + +test('resolveProviderRequest leaves model unchanged without GitHub flag', () => { + delete process.env.CLAUDE_CODE_USE_GITHUB + const r = resolveProviderRequest({ model: 'github:gpt-4o' }) + expect(r.resolvedModel).toBe('github:gpt-4o') +}) diff --git a/src/services/api/providerConfig.ts b/src/services/api/providerConfig.ts index b197d785..bbbc2cb9 100644 --- a/src/services/api/providerConfig.ts +++ b/src/services/api/providerConfig.ts @@ -2,8 +2,12 @@ import { existsSync, readFileSync } from 'node:fs' import { homedir } from 'node:os' import { join } from 'node:path' +import { isEnvTruthy } from '../../utils/envUtils.js' + export const DEFAULT_OPENAI_BASE_URL = 'https://api.openai.com/v1' export const DEFAULT_CODEX_BASE_URL = 'https://chatgpt.com/backend-api/codex' +/** Default GitHub Models API model when user selects copilot / github:copilot */ +export const DEFAULT_GITHUB_MODELS_API_MODEL = 'openai/gpt-4.1' const CODEX_ALIAS_MODELS: Record< string, @@ -171,6 +175,20 @@ export function isCodexBaseUrl(baseUrl: string | undefined): boolean { } } +/** + * Normalize user model string for GitHub Models inference (models.github.ai). + * Mirrors runtime devsper `github._normalize_model_id`. + */ +export function normalizeGithubModelsApiModel(requestedModel: string): string { + const noQuery = requestedModel.split('?', 1)[0] ?? requestedModel + const segment = + noQuery.includes(':') ? noQuery.split(':', 2)[1]!.trim() : noQuery.trim() + if (!segment || segment.toLowerCase() === 'copilot') { + return DEFAULT_GITHUB_MODELS_API_MODEL + } + return segment +} + export function resolveProviderRequest(options?: { model?: string baseUrl?: string @@ -192,10 +210,16 @@ export function resolveProviderRequest(options?: { ? 'codex_responses' : 'chat_completions' + const resolvedModel = + transport === 'chat_completions' && + isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB) + ? normalizeGithubModelsApiModel(requestedModel) + : descriptor.baseModel + return { transport, requestedModel, - resolvedModel: descriptor.baseModel, + resolvedModel, baseUrl: (rawBaseUrl ?? 
(transport === 'codex_responses' diff --git a/src/services/github/deviceFlow.test.ts b/src/services/github/deviceFlow.test.ts new file mode 100644 index 00000000..4b7ce584 --- /dev/null +++ b/src/services/github/deviceFlow.test.ts @@ -0,0 +1,94 @@ +import { afterEach, describe, expect, mock, test } from 'bun:test' + +import { + GitHubDeviceFlowError, + pollAccessToken, + requestDeviceCode, +} from './deviceFlow.js' + +describe('requestDeviceCode', () => { + const originalFetch = globalThis.fetch + + afterEach(() => { + globalThis.fetch = originalFetch + }) + + test('parses successful device code response', async () => { + globalThis.fetch = mock(() => + Promise.resolve( + new Response( + JSON.stringify({ + device_code: 'abc', + user_code: 'ABCD-1234', + verification_uri: 'https://github.com/login/device', + expires_in: 600, + interval: 5, + }), + { status: 200 }, + ), + ), + ) + + const r = await requestDeviceCode({ + clientId: 'test-client', + fetchImpl: globalThis.fetch, + }) + expect(r.device_code).toBe('abc') + expect(r.user_code).toBe('ABCD-1234') + expect(r.verification_uri).toBe('https://github.com/login/device') + expect(r.expires_in).toBe(600) + expect(r.interval).toBe(5) + }) + + test('throws on HTTP error', async () => { + globalThis.fetch = mock(() => + Promise.resolve(new Response('bad', { status: 500 })), + ) + await expect( + requestDeviceCode({ clientId: 'x', fetchImpl: globalThis.fetch }), + ).rejects.toThrow(GitHubDeviceFlowError) + }) +}) + +describe('pollAccessToken', () => { + const originalFetch = globalThis.fetch + + afterEach(() => { + globalThis.fetch = originalFetch + }) + + test('returns token when GitHub responds with access_token immediately', async () => { + let calls = 0 + globalThis.fetch = mock(() => { + calls++ + return Promise.resolve( + new Response(JSON.stringify({ access_token: 'tok-xyz' }), { + status: 200, + }), + ) + }) + + const token = await pollAccessToken('dev-code', { + clientId: 'cid', + fetchImpl: globalThis.fetch, + }) + expect(token).toBe('tok-xyz') + expect(calls).toBe(1) + }) + + test('throws on access_denied', async () => { + globalThis.fetch = mock(() => + Promise.resolve( + new Response(JSON.stringify({ error: 'access_denied' }), { + status: 200, + }), + ), + ) + await expect( + pollAccessToken('dc', { + clientId: 'c', + fetchImpl: globalThis.fetch, + }), + ).rejects.toThrow(/denied/) + }) +}) diff --git a/src/services/github/deviceFlow.ts b/src/services/github/deviceFlow.ts new file mode 100644 index 00000000..0e207b7f --- /dev/null +++ b/src/services/github/deviceFlow.ts @@ -0,0 +1,174 @@ +/** + * GitHub OAuth device flow for CLI login (https://docs.github.com/en/apps/oauth-apps/building-oauth-apps/authorizing-oauth-apps#device-flow). 
+ */ + +import { execFileNoThrow } from '../../utils/execFileNoThrow.js' + +export const DEFAULT_GITHUB_DEVICE_FLOW_CLIENT_ID = 'Ov23liXjWSSui6QIahPl' + +export const GITHUB_DEVICE_CODE_URL = 'https://github.com/login/device/code' +export const GITHUB_DEVICE_ACCESS_TOKEN_URL = + 'https://github.com/login/oauth/access_token' + +/** Match runtime devsper github_oauth DEFAULT_SCOPE */ +export const DEFAULT_GITHUB_DEVICE_SCOPE = 'read:user' + +export class GitHubDeviceFlowError extends Error { + constructor(message: string) { + super(message) + this.name = 'GitHubDeviceFlowError' + } +} + +export type DeviceCodeResult = { + device_code: string + user_code: string + verification_uri: string + expires_in: number + interval: number +} + +export function getGithubDeviceFlowClientId(): string { + return ( + process.env.GITHUB_DEVICE_FLOW_CLIENT_ID?.trim() || + DEFAULT_GITHUB_DEVICE_FLOW_CLIENT_ID + ) +} + +function sleep(ms: number): Promise { + return new Promise(resolve => setTimeout(resolve, ms)) +} + +export async function requestDeviceCode(options?: { + clientId?: string + scope?: string + fetchImpl?: typeof fetch +}): Promise { + const clientId = options?.clientId ?? getGithubDeviceFlowClientId() + if (!clientId) { + throw new GitHubDeviceFlowError( + 'No OAuth client ID: set GITHUB_DEVICE_FLOW_CLIENT_ID or paste a PAT instead.', + ) + } + const fetchFn = options?.fetchImpl ?? fetch + const res = await fetchFn(GITHUB_DEVICE_CODE_URL, { + method: 'POST', + headers: { Accept: 'application/json' }, + body: new URLSearchParams({ + client_id: clientId, + scope: options?.scope ?? DEFAULT_GITHUB_DEVICE_SCOPE, + }), + }) + if (!res.ok) { + const text = await res.text().catch(() => '') + throw new GitHubDeviceFlowError( + `Device code request failed: ${res.status} ${text}`, + ) + } + const data = (await res.json()) as Record + const device_code = data.device_code + const user_code = data.user_code + const verification_uri = data.verification_uri + if ( + typeof device_code !== 'string' || + typeof user_code !== 'string' || + typeof verification_uri !== 'string' + ) { + throw new GitHubDeviceFlowError('Malformed device code response from GitHub') + } + return { + device_code, + user_code, + verification_uri, + expires_in: typeof data.expires_in === 'number' ? data.expires_in : 900, + interval: typeof data.interval === 'number' ? data.interval : 5, + } +} + +export type PollOptions = { + clientId?: string + initialInterval?: number + timeoutSeconds?: number + fetchImpl?: typeof fetch +} + +export async function pollAccessToken( + deviceCode: string, + options?: PollOptions, +): Promise { + const clientId = options?.clientId ?? getGithubDeviceFlowClientId() + if (!clientId) { + throw new GitHubDeviceFlowError('client_id required for polling') + } + let interval = Math.max(1, options?.initialInterval ?? 5) + const timeoutSeconds = options?.timeoutSeconds ?? 900 + const fetchFn = options?.fetchImpl ?? 
fetch + const start = Date.now() + + while ((Date.now() - start) / 1000 < timeoutSeconds) { + const res = await fetchFn(GITHUB_DEVICE_ACCESS_TOKEN_URL, { + method: 'POST', + headers: { Accept: 'application/json' }, + body: new URLSearchParams({ + client_id: clientId, + device_code: deviceCode, + grant_type: 'urn:ietf:params:oauth:grant-type:device_code', + }), + }) + if (!res.ok) { + const text = await res.text().catch(() => '') + throw new GitHubDeviceFlowError( + `Token request failed: ${res.status} ${text}`, + ) + } + const data = (await res.json()) as Record + const err = data.error as string | undefined + if (err == null) { + const token = data.access_token + if (typeof token === 'string' && token) { + return token + } + throw new GitHubDeviceFlowError('No access_token in response') + } + if (err === 'authorization_pending') { + await sleep(interval * 1000) + continue + } + if (err === 'slow_down') { + interval = + typeof data.interval === 'number' ? data.interval : interval + 5 + await sleep(interval * 1000) + continue + } + if (err === 'expired_token') { + throw new GitHubDeviceFlowError( + 'Device code expired. Start the login flow again.', + ) + } + if (err === 'access_denied') { + throw new GitHubDeviceFlowError('Authorization was denied or cancelled.') + } + throw new GitHubDeviceFlowError(`GitHub OAuth error: ${err}`) + } + throw new GitHubDeviceFlowError('Timed out waiting for authorization.') +} + +/** + * Best-effort open browser / OS handler for the verification URL. + */ +export async function openVerificationUri(uri: string): Promise { + try { + if (process.platform === 'darwin') { + await execFileNoThrow('open', [uri], { useCwd: false, timeout: 5000 }) + } else if (process.platform === 'win32') { + await execFileNoThrow('cmd', ['/c', 'start', '', uri], { + useCwd: false, + timeout: 5000, + }) + } else { + await execFileNoThrow('xdg-open', [uri], { useCwd: false, timeout: 5000 }) + } + } catch { + // User can open the URL manually + } +} diff --git a/src/utils/auth.ts b/src/utils/auth.ts index b1cd024e..37d1ca1f 100644 --- a/src/utils/auth.ts +++ b/src/utils/auth.ts @@ -117,7 +117,8 @@ export function isAnthropicAuthEnabled(): boolean { isEnvTruthy(process.env.CLAUDE_CODE_USE_VERTEX) || isEnvTruthy(process.env.CLAUDE_CODE_USE_FOUNDRY) || isEnvTruthy(process.env.CLAUDE_CODE_USE_OPENAI) || - isEnvTruthy(process.env.CLAUDE_CODE_USE_GEMINI) + isEnvTruthy(process.env.CLAUDE_CODE_USE_GEMINI) || + isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB) // Check if user has configured an external API key source // This allows externally-provided API keys to work (without requiring proxy configuration) @@ -1731,14 +1732,15 @@ export function getSubscriptionName(): string { } } -/** Check if using third-party services (Bedrock or Vertex or Foundry or OpenAI-compatible or Gemini) */ +/** Check if using third-party services (Bedrock or Vertex or Foundry or OpenAI-compatible or Gemini or GitHub Models) */ export function isUsing3PServices(): boolean { return !!( isEnvTruthy(process.env.CLAUDE_CODE_USE_BEDROCK) || isEnvTruthy(process.env.CLAUDE_CODE_USE_VERTEX) || isEnvTruthy(process.env.CLAUDE_CODE_USE_FOUNDRY) || isEnvTruthy(process.env.CLAUDE_CODE_USE_OPENAI) || - isEnvTruthy(process.env.CLAUDE_CODE_USE_GEMINI) + isEnvTruthy(process.env.CLAUDE_CODE_USE_GEMINI) || + isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB) ) } diff --git a/src/utils/context.ts b/src/utils/context.ts index f13b2b0a..7dba02b7 100644 --- a/src/utils/context.ts +++ b/src/utils/context.ts @@ -77,7 +77,9 @@ export function 
getContextWindowForModel( process.env.CLAUDE_CODE_USE_OPENAI === '1' || process.env.CLAUDE_CODE_USE_OPENAI === 'true' || process.env.CLAUDE_CODE_USE_GEMINI === '1' || - process.env.CLAUDE_CODE_USE_GEMINI === 'true' + process.env.CLAUDE_CODE_USE_GEMINI === 'true' || + process.env.CLAUDE_CODE_USE_GITHUB === '1' || + process.env.CLAUDE_CODE_USE_GITHUB === 'true' ) { const openaiWindow = getOpenAIContextWindow(model) if (openaiWindow !== undefined) { @@ -181,7 +183,9 @@ export function getModelMaxOutputTokens(model: string): { process.env.CLAUDE_CODE_USE_OPENAI === '1' || process.env.CLAUDE_CODE_USE_OPENAI === 'true' || process.env.CLAUDE_CODE_USE_GEMINI === '1' || - process.env.CLAUDE_CODE_USE_GEMINI === 'true' + process.env.CLAUDE_CODE_USE_GEMINI === 'true' || + process.env.CLAUDE_CODE_USE_GITHUB === '1' || + process.env.CLAUDE_CODE_USE_GITHUB === 'true' ) { const openaiMax = getOpenAIMaxOutputTokens(model) if (openaiMax !== undefined) { diff --git a/src/utils/githubModelsCredentials.hydrate.test.ts b/src/utils/githubModelsCredentials.hydrate.test.ts new file mode 100644 index 00000000..23b0a5ee --- /dev/null +++ b/src/utils/githubModelsCredentials.hydrate.test.ts @@ -0,0 +1,66 @@ +/** + * Hydrate tests live in a separate file with no static import of + * githubModelsCredentials so Bun's mock.module can replace secureStorage + * before that module is first loaded. + */ +import { afterEach, describe, expect, mock, test } from 'bun:test' + +describe('hydrateGithubModelsTokenFromSecureStorage', () => { + const orig = { + CLAUDE_CODE_USE_GITHUB: process.env.CLAUDE_CODE_USE_GITHUB, + GITHUB_TOKEN: process.env.GITHUB_TOKEN, + GH_TOKEN: process.env.GH_TOKEN, + CLAUDE_CODE_SIMPLE: process.env.CLAUDE_CODE_SIMPLE, + } + + afterEach(() => { + mock.restore() + for (const [k, v] of Object.entries(orig)) { + if (v === undefined) { + delete process.env[k as keyof typeof orig] + } else { + process.env[k as keyof typeof orig] = v + } + } + }) + + test('sets GITHUB_TOKEN from secure storage when USE_GITHUB and env token empty', async () => { + process.env.CLAUDE_CODE_USE_GITHUB = '1' + delete process.env.GITHUB_TOKEN + delete process.env.GH_TOKEN + delete process.env.CLAUDE_CODE_SIMPLE + + mock.module('./secureStorage/index.js', () => ({ + getSecureStorage: () => ({ + read: () => ({ + githubModels: { accessToken: 'stored-secret' }, + }), + }), + })) + + const { hydrateGithubModelsTokenFromSecureStorage } = await import( + './githubModelsCredentials.js' + ) + hydrateGithubModelsTokenFromSecureStorage() + expect(process.env.GITHUB_TOKEN).toBe('stored-secret') + }) + + test('does not override existing GITHUB_TOKEN', async () => { + process.env.CLAUDE_CODE_USE_GITHUB = '1' + process.env.GITHUB_TOKEN = 'already' + + mock.module('./secureStorage/index.js', () => ({ + getSecureStorage: () => ({ + read: () => ({ + githubModels: { accessToken: 'stored-secret' }, + }), + }), + })) + + const { hydrateGithubModelsTokenFromSecureStorage } = await import( + './githubModelsCredentials.js' + ) + hydrateGithubModelsTokenFromSecureStorage() + expect(process.env.GITHUB_TOKEN).toBe('already') + }) +}) diff --git a/src/utils/githubModelsCredentials.test.ts b/src/utils/githubModelsCredentials.test.ts new file mode 100644 index 00000000..81c3cdcc --- /dev/null +++ b/src/utils/githubModelsCredentials.test.ts @@ -0,0 +1,47 @@ +import { describe, expect, test } from 'bun:test' + +import { + clearGithubModelsToken, + readGithubModelsToken, + saveGithubModelsToken, +} from './githubModelsCredentials.js' + 
+describe('readGithubModelsToken', () => { + test('returns undefined in bare mode', () => { + const prev = process.env.CLAUDE_CODE_SIMPLE + process.env.CLAUDE_CODE_SIMPLE = '1' + expect(readGithubModelsToken()).toBeUndefined() + if (prev === undefined) { + delete process.env.CLAUDE_CODE_SIMPLE + } else { + process.env.CLAUDE_CODE_SIMPLE = prev + } + }) +}) + +describe('saveGithubModelsToken / clearGithubModelsToken', () => { + test('save returns failure in bare mode', () => { + const prev = process.env.CLAUDE_CODE_SIMPLE + process.env.CLAUDE_CODE_SIMPLE = '1' + const r = saveGithubModelsToken('abc') + expect(r.success).toBe(false) + expect(r.warning).toContain('Bare mode') + if (prev === undefined) { + delete process.env.CLAUDE_CODE_SIMPLE + } else { + process.env.CLAUDE_CODE_SIMPLE = prev + } + }) + + test('clear succeeds in bare mode', () => { + const prev = process.env.CLAUDE_CODE_SIMPLE + process.env.CLAUDE_CODE_SIMPLE = '1' + expect(clearGithubModelsToken().success).toBe(true) + if (prev === undefined) { + delete process.env.CLAUDE_CODE_SIMPLE + } else { + process.env.CLAUDE_CODE_SIMPLE = prev + } + }) +}) + diff --git a/src/utils/githubModelsCredentials.ts b/src/utils/githubModelsCredentials.ts new file mode 100644 index 00000000..83d5934c --- /dev/null +++ b/src/utils/githubModelsCredentials.ts @@ -0,0 +1,73 @@ +import { isBareMode, isEnvTruthy } from './envUtils.js' +import { getSecureStorage } from './secureStorage/index.js' + +/** JSON key in the shared OpenClaude secure storage blob. */ +export const GITHUB_MODELS_STORAGE_KEY = 'githubModels' as const + +export type GithubModelsCredentialBlob = { + accessToken: string +} + +export function readGithubModelsToken(): string | undefined { + if (isBareMode()) return undefined + try { + const data = getSecureStorage().read() as + | ({ githubModels?: GithubModelsCredentialBlob } & Record) + | null + const t = data?.githubModels?.accessToken?.trim() + return t || undefined + } catch { + return undefined + } +} + +/** + * If GitHub Models mode is on and no token is in the environment, copy the + * stored token into process.env so the OpenAI shim and validation see it. + */ +export function hydrateGithubModelsTokenFromSecureStorage(): void { + if (!isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB)) { + return + } + if (process.env.GITHUB_TOKEN?.trim() || process.env.GH_TOKEN?.trim()) { + return + } + if (isBareMode()) { + return + } + const t = readGithubModelsToken() + if (t) { + process.env.GITHUB_TOKEN = t + } +} + +export function saveGithubModelsToken(token: string): { + success: boolean + warning?: string +} { + if (isBareMode()) { + return { success: false, warning: 'Bare mode: secure storage is disabled.' } + } + const trimmed = token.trim() + if (!trimmed) { + return { success: false, warning: 'Token is empty.' 
} + } + const secureStorage = getSecureStorage() + const prev = secureStorage.read() || {} + const merged = { + ...(prev as Record), + [GITHUB_MODELS_STORAGE_KEY]: { accessToken: trimmed }, + } + return secureStorage.update(merged as typeof prev) +} + +export function clearGithubModelsToken(): { success: boolean; warning?: string } { + if (isBareMode()) { + return { success: true } + } + const secureStorage = getSecureStorage() + const prev = secureStorage.read() || {} + const next = { ...(prev as Record) } + delete next[GITHUB_MODELS_STORAGE_KEY] + return secureStorage.update(next as typeof prev) +} diff --git a/src/utils/managedEnvConstants.ts b/src/utils/managedEnvConstants.ts index 12c56565..86b2da29 100644 --- a/src/utils/managedEnvConstants.ts +++ b/src/utils/managedEnvConstants.ts @@ -18,6 +18,7 @@ const PROVIDER_MANAGED_ENV_VARS = new Set([ 'CLAUDE_CODE_USE_BEDROCK', 'CLAUDE_CODE_USE_VERTEX', 'CLAUDE_CODE_USE_FOUNDRY', + 'CLAUDE_CODE_USE_GITHUB', // Endpoint config (base URLs, project/resource identifiers) 'ANTHROPIC_BASE_URL', 'ANTHROPIC_BEDROCK_BASE_URL', @@ -147,6 +148,7 @@ export const SAFE_ENV_VARS = new Set([ 'CLAUDE_CODE_SUBAGENT_MODEL', 'CLAUDE_CODE_USE_BEDROCK', 'CLAUDE_CODE_USE_FOUNDRY', + 'CLAUDE_CODE_USE_GITHUB', 'CLAUDE_CODE_USE_VERTEX', 'DISABLE_AUTOUPDATER', 'DISABLE_BUG_COMMAND', diff --git a/src/utils/model/providers.test.ts b/src/utils/model/providers.test.ts index 1da3d596..ea03454f 100644 --- a/src/utils/model/providers.test.ts +++ b/src/utils/model/providers.test.ts @@ -7,6 +7,7 @@ import { const originalEnv = { CLAUDE_CODE_USE_GEMINI: process.env.CLAUDE_CODE_USE_GEMINI, + CLAUDE_CODE_USE_GITHUB: process.env.CLAUDE_CODE_USE_GITHUB, CLAUDE_CODE_USE_OPENAI: process.env.CLAUDE_CODE_USE_OPENAI, CLAUDE_CODE_USE_BEDROCK: process.env.CLAUDE_CODE_USE_BEDROCK, CLAUDE_CODE_USE_VERTEX: process.env.CLAUDE_CODE_USE_VERTEX, @@ -15,6 +16,7 @@ const originalEnv = { afterEach(() => { process.env.CLAUDE_CODE_USE_GEMINI = originalEnv.CLAUDE_CODE_USE_GEMINI + process.env.CLAUDE_CODE_USE_GITHUB = originalEnv.CLAUDE_CODE_USE_GITHUB process.env.CLAUDE_CODE_USE_OPENAI = originalEnv.CLAUDE_CODE_USE_OPENAI process.env.CLAUDE_CODE_USE_BEDROCK = originalEnv.CLAUDE_CODE_USE_BEDROCK process.env.CLAUDE_CODE_USE_VERTEX = originalEnv.CLAUDE_CODE_USE_VERTEX @@ -23,6 +25,7 @@ afterEach(() => { function clearProviderEnv(): void { delete process.env.CLAUDE_CODE_USE_GEMINI + delete process.env.CLAUDE_CODE_USE_GITHUB delete process.env.CLAUDE_CODE_USE_OPENAI delete process.env.CLAUDE_CODE_USE_BEDROCK delete process.env.CLAUDE_CODE_USE_VERTEX @@ -38,6 +41,7 @@ test('first-party provider keeps Anthropic account setup flow enabled', () => { test.each([ ['CLAUDE_CODE_USE_OPENAI', 'openai'], + ['CLAUDE_CODE_USE_GITHUB', 'github'], ['CLAUDE_CODE_USE_GEMINI', 'gemini'], ['CLAUDE_CODE_USE_BEDROCK', 'bedrock'], ['CLAUDE_CODE_USE_VERTEX', 'vertex'], @@ -52,3 +56,11 @@ test.each([ expect(usesAnthropicAccountFlow()).toBe(false) }, ) + +test('GEMINI takes precedence over GitHub when both are set', () => { + clearProviderEnv() + process.env.CLAUDE_CODE_USE_GEMINI = '1' + process.env.CLAUDE_CODE_USE_GITHUB = '1' + + expect(getAPIProvider()).toBe('gemini') +}) diff --git a/src/utils/model/providers.ts b/src/utils/model/providers.ts index 847b5fc3..30a1f1c9 100644 --- a/src/utils/model/providers.ts +++ b/src/utils/model/providers.ts @@ -1,20 +1,29 @@ import type { AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS } from '../../services/analytics/index.js' import { isEnvTruthy } from '../envUtils.js' -export 
type APIProvider = 'firstParty' | 'bedrock' | 'vertex' | 'foundry' | 'openai' | 'gemini' +export type APIProvider = + | 'firstParty' + | 'bedrock' + | 'vertex' + | 'foundry' + | 'openai' + | 'gemini' + | 'github' export function getAPIProvider(): APIProvider { return isEnvTruthy(process.env.CLAUDE_CODE_USE_GEMINI) ? 'gemini' - : isEnvTruthy(process.env.CLAUDE_CODE_USE_OPENAI) - ? 'openai' - : isEnvTruthy(process.env.CLAUDE_CODE_USE_BEDROCK) - ? 'bedrock' - : isEnvTruthy(process.env.CLAUDE_CODE_USE_VERTEX) - ? 'vertex' - : isEnvTruthy(process.env.CLAUDE_CODE_USE_FOUNDRY) - ? 'foundry' - : 'firstParty' + : isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB) + ? 'github' + : isEnvTruthy(process.env.CLAUDE_CODE_USE_OPENAI) + ? 'openai' + : isEnvTruthy(process.env.CLAUDE_CODE_USE_BEDROCK) + ? 'bedrock' + : isEnvTruthy(process.env.CLAUDE_CODE_USE_VERTEX) + ? 'vertex' + : isEnvTruthy(process.env.CLAUDE_CODE_USE_FOUNDRY) + ? 'foundry' + : 'firstParty' } export function usesAnthropicAccountFlow(): boolean { diff --git a/src/utils/providerProfile.ts b/src/utils/providerProfile.ts index 866c19c5..e88662ac 100644 --- a/src/utils/providerProfile.ts +++ b/src/utils/providerProfile.ts @@ -190,6 +190,7 @@ export async function buildLaunchEnv(options: { } delete env.CLAUDE_CODE_USE_OPENAI + delete env.CLAUDE_CODE_USE_GITHUB env.GEMINI_MODEL = processEnv.GEMINI_MODEL || @@ -224,6 +225,7 @@ export async function buildLaunchEnv(options: { } delete env.CLAUDE_CODE_USE_GEMINI + delete env.CLAUDE_CODE_USE_GITHUB delete env.GEMINI_API_KEY delete env.GEMINI_MODEL delete env.GEMINI_BASE_URL diff --git a/src/utils/swarm/spawnUtils.ts b/src/utils/swarm/spawnUtils.ts index cfccdf5a..037d273d 100644 --- a/src/utils/swarm/spawnUtils.ts +++ b/src/utils/swarm/spawnUtils.ts @@ -99,6 +99,18 @@ const TEAMMATE_ENV_VARS = [ 'CLAUDE_CODE_USE_BEDROCK', 'CLAUDE_CODE_USE_VERTEX', 'CLAUDE_CODE_USE_FOUNDRY', + 'CLAUDE_CODE_USE_GITHUB', + 'CLAUDE_CODE_USE_GEMINI', + 'CLAUDE_CODE_USE_OPENAI', + 'GITHUB_TOKEN', + 'GH_TOKEN', + 'OPENAI_API_KEY', + 'OPENAI_BASE_URL', + 'OPENAI_MODEL', + 'GEMINI_API_KEY', + 'GEMINI_BASE_URL', + 'GEMINI_MODEL', + 'GOOGLE_API_KEY', // Custom API endpoint 'ANTHROPIC_BASE_URL', // Config directory override From 2619401d34a6547ddfc7d59d3f245b72d75a10c4 Mon Sep 17 00:00:00 2001 From: Rithul Kamesh Date: Thu, 2 Apr 2026 11:26:27 +0530 Subject: [PATCH 15/35] Remove github-models-pr-draft.md --- docs/github-models-pr-draft.md | 24 ------------------------ 1 file changed, 24 deletions(-) delete mode 100644 docs/github-models-pr-draft.md diff --git a/docs/github-models-pr-draft.md b/docs/github-models-pr-draft.md deleted file mode 100644 index 21fa7fa8..00000000 --- a/docs/github-models-pr-draft.md +++ /dev/null @@ -1,24 +0,0 @@ -# GitHub Models + onboard — PR draft (paste into GitHub) - -**Title:** `feat: GitHub Models provider + interactive onboard (keychain-backed)` - -**Body:** - -## Summary - -- Adds GitHub Models (`models.github.ai`) as an OpenAI-compatible backend via `CLAUDE_CODE_USE_GITHUB` (see existing shim changes). -- Adds `/onboard-github`: interactive Ink flow for GitHub Device Login or PAT, stores token in OS-backed secure storage (macOS Keychain when available, else `~/.claude/.credentials.json`), and writes user settings `env` so no `export GITHUB_TOKEN` is required. -- Applies user settings before provider env validation and hydrates `GITHUB_TOKEN` from secure storage when the GitHub provider flag is on. - -## How to test - -1. 
Run `openclaude` and execute `/onboard-github` (or launch via command registration). -2. Complete device flow or paste a PAT with Models access. -3. Restart CLI; confirm `CLAUDE_CODE_USE_GITHUB=1` in `~/.claude/settings.json` (or merged file) and that inference works without exporting `GITHUB_TOKEN`. -4. `bun test` (new suites) + `bun run build`. - -## Notes / follow-ups - -- Device flow OAuth app client ID is configurable via `GITHUB_DEVICE_FLOW_CLIENT_ID`; verify scope list against current GitHub Models documentation. -- `/logout` currently deletes all secure storage; GitHub token is cleared too — document or narrow in a follow-up. -- Linux: secure storage is plaintext with chmod 600 today; libsecret is still TODO in `secureStorage`. From 93bc50f8cd5b68d725b9832e12c961b25235331e Mon Sep 17 00:00:00 2001 From: gnanam1990 Date: Thu, 2 Apr 2026 11:37:26 +0530 Subject: [PATCH 16/35] docs: replace stale OpenRouter Gemini example Update the OpenRouter Gemini README example to a model ID that works in current OpenRouter validation, and note that model availability can change over time. --- README.md | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 5c94ed80..7784083a 100644 --- a/README.md +++ b/README.md @@ -118,9 +118,13 @@ export OPENAI_MODEL=deepseek-chat export CLAUDE_CODE_USE_OPENAI=1 export OPENAI_API_KEY=sk-or-... export OPENAI_BASE_URL=https://openrouter.ai/api/v1 -export OPENAI_MODEL=google/gemini-2.0-flash +export OPENAI_MODEL=google/gemini-2.0-flash-001 ``` +OpenRouter model availability changes over time. If a model stops working, +pick another currently available OpenRouter model before assuming the +OpenAI-compatible setup is broken. + ### Ollama (local, free) ```bash From d156aed32da370b8feca4a273096b3725c16ec79 Mon Sep 17 00:00:00 2001 From: Aarondio <31071454+Aarondio@users.noreply.github.com> Date: Thu, 2 Apr 2026 08:14:52 +0100 Subject: [PATCH 17/35] fix(shim): implement tolerant bracket balancer for truncated tool JSON --- src/services/api/openaiShim.ts | 36 +++++++++++++++++++++++++++++++++- 1 file changed, 35 insertions(+), 1 deletion(-) diff --git a/src/services/api/openaiShim.ts b/src/services/api/openaiShim.ts index 9a500490..4fa6ea31 100644 --- a/src/services/api/openaiShim.ts +++ b/src/services/api/openaiShim.ts @@ -342,7 +342,7 @@ async function* openaiStreamToAnthropic( ): AsyncGenerator { const messageId = makeMessageId() let contentBlockIndex = 0 - const activeToolCalls = new Map() + const activeToolCalls = new Map() let hasEmittedContentStart = false let lastStopReason: 'tool_use' | 'max_tokens' | 'end_turn' | null = null let hasEmittedFinalUsage = false @@ -436,6 +436,7 @@ async function* openaiStreamToAnthropic( id: tc.id, name: tc.function.name, index: toolBlockIndex, + jsonBuffer: tc.function.arguments ?? 
'', }) yield { @@ -466,6 +467,9 @@ async function* openaiStreamToAnthropic( // Continuation of existing tool call const active = activeToolCalls.get(tc.index) if (active) { + if (tc.function.arguments) { + active.jsonBuffer += tc.function.arguments + } yield { type: 'content_block_delta', index: active.index, @@ -493,6 +497,36 @@ async function* openaiStreamToAnthropic( } // Close active tool calls for (const [, tc] of activeToolCalls) { + let suffixToAdd = '' + if (tc.jsonBuffer) { + try { + JSON.parse(tc.jsonBuffer) + } catch { + const str = tc.jsonBuffer.trimEnd() + const combinations = [ + '}', '"}', ']}', '"]}', '}}', '"}}', ']}}', '"]}}', '"]}]}', '}]}' + ] + for (const combo of combinations) { + try { + JSON.parse(str + combo) + suffixToAdd = combo + break + } catch {} + } + } + } + + if (suffixToAdd) { + yield { + type: 'content_block_delta', + index: tc.index, + delta: { + type: 'input_json_delta', + partial_json: suffixToAdd, + }, + } + } + yield { type: 'content_block_stop', index: tc.index } } From f07f11b7b6134f07b74024afb299f1f2a258623a Mon Sep 17 00:00:00 2001 From: Rithul Kamesh Date: Thu, 2 Apr 2026 12:53:56 +0530 Subject: [PATCH 18/35] fix: use bun test for provider-recommendation script to resolve module errors --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index 5f5351b8..0bfa42c6 100644 --- a/package.json +++ b/package.json @@ -30,7 +30,7 @@ "dev:fast": "bun run profile:fast && bun run dev:ollama:fast", "dev:code": "bun run profile:code && bun run dev:profile", "start": "node dist/cli.mjs", - "test:provider-recommendation": "node --test --experimental-strip-types src/utils/providerRecommendation.test.ts src/utils/providerProfile.test.ts", + "test:provider-recommendation": "bun test src/utils/providerRecommendation.test.ts src/utils/providerProfile.test.ts", "typecheck": "tsc --noEmit", "smoke": "bun run build && node dist/cli.mjs --version", "test:provider": "bun test src/services/api/*.test.ts src/utils/context.test.ts", From 577e654ae74e868fb7095d5a0804e6d9b42b4e32 Mon Sep 17 00:00:00 2001 From: Misha Skvortsov Date: Wed, 1 Apr 2026 21:42:43 +0300 Subject: [PATCH 19/35] feat: add support for Atomic Chat provider - Introduced a new provider profile for Atomic Chat, allowing it to be used alongside existing providers. - Updated `package.json` to include a new development script for launching Atomic Chat. - Modified `smart_router.py` to recognize Atomic Chat as a local provider that does not require an API key. - Enhanced provider discovery and launch scripts to handle Atomic Chat, including model listing and connection checks. - Added tests to ensure proper environment setup and behavior for Atomic Chat profiles. This update expands the functionality of the application to support local LLMs via Atomic Chat, improving versatility for users. 
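For reviewers, a minimal sketch of the env this profile resolves to, using the helpers added in this patch (the model name is illustrative and Atomic Chat's default port is assumed; this is not part of the diff itself):

```ts
// Sketch only — exercises buildAtomicChatProfileEnv / getAtomicChatChatBaseUrl from this patch.
// 'llama-3.1-8b' is an illustrative model name; any model loaded in Atomic Chat works.
import { buildAtomicChatProfileEnv } from './src/utils/providerProfile.ts'
import { getAtomicChatChatBaseUrl } from './scripts/provider-discovery.ts'

const env = buildAtomicChatProfileEnv('llama-3.1-8b', { getAtomicChatChatBaseUrl })
// => { OPENAI_BASE_URL: 'http://127.0.0.1:1337/v1', OPENAI_MODEL: 'llama-3.1-8b' }
// No OPENAI_API_KEY is written: Atomic Chat is a local provider and needs no key.
```

A full launch goes through `bun run dev:atomic-chat` (scripts/provider-launch.ts), which resolves the same two variables, honouring ATOMIC_CHAT_BASE_URL and any persisted profile.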
--- atomic_chat_provider.py | 146 ++++++++++++++++++++++++++++++ package.json | 1 + scripts/provider-bootstrap.ts | 21 ++++- scripts/provider-discovery.ts | 56 ++++++++++++ scripts/provider-launch.ts | 36 +++++++- smart_router.py | 13 ++- src/utils/providerProfile.test.ts | 70 ++++++++++++++ src/utils/providerProfile.ts | 37 +++++++- test_atomic_chat_provider.py | 130 ++++++++++++++++++++++++++ 9 files changed, 503 insertions(+), 7 deletions(-) create mode 100644 atomic_chat_provider.py create mode 100644 test_atomic_chat_provider.py diff --git a/atomic_chat_provider.py b/atomic_chat_provider.py new file mode 100644 index 00000000..bf55155f --- /dev/null +++ b/atomic_chat_provider.py @@ -0,0 +1,146 @@ +""" +atomic_chat_provider.py +----------------------- +Adds native Atomic Chat support to openclaude. +Lets Claude Code route requests to any locally-running model via +Atomic Chat (Apple Silicon only) at 127.0.0.1:1337. + +Atomic Chat exposes an OpenAI-compatible API, so messages are forwarded +directly without translation. + +Usage (.env): + PREFERRED_PROVIDER=atomic-chat + ATOMIC_CHAT_BASE_URL=http://127.0.0.1:1337 +""" + +import httpx +import json +import logging +import os +from typing import AsyncIterator + +logger = logging.getLogger(__name__) +ATOMIC_CHAT_BASE_URL = os.getenv("ATOMIC_CHAT_BASE_URL", "http://127.0.0.1:1337") + + +def _api_url(path: str) -> str: + return f"{ATOMIC_CHAT_BASE_URL}/v1{path}" + + +async def check_atomic_chat_running() -> bool: + try: + async with httpx.AsyncClient(timeout=3.0) as client: + resp = await client.get(_api_url("/models")) + return resp.status_code == 200 + except Exception: + return False + + +async def list_atomic_chat_models() -> list[str]: + try: + async with httpx.AsyncClient(timeout=5.0) as client: + resp = await client.get(_api_url("/models")) + resp.raise_for_status() + data = resp.json() + return [m["id"] for m in data.get("data", [])] + except Exception as e: + logger.warning(f"Could not list Atomic Chat models: {e}") + return [] + + +async def atomic_chat( + model: str, + messages: list[dict], + system: str | None = None, + max_tokens: int = 4096, + temperature: float = 1.0, +) -> dict: + chat_messages = list(messages) + if system: + chat_messages.insert(0, {"role": "system", "content": system}) + + payload = { + "model": model, + "messages": chat_messages, + "max_tokens": max_tokens, + "temperature": temperature, + "stream": False, + } + + async with httpx.AsyncClient(timeout=120.0) as client: + resp = await client.post(_api_url("/chat/completions"), json=payload) + resp.raise_for_status() + data = resp.json() + + choice = data.get("choices", [{}])[0] + assistant_text = choice.get("message", {}).get("content", "") + usage = data.get("usage", {}) + + return { + "id": data.get("id", "msg_atomic_chat"), + "type": "message", + "role": "assistant", + "content": [{"type": "text", "text": assistant_text}], + "model": model, + "stop_reason": "end_turn", + "stop_sequence": None, + "usage": { + "input_tokens": usage.get("prompt_tokens", 0), + "output_tokens": usage.get("completion_tokens", 0), + }, + } + + +async def atomic_chat_stream( + model: str, + messages: list[dict], + system: str | None = None, + max_tokens: int = 4096, + temperature: float = 1.0, +) -> AsyncIterator[str]: + chat_messages = list(messages) + if system: + chat_messages.insert(0, {"role": "system", "content": system}) + + payload = { + "model": model, + "messages": chat_messages, + "max_tokens": max_tokens, + "temperature": temperature, + "stream": True, + } + + yield 
"event: message_start\n" + yield f'data: {json.dumps({"type": "message_start", "message": {"id": "msg_atomic_chat_stream", "type": "message", "role": "assistant", "content": [], "model": model, "stop_reason": None, "usage": {"input_tokens": 0, "output_tokens": 0}}})}\n\n' + yield "event: content_block_start\n" + yield f'data: {json.dumps({"type": "content_block_start", "index": 0, "content_block": {"type": "text", "text": ""}})}\n\n' + + async with httpx.AsyncClient(timeout=120.0) as client: + async with client.stream("POST", _api_url("/chat/completions"), json=payload) as resp: + resp.raise_for_status() + async for line in resp.aiter_lines(): + if not line or not line.startswith("data: "): + continue + raw = line[len("data: "):] + if raw.strip() == "[DONE]": + break + try: + chunk = json.loads(raw) + delta = chunk.get("choices", [{}])[0].get("delta", {}) + delta_text = delta.get("content", "") + if delta_text: + yield "event: content_block_delta\n" + yield f'data: {json.dumps({"type": "content_block_delta", "index": 0, "delta": {"type": "text_delta", "text": delta_text}})}\n\n' + + finish_reason = chunk.get("choices", [{}])[0].get("finish_reason") + if finish_reason: + usage = chunk.get("usage", {}) + yield "event: content_block_stop\n" + yield f'data: {json.dumps({"type": "content_block_stop", "index": 0})}\n\n' + yield "event: message_delta\n" + yield f'data: {json.dumps({"type": "message_delta", "delta": {"stop_reason": "end_turn", "stop_sequence": None}, "usage": {"output_tokens": usage.get("completion_tokens", 0)}})}\n\n' + yield "event: message_stop\n" + yield f'data: {json.dumps({"type": "message_stop"})}\n\n' + break + except json.JSONDecodeError: + continue diff --git a/package.json b/package.json index 47052352..03abde72 100644 --- a/package.json +++ b/package.json @@ -21,6 +21,7 @@ "dev:gemini": "bun run scripts/provider-launch.ts gemini", "dev:ollama": "bun run scripts/provider-launch.ts ollama", "dev:ollama:fast": "bun run scripts/provider-launch.ts ollama --fast --bare", + "dev:atomic-chat": "bun run scripts/provider-launch.ts atomic-chat", "profile:init": "bun run scripts/provider-bootstrap.ts", "profile:recommend": "bun run scripts/provider-recommend.ts", "profile:auto": "bun run scripts/provider-recommend.ts --apply", diff --git a/scripts/provider-bootstrap.ts b/scripts/provider-bootstrap.ts index 82ebbbb6..f39e3e50 100644 --- a/scripts/provider-bootstrap.ts +++ b/scripts/provider-bootstrap.ts @@ -10,6 +10,7 @@ import { recommendOllamaModel, } from '../src/utils/providerRecommendation.ts' import { + buildAtomicChatProfileEnv, buildCodexProfileEnv, buildGeminiProfileEnv, buildOllamaProfileEnv, @@ -20,8 +21,11 @@ import { type ProviderProfile, } from '../src/utils/providerProfile.ts' import { + getAtomicChatChatBaseUrl, getOllamaChatBaseUrl, + hasLocalAtomicChat, hasLocalOllama, + listAtomicChatModels, listOllamaModels, } from './provider-discovery.ts' @@ -34,7 +38,7 @@ function parseArg(name: string): string | null { function parseProviderArg(): ProviderProfile | 'auto' { const p = parseArg('--provider')?.toLowerCase() - if (p === 'openai' || p === 'ollama' || p === 'codex' || p === 'gemini') return p + if (p === 'openai' || p === 'ollama' || p === 'codex' || p === 'gemini' || p === 'atomic-chat') return p return 'auto' } @@ -102,6 +106,21 @@ async function main(): Promise { getOllamaChatBaseUrl, }, ) + } else if (selected === 'atomic-chat') { + const model = argModel || (await listAtomicChatModels(argBaseUrl || undefined))[0] + if (!model) { + if (!(await 
hasLocalAtomicChat(argBaseUrl || undefined))) { + console.error('Atomic Chat is not running (could not connect to 127.0.0.1:1337).\n Download from https://atomic.chat/ and launch the application.') + } else { + console.error('Atomic Chat is running but no model is loaded. Open Atomic Chat and download or start a model first.') + } + process.exit(1) + } + + env = buildAtomicChatProfileEnv(model, { + baseUrl: argBaseUrl, + getAtomicChatChatBaseUrl, + }) } else if (selected === 'codex') { const builtEnv = buildCodexProfileEnv({ model: argModel, diff --git a/scripts/provider-discovery.ts b/scripts/provider-discovery.ts index 9e3aacda..9c463f2f 100644 --- a/scripts/provider-discovery.ts +++ b/scripts/provider-discovery.ts @@ -1,6 +1,7 @@ import type { OllamaModelDescriptor } from '../src/utils/providerRecommendation.ts' export const DEFAULT_OLLAMA_BASE_URL = 'http://localhost:11434' +export const DEFAULT_ATOMIC_CHAT_BASE_URL = 'http://127.0.0.1:1337' function withTimeoutSignal(timeoutMs: number): { signal: AbortSignal @@ -93,6 +94,61 @@ export async function listOllamaModels( } } +// ── Atomic Chat discovery (Apple Silicon local LLMs at 127.0.0.1:1337) ────── + +export function getAtomicChatApiBaseUrl(baseUrl?: string): string { + const raw = baseUrl || process.env.ATOMIC_CHAT_BASE_URL || DEFAULT_ATOMIC_CHAT_BASE_URL + return trimTrailingSlash(raw) +} + +export function getAtomicChatChatBaseUrl(baseUrl?: string): string { + return `${getAtomicChatApiBaseUrl(baseUrl)}/v1` +} + +export async function hasLocalAtomicChat(baseUrl?: string): Promise { + const { signal, clear } = withTimeoutSignal(1200) + try { + const response = await fetch(`${getAtomicChatChatBaseUrl(baseUrl)}/models`, { + method: 'GET', + signal, + }) + return response.ok + } catch { + return false + } finally { + clear() + } +} + +export async function listAtomicChatModels( + baseUrl?: string, +): Promise { + const { signal, clear } = withTimeoutSignal(5000) + try { + const response = await fetch(`${getAtomicChatChatBaseUrl(baseUrl)}/models`, { + method: 'GET', + signal, + }) + if (!response.ok) { + return [] + } + + const data = await response.json() as { + data?: Array<{ id?: string }> + } + + return (data.data ?? []) + .filter(model => Boolean(model.id)) + .map(model => model.id!) 
+ } catch { + return [] + } finally { + clear() + } +} + +// ── Ollama benchmarking ───────────────────────────────────────────────────── + export async function benchmarkOllamaModel( modelName: string, baseUrl?: string, diff --git a/scripts/provider-launch.ts b/scripts/provider-launch.ts index 2859e9e8..17f11fb8 100644 --- a/scripts/provider-launch.ts +++ b/scripts/provider-launch.ts @@ -16,8 +16,11 @@ import { type ProviderProfile, } from '../src/utils/providerProfile.ts' import { + getAtomicChatChatBaseUrl, getOllamaChatBaseUrl, + hasLocalAtomicChat, hasLocalOllama, + listAtomicChatModels, listOllamaModels, } from './provider-discovery.ts' @@ -48,7 +51,7 @@ function parseLaunchOptions(argv: string[]): LaunchOptions { continue } - if ((lower === 'auto' || lower === 'openai' || lower === 'ollama' || lower === 'codex' || lower === 'gemini') && requestedProfile === 'auto') { + if ((lower === 'auto' || lower === 'openai' || lower === 'ollama' || lower === 'codex' || lower === 'gemini' || lower === 'atomic-chat') && requestedProfile === 'auto') { requestedProfile = lower as ProviderProfile | 'auto' continue } @@ -79,7 +82,7 @@ function loadPersistedProfile(): ProfileFile | null { if (!existsSync(path)) return null try { const parsed = JSON.parse(readFileSync(path, 'utf8')) as ProfileFile - if (parsed.profile === 'openai' || parsed.profile === 'ollama' || parsed.profile === 'codex' || parsed.profile === 'gemini') { + if (parsed.profile === 'openai' || parsed.profile === 'ollama' || parsed.profile === 'codex' || parsed.profile === 'gemini' || parsed.profile === 'atomic-chat') { return parsed } return null @@ -96,6 +99,11 @@ async function resolveOllamaDefaultModel( return recommended?.name ?? null } +async function resolveAtomicChatDefaultModel(): Promise { + const models = await listAtomicChatModels() + return models[0] ?? 
null +} + function runCommand(command: string, env: NodeJS.ProcessEnv): Promise { return runProcess(command, [], env) } @@ -132,6 +140,10 @@ function printSummary(profile: ProviderProfile, env: NodeJS.ProcessEnv): void { console.log(`OPENAI_BASE_URL=${env.OPENAI_BASE_URL}`) console.log(`OPENAI_MODEL=${env.OPENAI_MODEL}`) console.log(`CODEX_API_KEY_SET=${Boolean(resolveCodexApiCredentials(env).apiKey)}`) + } else if (profile === 'atomic-chat') { + console.log(`OPENAI_BASE_URL=${env.OPENAI_BASE_URL}`) + console.log(`OPENAI_MODEL=${env.OPENAI_MODEL}`) + console.log('OPENAI_API_KEY_SET=false (local provider, no key required)') } else { console.log(`OPENAI_BASE_URL=${env.OPENAI_BASE_URL}`) console.log(`OPENAI_MODEL=${env.OPENAI_MODEL}`) @@ -143,7 +155,7 @@ async function main(): Promise { const options = parseLaunchOptions(process.argv.slice(2)) const requestedProfile = options.requestedProfile if (!requestedProfile) { - console.error('Usage: bun run scripts/provider-launch.ts [openai|ollama|codex|gemini|auto] [--fast] [--goal ] [-- ]') + console.error('Usage: bun run scripts/provider-launch.ts [openai|ollama|codex|gemini|atomic-chat|auto] [--fast] [--goal ] [-- ]') process.exit(1) } @@ -175,12 +187,30 @@ async function main(): Promise { } } + let resolvedAtomicChatModel: string | null = null + if ( + profile === 'atomic-chat' && + (persisted?.profile !== 'atomic-chat' || !persisted?.env?.OPENAI_MODEL) + ) { + if (!(await hasLocalAtomicChat())) { + console.error('Atomic Chat is not running (could not connect to 127.0.0.1:1337).\n Download from https://atomic.chat/ and launch the application.') + process.exit(1) + } + resolvedAtomicChatModel = await resolveAtomicChatDefaultModel() + if (!resolvedAtomicChatModel) { + console.error('Atomic Chat is running but no model is loaded. 
Open Atomic Chat and download or start a model first.') + process.exit(1) + } + } + const env = await buildLaunchEnv({ profile, persisted, goal: options.goal, getOllamaChatBaseUrl, resolveOllamaDefaultModel: async () => resolvedOllamaModel || 'llama3.1:8b', + getAtomicChatChatBaseUrl, + resolveAtomicChatDefaultModel: async () => resolvedAtomicChatModel, }) if (options.fast) { applyFastFlags(env) diff --git a/smart_router.py b/smart_router.py index 0a54a791..14b90c03 100644 --- a/smart_router.py +++ b/smart_router.py @@ -57,8 +57,8 @@ class Provider: @property def is_configured(self) -> bool: """True if the provider has an API key set.""" - if self.name == "ollama": - return True # Ollama needs no API key + if self.name in ("ollama", "atomic-chat"): + return True # Local providers need no API key return bool(self.api_key) @property @@ -93,6 +93,7 @@ def build_default_providers() -> list[Provider]: big = os.getenv("BIG_MODEL", "gpt-4.1") small = os.getenv("SMALL_MODEL", "gpt-4.1-mini") ollama_url = os.getenv("OLLAMA_BASE_URL", "http://localhost:11434") + atomic_chat_url = os.getenv("ATOMIC_CHAT_BASE_URL", "http://127.0.0.1:1337") return [ Provider( @@ -119,6 +120,14 @@ def build_default_providers() -> list[Provider]: big_model=big if "gemini" not in big and "gpt" not in big else "llama3:8b", small_model=small if "gemini" not in small and "gpt" not in small else "llama3:8b", ), + Provider( + name="atomic-chat", + ping_url=f"{atomic_chat_url}/v1/models", + api_key_env="", + cost_per_1k_tokens=0.0, # free — local (Apple Silicon) + big_model=big if "gemini" not in big and "gpt" not in big else "llama3:8b", + small_model=small if "gemini" not in small and "gpt" not in small else "llama3:8b", + ), ] diff --git a/src/utils/providerProfile.test.ts b/src/utils/providerProfile.test.ts index e90746c6..b953e1b6 100644 --- a/src/utils/providerProfile.test.ts +++ b/src/utils/providerProfile.test.ts @@ -5,6 +5,7 @@ import { join } from 'node:path' import test from 'node:test' import { + buildAtomicChatProfileEnv, buildCodexProfileEnv, buildGeminiProfileEnv, buildLaunchEnv, @@ -381,3 +382,72 @@ test('auto profile falls back to openai when no viable ollama model exists', () assert.equal(selectAutoProfile(null), 'openai') assert.equal(selectAutoProfile('qwen2.5-coder:7b'), 'ollama') }) + +// ── Atomic Chat profile tests ──────────────────────────────────────────────── + +test('atomic-chat profiles never persist openai api keys', () => { + const env = buildAtomicChatProfileEnv('some-local-model', { + getAtomicChatChatBaseUrl: () => 'http://127.0.0.1:1337/v1', + }) + + assert.deepEqual(env, { + OPENAI_BASE_URL: 'http://127.0.0.1:1337/v1', + OPENAI_MODEL: 'some-local-model', + }) + assert.equal('OPENAI_API_KEY' in env, false) +}) + +test('atomic-chat profiles respect custom base url', () => { + const env = buildAtomicChatProfileEnv('my-model', { + baseUrl: 'http://192.168.1.100:1337', + getAtomicChatChatBaseUrl: (baseUrl?: string) => + baseUrl ? 
`${baseUrl}/v1` : 'http://127.0.0.1:1337/v1', + }) + + assert.equal(env.OPENAI_BASE_URL, 'http://192.168.1.100:1337/v1') + assert.equal(env.OPENAI_MODEL, 'my-model') +}) + +test('matching persisted atomic-chat env is reused for atomic-chat launch', async () => { + const env = await buildLaunchEnv({ + profile: 'atomic-chat', + persisted: profile('atomic-chat', { + OPENAI_BASE_URL: 'http://127.0.0.1:1337/v1', + OPENAI_MODEL: 'llama-3.1-8b', + }), + goal: 'balanced', + processEnv: {}, + getAtomicChatChatBaseUrl: () => 'http://127.0.0.1:1337/v1', + resolveAtomicChatDefaultModel: async () => 'other-model', + }) + + assert.equal(env.OPENAI_BASE_URL, 'http://127.0.0.1:1337/v1') + assert.equal(env.OPENAI_MODEL, 'llama-3.1-8b') + assert.equal(env.OPENAI_API_KEY, undefined) + assert.equal(env.CODEX_API_KEY, undefined) +}) + +test('atomic-chat launch ignores mismatched persisted openai env', async () => { + const env = await buildLaunchEnv({ + profile: 'atomic-chat', + persisted: profile('openai', { + OPENAI_BASE_URL: 'https://api.openai.com/v1', + OPENAI_MODEL: 'gpt-4o', + OPENAI_API_KEY: 'sk-persisted', + }), + goal: 'balanced', + processEnv: { + OPENAI_API_KEY: 'sk-live', + CODEX_API_KEY: 'codex-live', + CHATGPT_ACCOUNT_ID: 'acct_live', + }, + getAtomicChatChatBaseUrl: () => 'http://127.0.0.1:1337/v1', + resolveAtomicChatDefaultModel: async () => 'local-model', + }) + + assert.equal(env.OPENAI_BASE_URL, 'http://127.0.0.1:1337/v1') + assert.equal(env.OPENAI_MODEL, 'local-model') + assert.equal(env.OPENAI_API_KEY, undefined) + assert.equal(env.CODEX_API_KEY, undefined) + assert.equal(env.CHATGPT_ACCOUNT_ID, undefined) +}) diff --git a/src/utils/providerProfile.ts b/src/utils/providerProfile.ts index 866c19c5..d85af0c6 100644 --- a/src/utils/providerProfile.ts +++ b/src/utils/providerProfile.ts @@ -13,7 +13,7 @@ import { const DEFAULT_GEMINI_BASE_URL = 'https://generativelanguage.googleapis.com/v1beta/openai' const DEFAULT_GEMINI_MODEL = 'gemini-2.0-flash' -export type ProviderProfile = 'openai' | 'ollama' | 'codex' | 'gemini' +export type ProviderProfile = 'openai' | 'ollama' | 'codex' | 'gemini' | 'atomic-chat' export type ProfileEnv = { OPENAI_BASE_URL?: string @@ -53,6 +53,19 @@ export function buildOllamaProfileEnv( } } +export function buildAtomicChatProfileEnv( + model: string, + options: { + baseUrl?: string | null + getAtomicChatChatBaseUrl: (baseUrl?: string) => string + }, +): ProfileEnv { + return { + OPENAI_BASE_URL: options.getAtomicChatChatBaseUrl(options.baseUrl ?? undefined), + OPENAI_MODEL: model, + } +} + export function buildGeminiProfileEnv(options: { model?: string | null baseUrl?: string | null @@ -171,6 +184,8 @@ export async function buildLaunchEnv(options: { processEnv?: NodeJS.ProcessEnv getOllamaChatBaseUrl?: (baseUrl?: string) => string resolveOllamaDefaultModel?: (goal: RecommendationGoal) => Promise + getAtomicChatChatBaseUrl?: (baseUrl?: string) => string + resolveAtomicChatDefaultModel?: () => Promise }): Promise { const processEnv = options.processEnv ?? process.env const persistedEnv = @@ -248,6 +263,26 @@ export async function buildLaunchEnv(options: { return env } + if (options.profile === 'atomic-chat') { + const getAtomicChatBaseUrl = + options.getAtomicChatChatBaseUrl ?? (() => 'http://127.0.0.1:1337/v1') + const resolveModel = + options.resolveAtomicChatDefaultModel ?? 
(async () => null as string | null) + + env.OPENAI_BASE_URL = persistedEnv.OPENAI_BASE_URL || getAtomicChatBaseUrl() + env.OPENAI_MODEL = + persistedEnv.OPENAI_MODEL || + (await resolveModel()) || + '' + + delete env.OPENAI_API_KEY + delete env.CODEX_API_KEY + delete env.CHATGPT_ACCOUNT_ID + delete env.CODEX_ACCOUNT_ID + + return env + } + if (options.profile === 'codex') { env.OPENAI_BASE_URL = persistedEnv.OPENAI_BASE_URL && isCodexBaseUrl(persistedEnv.OPENAI_BASE_URL) diff --git a/test_atomic_chat_provider.py b/test_atomic_chat_provider.py new file mode 100644 index 00000000..819c610c --- /dev/null +++ b/test_atomic_chat_provider.py @@ -0,0 +1,130 @@ +""" +test_atomic_chat_provider.py +Run: pytest test_atomic_chat_provider.py -v +""" + +import pytest +from unittest.mock import AsyncMock, MagicMock, patch +from atomic_chat_provider import ( + atomic_chat, + list_atomic_chat_models, + check_atomic_chat_running, +) + + +@pytest.mark.asyncio +async def test_atomic_chat_running_true(): + mock_response = MagicMock() + mock_response.status_code = 200 + with patch("atomic_chat_provider.httpx.AsyncClient") as MockClient: + MockClient.return_value.__aenter__.return_value.get = AsyncMock(return_value=mock_response) + result = await check_atomic_chat_running() + assert result is True + + +@pytest.mark.asyncio +async def test_atomic_chat_running_false_on_exception(): + with patch("atomic_chat_provider.httpx.AsyncClient") as MockClient: + MockClient.return_value.__aenter__.return_value.get = AsyncMock(side_effect=Exception("refused")) + result = await check_atomic_chat_running() + assert result is False + + +@pytest.mark.asyncio +async def test_list_models_returns_ids(): + mock_response = MagicMock() + mock_response.status_code = 200 + mock_response.json.return_value = { + "data": [{"id": "llama-3.1-8b"}, {"id": "mistral-7b"}], + } + mock_response.raise_for_status = MagicMock() + with patch("atomic_chat_provider.httpx.AsyncClient") as MockClient: + MockClient.return_value.__aenter__.return_value.get = AsyncMock(return_value=mock_response) + models = await list_atomic_chat_models() + assert "llama-3.1-8b" in models + assert "mistral-7b" in models + + +@pytest.mark.asyncio +async def test_list_models_empty_on_failure(): + with patch("atomic_chat_provider.httpx.AsyncClient") as MockClient: + MockClient.return_value.__aenter__.return_value.get = AsyncMock(side_effect=Exception("down")) + models = await list_atomic_chat_models() + assert models == [] + + +@pytest.mark.asyncio +async def test_atomic_chat_returns_anthropic_format(): + mock_response = MagicMock() + mock_response.raise_for_status = MagicMock() + mock_response.json.return_value = { + "id": "chatcmpl-abc123", + "choices": [{"message": {"content": "42 is the answer."}}], + "usage": {"prompt_tokens": 10, "completion_tokens": 8}, + } + with patch("atomic_chat_provider.httpx.AsyncClient") as MockClient: + MockClient.return_value.__aenter__.return_value.post = AsyncMock(return_value=mock_response) + result = await atomic_chat( + model="llama-3.1-8b", + messages=[{"role": "user", "content": "What is 6*7?"}], + ) + assert result["type"] == "message" + assert result["role"] == "assistant" + assert "42" in result["content"][0]["text"] + assert result["usage"]["input_tokens"] == 10 + assert result["usage"]["output_tokens"] == 8 + + +@pytest.mark.asyncio +async def test_atomic_chat_prepends_system(): + captured = {} + + async def mock_post(url, json=None, **kwargs): + captured.update(json or {}) + m = MagicMock() + m.raise_for_status = MagicMock() + 
m.json.return_value = { + "id": "chatcmpl-xyz", + "choices": [{"message": {"content": "ok"}}], + "usage": {"prompt_tokens": 1, "completion_tokens": 1}, + } + return m + + with patch("atomic_chat_provider.httpx.AsyncClient") as MockClient: + MockClient.return_value.__aenter__.return_value.post = mock_post + await atomic_chat( + model="llama-3.1-8b", + messages=[{"role": "user", "content": "Hi"}], + system="Be helpful.", + ) + assert captured["messages"][0]["role"] == "system" + assert "helpful" in captured["messages"][0]["content"] + + +@pytest.mark.asyncio +async def test_atomic_chat_sends_correct_payload(): + captured = {} + + async def mock_post(url, json=None, **kwargs): + captured.update(json or {}) + m = MagicMock() + m.raise_for_status = MagicMock() + m.json.return_value = { + "id": "chatcmpl-xyz", + "choices": [{"message": {"content": "ok"}}], + "usage": {"prompt_tokens": 1, "completion_tokens": 1}, + } + return m + + with patch("atomic_chat_provider.httpx.AsyncClient") as MockClient: + MockClient.return_value.__aenter__.return_value.post = mock_post + await atomic_chat( + model="test-model", + messages=[{"role": "user", "content": "Test"}], + max_tokens=2048, + temperature=0.5, + ) + assert captured["model"] == "test-model" + assert captured["max_tokens"] == 2048 + assert captured["temperature"] == 0.5 + assert captured["stream"] is False From 3b7b9740f2dd3493a887e1277a7570c129bae3d9 Mon Sep 17 00:00:00 2001 From: Misha Skvortsov Date: Wed, 1 Apr 2026 23:06:25 +0300 Subject: [PATCH 20/35] fix: update OPENAI_API_KEY message and add Atomic Chat URL check - Updated the message for the OPENAI_API_KEY check to include Atomic Chat as an allowed local provider. - Introduced a new function to check if the base URL corresponds to Atomic Chat, enhancing the system's ability to identify local providers. - Adjusted the Ollama processor mode check to skip processing when an Atomic Chat local provider is detected. --- hello/world | 1 + scripts/system-check.ts | 15 ++++++++++++++- 2 files changed, 15 insertions(+), 1 deletion(-) create mode 100644 hello/world diff --git a/hello/world b/hello/world new file mode 100644 index 00000000..8ab686ea --- /dev/null +++ b/hello/world @@ -0,0 +1 @@ +Hello, World! 
diff --git a/scripts/system-check.ts b/scripts/system-check.ts index e129685a..dfb9db21 100644 --- a/scripts/system-check.ts +++ b/scripts/system-check.ts @@ -186,7 +186,7 @@ function checkOpenAIEnv(): CheckResult[] { } else if (!key && !isLocalBaseUrl(request.baseUrl)) { results.push(fail('OPENAI_API_KEY', 'Missing key for non-local provider URL.')) } else if (!key) { - results.push(pass('OPENAI_API_KEY', 'Not set (allowed for local providers like Ollama/LM Studio).')) + results.push(pass('OPENAI_API_KEY', 'Not set (allowed for local providers like Atomic Chat/Ollama/LM Studio).')) } else { results.push(pass('OPENAI_API_KEY', 'Configured.')) } @@ -271,6 +271,15 @@ async function checkBaseUrlReachability(): Promise { } } +function isAtomicChatUrl(baseUrl: string): boolean { + try { + const parsed = new URL(baseUrl) + return parsed.port === '1337' && isLocalBaseUrl(baseUrl) + } catch { + return false + } +} + function checkOllamaProcessorMode(): CheckResult { if (!isTruthy(process.env.CLAUDE_CODE_USE_OPENAI) || isTruthy(process.env.CLAUDE_CODE_USE_GEMINI)) { return pass('Ollama processor mode', 'Skipped (OpenAI-compatible mode disabled).') @@ -281,6 +290,10 @@ function checkOllamaProcessorMode(): CheckResult { return pass('Ollama processor mode', 'Skipped (provider URL is not local).') } + if (isAtomicChatUrl(baseUrl)) { + return pass('Ollama processor mode', 'Skipped (Atomic Chat local provider detected, not Ollama).') + } + const result = spawnSync('ollama', ['ps'], { cwd: process.cwd(), encoding: 'utf8', From 4f78bde08581fda7e636d000ec08558c68d0cf6a Mon Sep 17 00:00:00 2001 From: Mike <71440932+Vect0rM@users.noreply.github.com> Date: Wed, 1 Apr 2026 23:15:10 +0300 Subject: [PATCH 21/35] Delete hello/world --- hello/world | 1 - 1 file changed, 1 deletion(-) delete mode 100644 hello/world diff --git a/hello/world b/hello/world deleted file mode 100644 index 8ab686ea..00000000 --- a/hello/world +++ /dev/null @@ -1 +0,0 @@ -Hello, World! From 6aec8416ccfffb2f2c0ae67a78388f1521a1af05 Mon Sep 17 00:00:00 2001 From: salmanrajz Date: Thu, 2 Apr 2026 11:51:04 +0400 Subject: [PATCH 22/35] fix: make normalizeSchemaForOpenAI recursive for nested objects Fixes #111. normalizeSchemaForOpenAI only processed the top-level object schema, leaving nested objects untouched. OpenAI strict mode rejects schemas where nested objects have properties not listed in their required array, causing 400 errors on tools with nested params. Now recurses into properties, items, and anyOf/oneOf/allOf combinators (matching the pattern used by enforceStrictSchema in codexShim.ts). Also adds additionalProperties: false to nested objects in strict mode. Build verified passing. --- src/services/api/openaiShim.ts | 64 +++++++++++++++++++++++++++------- 1 file changed, 51 insertions(+), 13 deletions(-) diff --git a/src/services/api/openaiShim.ts b/src/services/api/openaiShim.ts index 9a500490..017af0d1 100644 --- a/src/services/api/openaiShim.ts +++ b/src/services/api/openaiShim.ts @@ -235,20 +235,58 @@ function normalizeSchemaForOpenAI( schema: Record, strict = true, ): Record { - if (schema.type !== 'object' || !schema.properties) return schema - const properties = schema.properties as Record - const existingRequired = Array.isArray(schema.required) ? schema.required as string[] : [] - // OpenAI strict mode requires every property to be listed in required[]. - // Gemini rejects schemas where required[] contains keys absent from properties, - // so only promote keys that actually exist in properties. 
- if (strict) { - const allKeys = Object.keys(properties) - const required = Array.from(new Set([...existingRequired, ...allKeys])) - return { ...schema, required } + if (!schema || typeof schema !== 'object' || Array.isArray(schema)) { + return (schema ?? {}) as Record } - // For Gemini: keep only existing required keys that are present in properties - const required = existingRequired.filter(k => k in properties) - return { ...schema, required } + + const record = { ...schema } + + if (record.type === 'object' && record.properties) { + const properties = record.properties as Record> + const existingRequired = Array.isArray(record.required) ? record.required as string[] : [] + + // Recurse into each property + const normalizedProps: Record = {} + for (const [key, value] of Object.entries(properties)) { + normalizedProps[key] = normalizeSchemaForOpenAI( + value as Record, + strict, + ) + } + record.properties = normalizedProps + + if (strict) { + // OpenAI strict mode requires every property to be listed in required[] + const allKeys = Object.keys(normalizedProps) + record.required = Array.from(new Set([...existingRequired, ...allKeys])) + record.additionalProperties = false + } else { + // For Gemini: keep only existing required keys that are present in properties + record.required = existingRequired.filter(k => k in normalizedProps) + } + } + + // Recurse into array items + if ('items' in record) { + if (Array.isArray(record.items)) { + record.items = (record.items as unknown[]).map( + item => normalizeSchemaForOpenAI(item as Record, strict), + ) + } else { + record.items = normalizeSchemaForOpenAI(record.items as Record, strict) + } + } + + // Recurse into combinators + for (const key of ['anyOf', 'oneOf', 'allOf'] as const) { + if (key in record && Array.isArray(record[key])) { + record[key] = (record[key] as unknown[]).map( + item => normalizeSchemaForOpenAI(item as Record, strict), + ) + } + } + + return record } function convertTools( From 5b20fe783dac0581b8ea486ef5cc56d2659d8790 Mon Sep 17 00:00:00 2001 From: salmanrajz Date: Thu, 2 Apr 2026 12:00:07 +0400 Subject: [PATCH 23/35] fix: make CostThresholdDialog provider-aware instead of hardcoding Anthropic Partially addresses #39. The cost threshold dialog hardcoded 'Anthropic API' in the title, which is misleading for users on OpenAI, Gemini, Ollama, or other providers. Now detects the active provider via getAPIProvider() and shows the correct label. 
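Because the committed file is react-compiler output (the readable TSX only survives in the
sourcemap in the diff below), here is a rough sketch of what the provider-aware dialog boils
down to. It is illustrative, not the exact shipped source: the Dialog/Select/Link imports and
the getProviderLabel() arms mirror what is visible elsewhere in this series, the title template
is assumed, and any provider cases beyond vertex/foundry/openai/gemini are assumptions.

import React from 'react'
import { Box, Link, Text } from '../ink.js'
import { Select } from './CustomSelect/index.js'
import { Dialog } from './design-system/Dialog.js'
import { getAPIProvider } from '../utils/model/providers.js'

// Map the active provider to a human-readable label for the dialog title.
function getProviderLabel(): string {
  switch (getAPIProvider()) {
    case 'vertex':
      return 'Google Vertex'
    case 'foundry':
      return 'Azure Foundry'
    case 'openai':
      return 'OpenAI-compatible API'
    case 'gemini':
      return 'Gemini API'
    default:
      return 'API'
  }
}

export function CostThresholdDialog({ onDone }: { onDone: () => void }): React.ReactNode {
  return (
    <Dialog
      title={`You've spent $5 on the ${getProviderLabel()} this session.`}
      onCancel={onDone}
    >
      <Box flexDirection="column">
        <Text>Learn more about how to monitor your spending:</Text>
        <Link url="https://code.claude.com/docs/en/costs" />
      </Box>
      <Select options={[{ value: 'ok', label: 'Got it, thanks!' }]} onChange={onDone} />
    </Dialog>
  )
}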
--- src/components/CostThresholdDialog.tsx | 101 +++++++++++++------------ 1 file changed, 53 insertions(+), 48 deletions(-) diff --git a/src/components/CostThresholdDialog.tsx b/src/components/CostThresholdDialog.tsx index 59c2c582..1bb5f4ce 100644 --- a/src/components/CostThresholdDialog.tsx +++ b/src/components/CostThresholdDialog.tsx @@ -1,50 +1,55 @@ -import { c as _c } from "react-compiler-runtime"; -import React from 'react'; -import { Box, Link, Text } from '../ink.js'; -import { Select } from './CustomSelect/index.js'; -import { Dialog } from './design-system/Dialog.js'; +import React from 'react' +import { Box, Link, Text } from '../ink.js' +import { Select } from './CustomSelect/index.js' +import { Dialog } from './design-system/Dialog.js' +import { getAPIProvider } from '../utils/model/providers.js' + type Props = { - onDone: () => void; -}; -export function CostThresholdDialog(t0) { - const $ = _c(7); - const { - onDone - } = t0; - let t1; - if ($[0] === Symbol.for("react.memo_cache_sentinel")) { - t1 = Learn more about how to monitor your spending:; - $[0] = t1; - } else { - t1 = $[0]; - } - let t2; - if ($[1] === Symbol.for("react.memo_cache_sentinel")) { - t2 = [{ - value: "ok", - label: "Got it, thanks!" - }]; - $[1] = t2; - } else { - t2 = $[1]; - } - let t3; - if ($[2] !== onDone) { - t3 = + + ) } -//# sourceMappingURL=data:application/json;charset=utf-8;base64,eyJ2ZXJzaW9uIjozLCJuYW1lcyI6WyJSZWFjdCIsIkJveCIsIkxpbmsiLCJUZXh0IiwiU2VsZWN0IiwiRGlhbG9nIiwiUHJvcHMiLCJvbkRvbmUiLCJDb3N0VGhyZXNob2xkRGlhbG9nIiwidDAiLCIkIiwiX2MiLCJ0MSIsIlN5bWJvbCIsImZvciIsInQyIiwidmFsdWUiLCJsYWJlbCIsInQzIiwidDQiXSwic291cmNlcyI6WyJDb3N0VGhyZXNob2xkRGlhbG9nLnRzeCJdLCJzb3VyY2VzQ29udGVudCI6WyJpbXBvcnQgUmVhY3QgZnJvbSAncmVhY3QnXG5pbXBvcnQgeyBCb3gsIExpbmssIFRleHQgfSBmcm9tICcuLi9pbmsuanMnXG5pbXBvcnQgeyBTZWxlY3QgfSBmcm9tICcuL0N1c3RvbVNlbGVjdC9pbmRleC5qcydcbmltcG9ydCB7IERpYWxvZyB9IGZyb20gJy4vZGVzaWduLXN5c3RlbS9EaWFsb2cuanMnXG5cbnR5cGUgUHJvcHMgPSB7XG4gIG9uRG9uZTogKCkgPT4gdm9pZFxufVxuXG5leHBvcnQgZnVuY3Rpb24gQ29zdFRocmVzaG9sZERpYWxvZyh7IG9uRG9uZSB9OiBQcm9wcyk6IFJlYWN0LlJlYWN0Tm9kZSB7XG4gIHJldHVybiAoXG4gICAgPERpYWxvZ1xuICAgICAgdGl0bGU9XCJZb3UndmUgc3BlbnQgJDUgb24gdGhlIEFudGhyb3BpYyBBUEkgdGhpcyBzZXNzaW9uLlwiXG4gICAgICBvbkNhbmNlbD17b25Eb25lfVxuICAgID5cbiAgICAgIDxCb3ggZmxleERpcmVjdGlvbj1cImNvbHVtblwiPlxuICAgICAgICA8VGV4dD5MZWFybiBtb3JlIGFib3V0IGhvdyB0byBtb25pdG9yIHlvdXIgc3BlbmRpbmc6PC9UZXh0PlxuICAgICAgICA8TGluayB1cmw9XCJodHRwczovL2NvZGUuY2xhdWRlLmNvbS9kb2NzL2VuL2Nvc3RzXCIgLz5cbiAgICAgIDwvQm94PlxuICAgICAgPFNlbGVjdFxuICAgICAgICBvcHRpb25zPXtbXG4gICAgICAgICAge1xuICAgICAgICAgICAgdmFsdWU6ICdvaycsXG4gICAgICAgICAgICBsYWJlbDogJ0dvdCBpdCwgdGhhbmtzIScsXG4gICAgICAgICAgfSxcbiAgICAgICAgXX1cbiAgICAgICAgb25DaGFuZ2U9e29uRG9uZX1cbiAgICAgIC8+XG4gICAgPC9EaWFsb2c+XG4gIClcbn1cbiJdLCJtYXBwaW5ncyI6IjtBQUFBLE9BQU9BLEtBQUssTUFBTSxPQUFPO0FBQ3pCLFNBQVNDLEdBQUcsRUFBRUMsSUFBSSxFQUFFQyxJQUFJLFFBQVEsV0FBVztBQUMzQyxTQUFTQyxNQUFNLFFBQVEseUJBQXlCO0FBQ2hELFNBQVNDLE1BQU0sUUFBUSwyQkFBMkI7QUFFbEQsS0FBS0MsS0FBSyxHQUFHO0VBQ1hDLE1BQU0sRUFBRSxHQUFHLEdBQUcsSUFBSTtBQUNwQixDQUFDO0FBRUQsT0FBTyxTQUFBQyxvQkFBQUMsRUFBQTtFQUFBLE1BQUFDLENBQUEsR0FBQUMsRUFBQTtFQUE2QjtJQUFBSjtFQUFBLElBQUFFLEVBQWlCO0VBQUEsSUFBQUcsRUFBQTtFQUFBLElBQUFGLENBQUEsUUFBQUcsTUFBQSxDQUFBQyxHQUFBO0lBTS9DRixFQUFBLElBQUMsR0FBRyxDQUFlLGFBQVEsQ0FBUixRQUFRLENBQ3pCLENBQUMsSUFBSSxDQUFDLDhDQUE4QyxFQUFuRCxJQUFJLENBQ0wsQ0FBQyxJQUFJLENBQUssR0FBdUMsQ0FBdkMsdUNBQXVDLEdBQ25ELEVBSEMsR0FBRyxDQUdFO0lBQUFGLENBQUEsTUFBQUUsRUFBQTtFQUFBO0lBQUFBLEVBQUEsR0FBQUYsQ0FBQTtFQUFBO0VBQUEsSUFBQUssRUFBQTtFQUFBLElBQUFMLENBQUEsUUFB
QUcsTUFBQSxDQUFBQyxHQUFBO0lBRUtDLEVBQUEsSUFDUDtNQUFBQyxLQUFBLEVBQ1MsSUFBSTtNQUFBQyxLQUFBLEVBQ0o7SUFDVCxDQUFDLENBQ0Y7SUFBQVAsQ0FBQSxNQUFBSyxFQUFBO0VBQUE7SUFBQUEsRUFBQSxHQUFBTCxDQUFBO0VBQUE7RUFBQSxJQUFBUSxFQUFBO0VBQUEsSUFBQVIsQ0FBQSxRQUFBSCxNQUFBO0lBTkhXLEVBQUEsSUFBQyxNQUFNLENBQ0ksT0FLUixDQUxRLENBQUFILEVBS1QsQ0FBQyxDQUNTUixRQUFNLENBQU5BLE9BQUssQ0FBQyxHQUNoQjtJQUFBRyxDQUFBLE1BQUFILE1BQUE7SUFBQUcsQ0FBQSxNQUFBUSxFQUFBO0VBQUE7SUFBQUEsRUFBQSxHQUFBUixDQUFBO0VBQUE7RUFBQSxJQUFBUyxFQUFBO0VBQUEsSUFBQVQsQ0FBQSxRQUFBSCxNQUFBLElBQUFHLENBQUEsUUFBQVEsRUFBQTtJQWhCSkMsRUFBQSxJQUFDLE1BQU0sQ0FDQyxLQUFvRCxDQUFwRCxvREFBb0QsQ0FDaERaLFFBQU0sQ0FBTkEsT0FBSyxDQUFDLENBRWhCLENBQUFLLEVBR0ssQ0FDTCxDQUFBTSxFQVFDLENBQ0gsRUFqQkMsTUFBTSxDQWlCRTtJQUFBUixDQUFBLE1BQUFILE1BQUE7SUFBQUcsQ0FBQSxNQUFBUSxFQUFBO0lBQUFSLENBQUEsTUFBQVMsRUFBQTtFQUFBO0lBQUFBLEVBQUEsR0FBQVQsQ0FBQTtFQUFBO0VBQUEsT0FqQlRTLEVBaUJTO0FBQUEiLCJpZ25vcmVMaXN0IjpbXX0= \ No newline at end of file From e494015e9a9f02427230fc8c8e6b1834aae60474 Mon Sep 17 00:00:00 2001 From: salmanrajz Date: Thu, 2 Apr 2026 12:12:24 +0400 Subject: [PATCH 24/35] fix: wrap streaming reader in try/finally to release lock and prevent resource leaks Partially addresses #112. The streaming reader in openaiStreamToAnthropic had no error handling - if an error occurred during streaming, the reader lock was never released. Wrapped the while loop in try/finally to ensure reader.releaseLock() is always called. --- src/services/api/openaiShim.ts | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/src/services/api/openaiShim.ts b/src/services/api/openaiShim.ts index 017af0d1..c3b8566b 100644 --- a/src/services/api/openaiShim.ts +++ b/src/services/api/openaiShim.ts @@ -412,15 +412,16 @@ async function* openaiStreamToAnthropic( const decoder = new TextDecoder() let buffer = '' - while (true) { - const { done, value } = await reader.read() - if (done) break + try { + while (true) { + const { done, value } = await reader.read() + if (done) break - buffer += decoder.decode(value, { stream: true }) - const lines = buffer.split('\n') - buffer = lines.pop() ?? '' + buffer += decoder.decode(value, { stream: true }) + const lines = buffer.split('\n') + buffer = lines.pop() ?? 
'' - for (const line of lines) { + for (const line of lines) { const trimmed = line.trim() if (!trimmed || trimmed === 'data: [DONE]') continue if (!trimmed.startsWith('data: ')) continue @@ -566,6 +567,9 @@ async function* openaiStreamToAnthropic( hasEmittedFinalUsage = true } } + } + } finally { + reader.releaseLock() } yield { type: 'message_stop' } From 7f969200fb40776b14830f52d74e2d23a7d1793b Mon Sep 17 00:00:00 2001 From: Raj Rasane Date: Thu, 2 Apr 2026 14:00:32 +0530 Subject: [PATCH 25/35] Add exit reason types and improve graceful shutdown handling --- src/entrypoints/agentSdkTypes.ts | 5 +++++ src/screens/REPL.tsx | 4 ++-- src/utils/gracefulShutdown.ts | 18 ++++++++++++++---- 3 files changed, 21 insertions(+), 6 deletions(-) diff --git a/src/entrypoints/agentSdkTypes.ts b/src/entrypoints/agentSdkTypes.ts index b1994585..1389113e 100644 --- a/src/entrypoints/agentSdkTypes.ts +++ b/src/entrypoints/agentSdkTypes.ts @@ -441,3 +441,8 @@ export async function connectRemoteControl( ): Promise { throw new Error('not implemented') } + +// add exit reason types for removing the error within gracefulShutdown file +export type ExitReason = { + +} \ No newline at end of file diff --git a/src/screens/REPL.tsx b/src/screens/REPL.tsx index 65df5ca4..93e61985 100644 --- a/src/screens/REPL.tsx +++ b/src/screens/REPL.tsx @@ -137,7 +137,7 @@ import { generateSessionTitle } from '../utils/sessionTitle.js'; import { BASH_INPUT_TAG, COMMAND_MESSAGE_TAG, COMMAND_NAME_TAG, LOCAL_COMMAND_STDOUT_TAG } from '../constants/xml.js'; import { escapeXml } from '../utils/xml.js'; import type { ThinkingConfig } from '../utils/thinking.js'; -import { gracefulShutdownSync } from '../utils/gracefulShutdown.js'; +import { gracefulShutdownSync, isShuttingDown } from '../utils/gracefulShutdown.js'; import { handlePromptSubmit, type PromptInputHelpers } from '../utils/handlePromptSubmit.js'; import { useQueueProcessor } from '../hooks/useQueueProcessor.js'; import { useMailboxBridge } from '../hooks/useMailboxBridge.js'; @@ -4886,7 +4886,7 @@ export function REPL({ {mrRender()} - {!toolJSX?.shouldHidePromptInput && !focusedInputDialog && !isExiting && !disabled && !cursor && <> + {!toolJSX?.shouldHidePromptInput && !focusedInputDialog && !isExiting && !disabled && !cursor && !isShuttingDown() && <> {autoRunIssueReason && } {postCompactSurvey.state !== 'closed' ? : memorySurvey.state !== 'closed' ? : } {/* Frustration-triggered transcript sharing prompt */} diff --git a/src/utils/gracefulShutdown.ts b/src/utils/gracefulShutdown.ts index 4e003000..ae486e34 100644 --- a/src/utils/gracefulShutdown.ts +++ b/src/utils/gracefulShutdown.ts @@ -56,7 +56,7 @@ import { profileReport } from './startupProfiler.js' * 3. Failing to disable leaves the terminal in a broken state */ /* eslint-disable custom-rules/no-sync-fs -- must be sync to flush before process.exit */ -function cleanupTerminalModes(): void { +function cleanupTerminalModes(skipUnmount: boolean = false): void { if (!process.stdout.isTTY) { return } @@ -84,7 +84,7 @@ function cleanupTerminalModes(): void { // Calling unmount() now does the final render on the alt buffer, // unsubscribes from signal-exit, and writes 1049l exactly once. const inst = instances.get(process.stdout) - if (inst?.isAltScreenActive) { + if (!skipUnmount && inst?.isAltScreenActive) { try { inst.unmount() } catch { @@ -92,6 +92,11 @@ function cleanupTerminalModes(): void { // so printResumeHint still hits the main buffer. 
writeSync(1, EXIT_ALT_SCREEN) } + } else if (skipUnmount && inst?.isAltScreenActive) { + // We already unmounted asynchronously in gracefulShutdown, but if we + // fallback to manual alt-screen exit here just in case Ink didn't write it or is dead. + // Actually, AlternateScreen unmount writes EXIT_ALT_SCREEN, so if we awaited unmount, + // we shouldn't emit it again. So we just do nothing here. } // Catches events that arrived during the unmount tree-walk. // detachForShutdown() below also drains. @@ -411,12 +416,17 @@ export async function gracefulShutdown( ) const sessionEndTimeoutMs = getSessionEndHookTimeoutMs() + // Await one tick so React can flush pending updates from commands (e.g. hiding + // the autocomplete menu on /exit) before we detach Ink. This lets log-update + // erase floating UI elements from the terminal so they don't linger after exit. + await new Promise(r => setTimeout(r, 20)) + // Failsafe: guarantee process exits even if cleanup hangs (e.g., MCP connections). // Runs cleanupTerminalModes first so a hung cleanup doesn't leave the terminal dirty. // Budget = max(5s, hook budget + 3.5s headroom for cleanup + analytics flush). failsafeTimer = setTimeout( code => { - cleanupTerminalModes() + cleanupTerminalModes(true) printResumeHint() forceExit(code) }, @@ -433,7 +443,7 @@ export async function gracefulShutdown( // cleanup (e.g., SIGKILL during macOS reboot). Without this, the resume // hint would only appear after cleanup functions, hooks, and analytics // flush — which can take several seconds. - cleanupTerminalModes() + cleanupTerminalModes(true) printResumeHint() // Flush session data first — this is the most critical cleanup. If the From 14de9cf0fb53cfe94d01d995e7d06b03ecdc75e5 Mon Sep 17 00:00:00 2001 From: salmanrajz Date: Thu, 2 Apr 2026 12:36:05 +0400 Subject: [PATCH 26/35] refactor: address code review feedback - Make getProviderLabel() switch exhaustive with explicit openai/gemini arms instead of falling through to env-var checks in default - Add clarifying comment on additionalProperties override in schema normalization --- src/components/CostThresholdDialog.tsx | 10 ++++------ src/services/api/openaiShim.ts | 2 ++ 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/src/components/CostThresholdDialog.tsx b/src/components/CostThresholdDialog.tsx index 1bb5f4ce..7c21becd 100644 --- a/src/components/CostThresholdDialog.tsx +++ b/src/components/CostThresholdDialog.tsx @@ -19,13 +19,11 @@ function getProviderLabel(): string { return 'Google Vertex' case 'foundry': return 'Azure Foundry' + case 'openai': + return 'OpenAI-compatible API' + case 'gemini': + return 'Gemini API' default: - if (process.env.CLAUDE_CODE_USE_OPENAI === '1' || process.env.CLAUDE_CODE_USE_OPENAI === 'true') { - return 'OpenAI-compatible API' - } - if (process.env.CLAUDE_CODE_USE_GEMINI === '1' || process.env.CLAUDE_CODE_USE_GEMINI === 'true') { - return 'Gemini API' - } return 'API' } } diff --git a/src/services/api/openaiShim.ts b/src/services/api/openaiShim.ts index c3b8566b..0d5bde28 100644 --- a/src/services/api/openaiShim.ts +++ b/src/services/api/openaiShim.ts @@ -259,6 +259,8 @@ function normalizeSchemaForOpenAI( // OpenAI strict mode requires every property to be listed in required[] const allKeys = Object.keys(normalizedProps) record.required = Array.from(new Set([...existingRequired, ...allKeys])) + // OpenAI strict mode requires additionalProperties: false on all object + // schemas — override unconditionally to ensure nested objects comply. 
record.additionalProperties = false } else { // For Gemini: keep only existing required keys that are present in properties From 7a7437b309bdd50291b85ca90920ac738d98439b Mon Sep 17 00:00:00 2001 From: Juan Camilo Date: Thu, 2 Apr 2026 09:29:53 +0200 Subject: [PATCH 27/35] fix: skip Anthropic model migration for third-party providers Add provider guard to migrateSonnet1mToSonnet45() so it only runs for firstParty (Anthropic) users. Without this, a 3P user with model='sonnet[1m]' would have it rewritten to an Anthropic-specific alias that is invalid for OpenAI/Gemini/Ollama providers. --- src/migrations/migrateSonnet1mToSonnet45.ts | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/migrations/migrateSonnet1mToSonnet45.ts b/src/migrations/migrateSonnet1mToSonnet45.ts index f2936388..23319591 100644 --- a/src/migrations/migrateSonnet1mToSonnet45.ts +++ b/src/migrations/migrateSonnet1mToSonnet45.ts @@ -3,6 +3,7 @@ import { setMainLoopModelOverride, } from '../bootstrap/state.js' import { getGlobalConfig, saveGlobalConfig } from '../utils/config.js' +import { getAPIProvider } from '../utils/model/providers.js' import { getSettingsForSource, updateSettingsForSource, @@ -23,6 +24,10 @@ import { * tracked by a completion flag in global config. */ export function migrateSonnet1mToSonnet45(): void { + if (getAPIProvider() !== 'firstParty') { + return + } + const config = getGlobalConfig() if (config.sonnet1m45MigrationComplete) { return From 3ca6c299d68fca85b7f69a7d4c9009334d80177b Mon Sep 17 00:00:00 2001 From: Juan Camilo Date: Thu, 2 Apr 2026 09:46:16 +0200 Subject: [PATCH 28/35] security: pin GitHub Actions to immutable SHA digests Pin all GitHub Actions to commit SHA instead of mutable version tags to prevent supply chain attacks via tag poisoning. This is especially important for third-party actions like oven-sh/setup-bun. --- .github/workflows/pr-checks.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/pr-checks.yml b/.github/workflows/pr-checks.yml index 1b2bba1b..34580f4d 100644 --- a/.github/workflows/pr-checks.yml +++ b/.github/workflows/pr-checks.yml @@ -12,15 +12,15 @@ jobs: steps: - name: Check out repository - uses: actions/checkout@v4 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Set up Node.js - uses: actions/setup-node@v4 + uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4.4.0 with: node-version: 22 - name: Set up Bun - uses: oven-sh/setup-bun@v2 + uses: oven-sh/setup-bun@4bc047ad259df6fc24a6c9b0f9a0cb08cf17fbe5 # v2.0.1 with: bun-version: 1.3.11 From 6c4225f6f4da060d1ca20362f490120d350a1624 Mon Sep 17 00:00:00 2001 From: Juan Camilo Date: Thu, 2 Apr 2026 09:46:53 +0200 Subject: [PATCH 29/35] fix: skip assertMinVersion for third-party providers The version kill-switch calls Anthropic's GrowthBook endpoint to enforce a minimum version. This is currently safe for 3P users only because isAnalyticsDisabled() returns true (disabling GrowthBook). Adding an explicit provider guard makes this safety independent of the analytics stub, preventing 3P users from being blocked by Anthropic's version requirements in case of future upstream merges. 
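The same getAPIProvider() !== 'firstParty' guard now appears both here and in the model
migration from PATCH 27; if more call sites accumulate, a tiny shared helper would keep the
checks consistent. A minimal sketch, assuming src/utils/model/providers.ts (where
getAPIProvider is already exported) is the natural home — this helper is hypothetical and
not introduced by this series:

// Hypothetical addition to src/utils/model/providers.ts, shown only to illustrate
// consolidating the repeated first-party guard; it is not part of this patch.
export function isFirstPartyProvider(): boolean {
  return getAPIProvider() === 'firstParty'
}

Call sites such as assertMinVersion() and migrateSonnet1mToSonnet45() would then read
if (!isFirstPartyProvider()) return instead of repeating the string literal.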
--- src/utils/autoUpdater.ts | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/src/utils/autoUpdater.ts b/src/utils/autoUpdater.ts index 2a5fc6f9..4d4c2bf3 100644 --- a/src/utils/autoUpdater.ts +++ b/src/utils/autoUpdater.ts @@ -9,6 +9,7 @@ import { logEvent, } from 'src/services/analytics/index.js' import { type ReleaseChannel, saveGlobalConfig } from './config.js' +import { getAPIProvider } from './model/providers.js' import { logForDebugging } from './debug.js' import { env } from './env.js' import { getClaudeConfigHomeDir } from './envUtils.js' @@ -72,6 +73,12 @@ export async function assertMinVersion(): Promise { return } + // Skip version check for third-party providers — the min version + // kill-switch is Anthropic-specific and should not block 3P users + if (getAPIProvider() !== 'firstParty') { + return + } + try { const versionConfig = await getDynamicConfig_BLOCKS_ON_INIT<{ minVersion: string From 001f89f62c76d16dae8d30135144cf0d1b2228f8 Mon Sep 17 00:00:00 2001 From: gnanam1990 Date: Thu, 2 Apr 2026 14:55:04 +0530 Subject: [PATCH 30/35] feat: add MCP doctor diagnostics service Add the diagnostics core and report model for MCP health, scope, and config analysis. This creates the structured report used by both text and JSON doctor output. Co-Authored-By: Claude --- src/services/mcp/doctor.test.ts | 540 +++++++++++++++++++++++++ src/services/mcp/doctor.ts | 695 ++++++++++++++++++++++++++++++++ 2 files changed, 1235 insertions(+) create mode 100644 src/services/mcp/doctor.test.ts create mode 100644 src/services/mcp/doctor.ts diff --git a/src/services/mcp/doctor.test.ts b/src/services/mcp/doctor.test.ts new file mode 100644 index 00000000..83b74d3f --- /dev/null +++ b/src/services/mcp/doctor.test.ts @@ -0,0 +1,540 @@ +import assert from 'node:assert/strict' +import test from 'node:test' + +import type { ValidationError } from '../../utils/settings/validation.js' + +import { + buildEmptyDoctorReport, + doctorAllServers, + doctorServer, + findingsFromValidationErrors, + type McpDoctorDependencies, +} from './doctor.js' + +function stdioConfig(scope: 'local' | 'project' | 'user' | 'enterprise', command: string) { + return { + type: 'stdio' as const, + command, + args: [], + scope, + } +} + +function makeDependencies(overrides: Partial = {}): McpDoctorDependencies { + return { + getAllMcpConfigs: async () => ({ servers: {}, errors: [] }), + getMcpConfigsByScope: () => ({ servers: {}, errors: [] }), + getProjectMcpServerStatus: () => 'approved', + isMcpServerDisabled: () => false, + describeMcpConfigFilePath: scope => `scope://${scope}`, + clearServerCache: async () => {}, + connectToServer: async (name, config) => ({ + name, + type: 'connected', + capabilities: {}, + config, + cleanup: async () => {}, + }), + ...overrides, + } +} + +test('buildEmptyDoctorReport returns zeroed summary', () => { + const report = buildEmptyDoctorReport({ + configOnly: true, + scopeFilter: 'project', + targetName: 'filesystem', + }) + + assert.equal(report.targetName, 'filesystem') + assert.equal(report.scopeFilter, 'project') + assert.equal(report.configOnly, true) + assert.deepEqual(report.summary, { + totalReports: 0, + healthy: 0, + warnings: 0, + blocking: 0, + }) + assert.deepEqual(report.findings, []) + assert.deepEqual(report.servers, []) +}) + +test('findingsFromValidationErrors maps missing env warnings into doctor findings', () => { + const validationErrors: ValidationError[] = [ + { + file: '.mcp.json', + path: 'mcpServers.filesystem', + message: 'Missing environment variables: API_KEY, 
API_URL', + suggestion: 'Set the following environment variables: API_KEY, API_URL', + mcpErrorMetadata: { + scope: 'project', + serverName: 'filesystem', + severity: 'warning', + }, + }, + ] + + const findings = findingsFromValidationErrors(validationErrors) + + assert.equal(findings.length, 1) + assert.deepEqual(findings[0], { + blocking: false, + code: 'config.missing_env_vars', + message: 'Missing environment variables: API_KEY, API_URL', + remediation: 'Set the following environment variables: API_KEY, API_URL', + scope: 'project', + serverName: 'filesystem', + severity: 'warn', + sourcePath: '.mcp.json', + }) +}) + +test('findingsFromValidationErrors maps Windows npx warnings into doctor findings', () => { + const validationErrors: ValidationError[] = [ + { + file: '.mcp.json', + path: 'mcpServers.node-tools', + message: "Windows requires 'cmd /c' wrapper to execute npx", + suggestion: + 'Change command to "cmd" with args ["/c", "npx", ...]. See: https://code.claude.com/docs/en/mcp#configure-mcp-servers', + mcpErrorMetadata: { + scope: 'project', + serverName: 'node-tools', + severity: 'warning', + }, + }, + ] + + const findings = findingsFromValidationErrors(validationErrors) + + assert.equal(findings.length, 1) + assert.equal(findings[0]?.code, 'config.windows_npx_wrapper_required') + assert.equal(findings[0]?.serverName, 'node-tools') + assert.equal(findings[0]?.severity, 'warn') + assert.equal(findings[0]?.blocking, false) +}) + +test('findingsFromValidationErrors maps fatal parse errors into blocking findings', () => { + const validationErrors: ValidationError[] = [ + { + file: 'C:/repo/.mcp.json', + path: '', + message: 'MCP config is not a valid JSON', + suggestion: 'Fix the JSON syntax errors in the file', + mcpErrorMetadata: { + scope: 'project', + severity: 'fatal', + }, + }, + ] + + const findings = findingsFromValidationErrors(validationErrors) + + assert.equal(findings.length, 1) + assert.equal(findings[0]?.code, 'config.invalid_json') + assert.equal(findings[0]?.severity, 'error') + assert.equal(findings[0]?.blocking, true) +}) + +test('doctorAllServers reports global validation findings once without duplicating them into every server', async () => { + const localConfig = stdioConfig('local', 'node-local') + const deps = makeDependencies({ + getAllMcpConfigs: async () => ({ + servers: { filesystem: localConfig }, + errors: [], + }), + getMcpConfigsByScope: scope => + scope === 'project' + ? { + servers: {}, + errors: [ + { + file: '.mcp.json', + path: '', + message: 'MCP config is not a valid JSON', + suggestion: 'Fix the JSON syntax errors in the file', + mcpErrorMetadata: { + scope: 'project', + severity: 'fatal', + }, + }, + ], + } + : scope === 'local' + ? 
{ servers: { filesystem: localConfig }, errors: [] } + : { servers: {}, errors: [] }, + }) + + const report = await doctorAllServers({ configOnly: true }, deps) + + assert.equal(report.summary.totalReports, 1) + assert.equal(report.summary.blocking, 1) + assert.equal(report.findings.length, 1) + assert.equal(report.findings[0]?.code, 'config.invalid_json') + assert.deepEqual(report.servers[0]?.findings, []) +}) + +test('doctorServer explains same-name shadowing across scopes', async () => { + const localConfig = stdioConfig('local', 'node-local') + const userConfig = stdioConfig('user', 'node-user') + const deps = makeDependencies({ + getAllMcpConfigs: async () => ({ + servers: { + filesystem: localConfig, + }, + errors: [], + }), + getMcpConfigsByScope: scope => { + switch (scope) { + case 'local': + return { servers: { filesystem: localConfig }, errors: [] } + case 'user': + return { servers: { filesystem: userConfig }, errors: [] } + default: + return { servers: {}, errors: [] } + } + }, + }) + + const report = await doctorServer('filesystem', { configOnly: true }, deps) + assert.equal(report.servers.length, 1) + assert.equal(report.servers[0]?.definitions.length, 2) + assert.equal(report.servers[0]?.definitions.find(def => def.sourceType === 'local')?.runtimeActive, true) + assert.equal(report.servers[0]?.definitions.find(def => def.sourceType === 'user')?.runtimeActive, false) + assert.deepEqual( + report.servers[0]?.findings.map(finding => finding.code).sort(), + ['duplicate.same_name_multiple_scopes', 'scope.shadowed'], + ) +}) + +test('doctorServer reports project servers pending approval', async () => { + const projectConfig = stdioConfig('project', 'node-project') + const deps = makeDependencies({ + getMcpConfigsByScope: scope => + scope === 'project' + ? { servers: { sentry: projectConfig }, errors: [] } + : { servers: {}, errors: [] }, + getProjectMcpServerStatus: name => (name === 'sentry' ? 'pending' : 'approved'), + }) + + const report = await doctorServer('sentry', { configOnly: true }, deps) + assert.equal(report.servers.length, 1) + assert.equal(report.servers[0]?.definitions[0]?.pendingApproval, true) + assert.equal(report.servers[0]?.definitions[0]?.runtimeActive, false) + assert.equal(report.servers[0]?.definitions[0]?.runtimeVisible, false) + assert.equal( + report.servers[0]?.findings.some(finding => finding.code === 'state.pending_project_approval'), + true, + ) +}) + +test('doctorServer does not treat disabled servers as runtime-active or live-check targets', async () => { + let connectCalls = 0 + const localConfig = stdioConfig('local', 'node-local') + const deps = makeDependencies({ + getAllMcpConfigs: async () => ({ + servers: { github: localConfig }, + errors: [], + }), + getMcpConfigsByScope: scope => + scope === 'local' + ? 
{ servers: { github: localConfig }, errors: [] } + : { servers: {}, errors: [] }, + isMcpServerDisabled: name => name === 'github', + connectToServer: async (name, config) => { + connectCalls += 1 + return { + name, + type: 'failed', + config, + error: 'should not connect', + } + }, + }) + + const report = await doctorServer('github', { configOnly: false }, deps) + + assert.equal(connectCalls, 0) + assert.equal(report.summary.blocking, 0) + assert.equal(report.summary.warnings, 1) + assert.equal(report.servers[0]?.definitions[0]?.disabled, true) + assert.equal(report.servers[0]?.definitions[0]?.runtimeActive, false) + assert.equal(report.servers[0]?.definitions[0]?.runtimeVisible, false) + assert.equal(report.servers[0]?.liveCheck.result, 'disabled') + assert.equal( + report.servers[0]?.findings.some(finding => finding.code === 'state.disabled' && finding.severity === 'warn'), + true, + ) +}) + +test('doctorAllServers skips live checks in config-only mode', async () => { + let connectCalls = 0 + const localConfig = stdioConfig('local', 'node-local') + const deps = makeDependencies({ + getAllMcpConfigs: async () => ({ + servers: { linear: localConfig }, + errors: [], + }), + getMcpConfigsByScope: scope => + scope === 'local' + ? { servers: { linear: localConfig }, errors: [] } + : { servers: {}, errors: [] }, + connectToServer: async (name, config) => { + connectCalls += 1 + return { + name, + type: 'connected', + capabilities: {}, + config, + cleanup: async () => {}, + } + }, + }) + + const report = await doctorAllServers({ configOnly: true }, deps) + assert.equal(connectCalls, 0) + assert.equal(report.servers[0]?.liveCheck.attempted, false) + assert.equal(report.servers[0]?.liveCheck.result, 'skipped') +}) + +test('doctorAllServers honors scopeFilter when collecting names', async () => { + const pluginConfig = { + type: 'http' as const, + url: 'https://example.test/mcp', + scope: 'dynamic' as const, + pluginSource: 'plugin:github@official', + } + const deps = makeDependencies({ + getAllMcpConfigs: async () => ({ + servers: { 'plugin:github:github': pluginConfig }, + errors: [], + }), + }) + + const report = await doctorAllServers({ configOnly: true, scopeFilter: 'user' }, deps) + + assert.equal(report.summary.totalReports, 0) + assert.deepEqual(report.servers, []) +}) + +test('doctorAllServers honors scopeFilter when collecting validation errors', async () => { + const userConfig = stdioConfig('user', 'node-user') + const deps = makeDependencies({ + getAllMcpConfigs: async () => ({ + servers: { filesystem: userConfig }, + errors: [], + }), + getMcpConfigsByScope: scope => { + switch (scope) { + case 'project': + return { + servers: {}, + errors: [ + { + file: '.mcp.json', + path: '', + message: 'MCP config is not a valid JSON', + suggestion: 'Fix the JSON syntax errors in the file', + mcpErrorMetadata: { + scope: 'project', + severity: 'fatal', + }, + }, + ], + } + case 'user': + return { servers: { filesystem: userConfig }, errors: [] } + default: + return { servers: {}, errors: [] } + } + }, + }) + + const report = await doctorAllServers({ configOnly: true, scopeFilter: 'user' }, deps) + + assert.equal(report.summary.totalReports, 1) + assert.equal(report.summary.blocking, 0) + assert.deepEqual(report.findings, []) + assert.deepEqual(report.servers[0]?.findings, []) +}) + +test('doctorAllServers includes observed runtime definitions for plugin-only servers', async () => { + const pluginConfig = { + type: 'http' as const, + url: 'https://example.test/mcp', + scope: 'dynamic' as const, + 
pluginSource: 'plugin:github@official', + } + const deps = makeDependencies({ + getAllMcpConfigs: async () => ({ + servers: { 'plugin:github:github': pluginConfig }, + errors: [], + }), + }) + + const report = await doctorAllServers({ configOnly: true }, deps) + + assert.equal(report.summary.totalReports, 1) + assert.equal(report.servers[0]?.definitions.length, 1) + assert.equal(report.servers[0]?.definitions[0]?.sourceType, 'plugin') + assert.equal(report.servers[0]?.definitions[0]?.runtimeActive, true) +}) + +test('doctorAllServers reports disabled plugin servers as disabled, not not-found', async () => { + const pluginConfig = { + type: 'http' as const, + url: 'https://example.test/mcp', + scope: 'dynamic' as const, + pluginSource: 'plugin:github@official', + } + const deps = makeDependencies({ + getAllMcpConfigs: async () => ({ + servers: { 'plugin:github:github': pluginConfig }, + errors: [], + }), + isMcpServerDisabled: name => name === 'plugin:github:github', + }) + + const report = await doctorAllServers({ configOnly: true }, deps) + + assert.equal(report.summary.totalReports, 1) + assert.equal(report.summary.warnings, 1) + assert.equal(report.summary.blocking, 0) + assert.equal(report.servers[0]?.definitions.length, 1) + assert.equal(report.servers[0]?.definitions[0]?.sourceType, 'plugin') + assert.equal(report.servers[0]?.definitions[0]?.disabled, true) + assert.equal(report.servers[0]?.definitions[0]?.runtimeActive, false) + assert.equal( + report.servers[0]?.findings.some(finding => finding.code === 'state.disabled' && !finding.blocking), + true, + ) + assert.equal( + report.servers[0]?.findings.some(finding => finding.code === 'state.not_found'), + false, + ) +}) + +test('doctorServer converts failed live checks into blocking findings', async () => { + const localConfig = stdioConfig('local', 'node-local') + const deps = makeDependencies({ + getAllMcpConfigs: async () => ({ + servers: { github: localConfig }, + errors: [], + }), + getMcpConfigsByScope: scope => + scope === 'local' + ? { servers: { github: localConfig }, errors: [] } + : { servers: {}, errors: [] }, + connectToServer: async (name, config) => ({ + name, + type: 'failed', + config, + error: 'command not found: node-local', + }), + }) + + const report = await doctorServer('github', { configOnly: false }, deps) + + assert.equal(report.summary.blocking, 1) + assert.equal(report.servers[0]?.liveCheck.result, 'failed') + assert.equal( + report.servers[0]?.findings.some( + finding => finding.code === 'stdio.command_not_found' && finding.blocking, + ), + true, + ) +}) + +test('doctorServer converts needs-auth live checks into warning findings', async () => { + const localConfig = stdioConfig('local', 'node-local') + const deps = makeDependencies({ + getAllMcpConfigs: async () => ({ + servers: { sentry: localConfig }, + errors: [], + }), + getMcpConfigsByScope: scope => + scope === 'local' + ? 
{ servers: { sentry: localConfig }, errors: [] } + : { servers: {}, errors: [] }, + connectToServer: async (name, config) => ({ + name, + type: 'needs-auth', + config, + }), + }) + + const report = await doctorServer('sentry', { configOnly: false }, deps) + + assert.equal(report.summary.warnings, 1) + assert.equal(report.summary.blocking, 0) + assert.equal( + report.servers[0]?.findings.some(finding => finding.code === 'auth.needs_auth' && finding.severity === 'warn'), + true, + ) +}) + +test('doctorServer includes observed runtime definition for plugin-only targets', async () => { + const pluginConfig = { + type: 'http' as const, + url: 'https://example.test/mcp', + scope: 'dynamic' as const, + pluginSource: 'plugin:github@official', + } + const deps = makeDependencies({ + getAllMcpConfigs: async () => ({ + servers: { 'plugin:github:github': pluginConfig }, + errors: [], + }), + }) + + const report = await doctorServer('plugin:github:github', { configOnly: true }, deps) + + assert.equal(report.summary.totalReports, 1) + assert.equal(report.servers[0]?.definitions.length, 1) + assert.equal(report.servers[0]?.definitions[0]?.sourceType, 'plugin') + assert.equal(report.servers[0]?.definitions[0]?.runtimeActive, true) +}) + +test('doctorServer with scopeFilter does not leak runtime definition from another scope when target is absent', async () => { + let connectCalls = 0 + const localConfig = stdioConfig('local', 'node-local') + const deps = makeDependencies({ + getAllMcpConfigs: async () => ({ + servers: { github: localConfig }, + errors: [], + }), + getMcpConfigsByScope: scope => + scope === 'local' + ? { servers: { github: localConfig }, errors: [] } + : { servers: {}, errors: [] }, + connectToServer: async (name, config) => { + connectCalls += 1 + return { + name, + type: 'connected', + capabilities: {}, + config, + cleanup: async () => {}, + } + }, + }) + + const report = await doctorServer('github', { configOnly: false, scopeFilter: 'user' }, deps) + + assert.equal(connectCalls, 0) + assert.equal(report.summary.totalReports, 1) + assert.equal(report.summary.blocking, 1) + assert.deepEqual(report.servers[0]?.definitions, []) + assert.equal(report.servers[0]?.liveCheck.result, 'skipped') + assert.equal( + report.servers[0]?.findings.some(finding => finding.code === 'state.not_found' && finding.blocking), + true, + ) +}) + +test('doctorServer reports blocking not-found state when no definition exists', async () => { + const report = await doctorServer('missing-server', { configOnly: true }, makeDependencies()) + + assert.equal(report.summary.blocking, 1) + assert.equal(report.servers[0]?.findings.some(finding => finding.code === 'state.not_found' && finding.blocking), true) +}) diff --git a/src/services/mcp/doctor.ts b/src/services/mcp/doctor.ts new file mode 100644 index 00000000..6cdd15e5 --- /dev/null +++ b/src/services/mcp/doctor.ts @@ -0,0 +1,695 @@ +import type { ValidationError } from '../../utils/settings/validation.js' +import { clearServerCache, connectToServer } from './client.js' +import { + getAllMcpConfigs, + getMcpConfigsByScope, + isMcpServerDisabled, +} from './config.js' +import type { + ConfigScope, + ScopedMcpServerConfig, +} from './types.js' +import { describeMcpConfigFilePath, getProjectMcpServerStatus } from './utils.js' + +export type McpDoctorSeverity = 'info' | 'warn' | 'error' +export type McpDoctorScopeFilter = 'local' | 'project' | 'user' | 'enterprise' + +export type McpDoctorFinding = { + blocking: boolean + code: string + message: string + remediation?: 
string + scope?: string + serverName?: string + severity: McpDoctorSeverity + sourcePath?: string +} + +export type McpDoctorLiveCheck = { + attempted: boolean + durationMs?: number + error?: string + result?: 'connected' | 'needs-auth' | 'failed' | 'pending' | 'disabled' | 'skipped' +} + +export type McpDoctorDefinition = { + name: string + sourceType: + | 'local' + | 'project' + | 'user' + | 'enterprise' + | 'managed' + | 'plugin' + | 'claudeai' + | 'dynamic' + | 'internal' + sourcePath?: string + transport?: string + runtimeVisible: boolean + runtimeActive: boolean + pendingApproval?: boolean + disabled?: boolean +} + +export type McpDoctorServerReport = { + serverName: string + requestedByUser: boolean + definitions: McpDoctorDefinition[] + liveCheck: McpDoctorLiveCheck + findings: McpDoctorFinding[] +} + +export type McpDoctorDependencies = { + getAllMcpConfigs: typeof getAllMcpConfigs + getMcpConfigsByScope: typeof getMcpConfigsByScope + getProjectMcpServerStatus: typeof getProjectMcpServerStatus + isMcpServerDisabled: typeof isMcpServerDisabled + describeMcpConfigFilePath: typeof describeMcpConfigFilePath + connectToServer: typeof connectToServer + clearServerCache: typeof clearServerCache +} + +export type McpDoctorReport = { + generatedAt: string + targetName?: string + scopeFilter?: McpDoctorScopeFilter + configOnly: boolean + summary: { + totalReports: number + healthy: number + warnings: number + blocking: number + } + findings: McpDoctorFinding[] + servers: McpDoctorServerReport[] +} + +const DEFAULT_DEPENDENCIES: McpDoctorDependencies = { + getAllMcpConfigs, + getMcpConfigsByScope, + getProjectMcpServerStatus, + isMcpServerDisabled, + describeMcpConfigFilePath, + connectToServer, + clearServerCache, +} + +export function buildEmptyDoctorReport(options: { + configOnly: boolean + scopeFilter?: McpDoctorScopeFilter + targetName?: string +}): McpDoctorReport { + return { + generatedAt: new Date().toISOString(), + targetName: options.targetName, + scopeFilter: options.scopeFilter, + configOnly: options.configOnly, + summary: { + totalReports: 0, + healthy: 0, + warnings: 0, + blocking: 0, + }, + findings: [], + servers: [], + } +} + +function getFindingCode(error: ValidationError): string { + if (error.message === 'MCP config is not a valid JSON') { + return 'config.invalid_json' + } + if (error.message.startsWith('Missing environment variables:')) { + return 'config.missing_env_vars' + } + if (error.message.includes("Windows requires 'cmd /c' wrapper to execute npx")) { + return 'config.windows_npx_wrapper_required' + } + if (error.message === 'Does not adhere to MCP server configuration schema') { + return 'config.invalid_schema' + } + return 'config.validation_error' +} + +function getSeverity(error: ValidationError): McpDoctorSeverity { + const severity = error.mcpErrorMetadata?.severity + if (severity === 'fatal') { + return 'error' + } + if (severity === 'warning') { + return 'warn' + } + return 'warn' +} + +export function findingsFromValidationErrors( + validationErrors: ValidationError[], +): McpDoctorFinding[] { + return validationErrors.map(error => { + const severity = getSeverity(error) + return { + blocking: severity === 'error', + code: getFindingCode(error), + message: error.message, + remediation: error.suggestion, + scope: error.mcpErrorMetadata?.scope, + serverName: error.mcpErrorMetadata?.serverName, + severity, + sourcePath: error.file, + } + }) +} + +function splitValidationFindings(validationFindings: McpDoctorFinding[]): { + globalFindings: 
McpDoctorFinding[] + serverFindingsByName: Map +} { + const globalFindings: McpDoctorFinding[] = [] + const serverFindingsByName = new Map() + + for (const finding of validationFindings) { + if (!finding.serverName) { + globalFindings.push(finding) + continue + } + + const findings = serverFindingsByName.get(finding.serverName) ?? [] + findings.push(finding) + serverFindingsByName.set(finding.serverName, findings) + } + + return { + globalFindings, + serverFindingsByName, + } +} + +function getSourceType(config: ScopedMcpServerConfig): McpDoctorDefinition['sourceType'] { + if (config.scope === 'claudeai') { + return 'claudeai' + } + if (config.scope === 'dynamic') { + return config.pluginSource ? 'plugin' : 'dynamic' + } + if (config.scope === 'managed') { + return 'managed' + } + return config.scope +} + +function getTransport(config: ScopedMcpServerConfig): string { + return config.type ?? 'stdio' +} + +function getConfigSignature(config: ScopedMcpServerConfig): string { + switch (config.type) { + case 'sse': + case 'http': + case 'ws': + case 'claudeai-proxy': + return `${config.scope}:${config.type}:${config.url}` + case 'sdk': + return `${config.scope}:${config.type}:${config.name}` + default: + return `${config.scope}:${config.type ?? 'stdio'}:${config.command}:${JSON.stringify(config.args ?? [])}` + } +} + +function isSameDefinition( + config: ScopedMcpServerConfig, + activeConfig: ScopedMcpServerConfig | undefined, +): boolean { + if (!activeConfig) { + return false + } + return getSourceType(config) === getSourceType(activeConfig) && getConfigSignature(config) === getConfigSignature(activeConfig) +} + +function buildScopeDefinitions( + name: string, + scope: ConfigScope, + servers: Record, + activeConfig: ScopedMcpServerConfig | undefined, + deps: McpDoctorDependencies, +): McpDoctorDefinition[] { + const config = servers[name] + if (!config) { + return [] + } + + const pendingApproval = + scope === 'project' ? deps.getProjectMcpServerStatus(name) === 'pending' : false + const disabled = deps.isMcpServerDisabled(name) + const runtimeActive = !disabled && isSameDefinition(config, activeConfig) + + return [ + { + name, + sourceType: getSourceType(config), + sourcePath: deps.describeMcpConfigFilePath(scope), + transport: getTransport(config), + runtimeVisible: runtimeActive, + runtimeActive, + pendingApproval, + disabled, + }, + ] +} + +function shouldIncludeScope( + scope: ConfigScope, + scopeFilter: McpDoctorScopeFilter | undefined, +): boolean { + if (!scopeFilter) { + return scope === 'enterprise' || scope === 'local' || scope === 'project' || scope === 'user' + } + return scope === scopeFilter +} + +function getValidationErrorsForSelectedScopes( + scopeResults: { + enterprise: ReturnType + local: ReturnType + project: ReturnType + user: ReturnType + }, + scopeFilter: McpDoctorScopeFilter | undefined, +): ValidationError[] { + return [ + ...(shouldIncludeScope('enterprise', scopeFilter) ? scopeResults.enterprise.errors : []), + ...(shouldIncludeScope('local', scopeFilter) ? scopeResults.local.errors : []), + ...(shouldIncludeScope('project', scopeFilter) ? scopeResults.project.errors : []), + ...(shouldIncludeScope('user', scopeFilter) ? 
scopeResults.user.errors : []), + ] +} + +function buildObservedDefinition( + name: string, + activeConfig: ScopedMcpServerConfig, + options?: { + disabled?: boolean + runtimeActive?: boolean + runtimeVisible?: boolean + }, +): McpDoctorDefinition { + return { + name, + sourceType: getSourceType(activeConfig), + sourcePath: + getSourceType(activeConfig) === 'plugin' + ? `plugin:${activeConfig.pluginSource ?? 'unknown'}` + : getSourceType(activeConfig) === 'claudeai' + ? 'claude.ai' + : activeConfig.scope, + transport: getTransport(activeConfig), + runtimeVisible: options?.runtimeVisible ?? true, + runtimeActive: options?.runtimeActive ?? true, + disabled: options?.disabled ?? false, + } +} + +function hasDefinitionForRuntimeSource( + definitions: McpDoctorDefinition[], + runtimeConfig: ScopedMcpServerConfig, + deps: McpDoctorDependencies, +): boolean { + const runtimeSourceType = getSourceType(runtimeConfig) + const runtimeSourcePath = + runtimeSourceType === 'plugin' + ? `plugin:${runtimeConfig.pluginSource ?? 'unknown'}` + : runtimeSourceType === 'claudeai' + ? 'claude.ai' + : deps.describeMcpConfigFilePath(runtimeConfig.scope) + + return definitions.some( + definition => + definition.sourceType === runtimeSourceType && + definition.sourcePath === runtimeSourcePath && + definition.transport === getTransport(runtimeConfig), + ) +} + +function buildShadowingFindings(definitions: McpDoctorDefinition[]): McpDoctorFinding[] { + const userEditable = definitions.filter(definition => + definition.sourceType === 'local' || + definition.sourceType === 'project' || + definition.sourceType === 'user' || + definition.sourceType === 'enterprise', + ) + + if (userEditable.length <= 1) { + return [] + } + + const active = userEditable.find(definition => definition.runtimeActive) ?? 
userEditable[0] + return [ + { + blocking: false, + code: 'duplicate.same_name_multiple_scopes', + message: `Server is defined in multiple config scopes; active source is ${active.sourceType}`, + remediation: 'Remove or rename one of the duplicate definitions to avoid confusion.', + serverName: active.name, + severity: 'warn', + }, + { + blocking: false, + code: 'scope.shadowed', + message: `${active.name} has shadowed definitions in lower-precedence config scopes.`, + remediation: 'Inspect the other definitions and remove the ones you no longer want to keep.', + serverName: active.name, + severity: 'warn', + }, + ] +} + +function buildStateFindings(definitions: McpDoctorDefinition[]): McpDoctorFinding[] { + const findings: McpDoctorFinding[] = [] + + for (const definition of definitions) { + if (definition.pendingApproval) { + findings.push({ + blocking: false, + code: 'state.pending_project_approval', + message: `${definition.name} is declared in project config but pending project approval.`, + remediation: 'Approve the server in the project MCP approval flow before expecting it to become active.', + scope: 'project', + serverName: definition.name, + severity: 'warn', + sourcePath: definition.sourcePath, + }) + } + + if (definition.disabled) { + findings.push({ + blocking: false, + code: 'state.disabled', + message: `${definition.name} is currently disabled.`, + remediation: 'Re-enable the server before expecting it to be available at runtime.', + serverName: definition.name, + severity: 'warn', + sourcePath: definition.sourcePath, + }) + } + } + + return findings +} + +function summarizeReport(report: McpDoctorReport): McpDoctorReport { + const allFindings = [...report.findings, ...report.servers.flatMap(server => server.findings)] + const blocking = allFindings.filter(finding => finding.blocking).length + const warnings = allFindings.filter(finding => finding.severity === 'warn').length + const healthy = report.servers.filter( + server => + server.liveCheck.result === 'connected' && + server.findings.every(finding => !finding.blocking && finding.severity !== 'warn'), + ).length + + return { + ...report, + summary: { + totalReports: report.servers.length, + healthy, + warnings, + blocking, + }, + } +} + +async function getLiveCheck( + name: string, + activeConfig: ScopedMcpServerConfig | undefined, + configOnly: boolean, + definitions: McpDoctorDefinition[], + deps: McpDoctorDependencies, +): Promise { + if (configOnly) { + return { attempted: false, result: 'skipped' } + } + + if (!activeConfig) { + if (definitions.some(definition => definition.pendingApproval)) { + return { attempted: false, result: 'pending' } + } + if (definitions.some(definition => definition.disabled)) { + return { attempted: false, result: 'disabled' } + } + return { attempted: false, result: 'skipped' } + } + + const startedAt = Date.now() + const connection = await deps.connectToServer(name, activeConfig) + const durationMs = Date.now() - startedAt + + try { + switch (connection.type) { + case 'connected': + return { attempted: true, result: 'connected', durationMs } + case 'needs-auth': + return { attempted: true, result: 'needs-auth', durationMs } + case 'disabled': + return { attempted: true, result: 'disabled', durationMs } + case 'pending': + return { attempted: true, result: 'pending', durationMs } + case 'failed': + return { + attempted: true, + result: 'failed', + durationMs, + error: connection.error, + } + } + } finally { + await deps.clearServerCache(name, activeConfig).catch(() => { + // Best-effort 
cleanup for diagnostic connections.
+    })
+  }
+}
+
+function buildLiveFindings(
+  name: string,
+  definitions: McpDoctorDefinition[],
+  liveCheck: McpDoctorLiveCheck,
+): McpDoctorFinding[] {
+  const activeDefinition = definitions.find(definition => definition.runtimeActive)
+
+  if (liveCheck.result === 'needs-auth') {
+    return [
+      {
+        blocking: false,
+        code: 'auth.needs_auth',
+        message: `${name} requires authentication before it can be used.`,
+        remediation: 'Authenticate the server and then rerun the doctor command.',
+        serverName: name,
+        severity: 'warn',
+        sourcePath: activeDefinition?.sourcePath,
+      },
+    ]
+  }
+
+  if (liveCheck.result === 'failed') {
+    const commandNotFound =
+      activeDefinition?.transport === 'stdio' &&
+      typeof liveCheck.error === 'string' &&
+      liveCheck.error.toLowerCase().includes('not found')
+
+    return [
+      {
+        blocking: true,
+        code: commandNotFound ? 'stdio.command_not_found' : 'health.failed',
+        message: liveCheck.error
+          ? `${name} failed its live health check: ${liveCheck.error}`
+          : `${name} failed its live health check.`,
+        remediation: commandNotFound
+          ? 'Verify the configured executable exists on PATH or use a full executable path.'
+          : 'Inspect the server configuration and retry the connection once the underlying problem is fixed.',
+        serverName: name,
+        severity: 'error',
+        sourcePath: activeDefinition?.sourcePath,
+      },
+    ]
+  }
+
+  return []
+}
+
+async function buildServerReport(
+  name: string,
+  options: {
+    configOnly: boolean
+    requestedByUser: boolean
+    scopeFilter?: McpDoctorScopeFilter
+  },
+  validationFindingsByName: Map<string, McpDoctorFinding[]>,
+  deps: McpDoctorDependencies,
+): Promise {
+  const scopeResults = {
+    enterprise: deps.getMcpConfigsByScope('enterprise'),
+    local: deps.getMcpConfigsByScope('local'),
+    project: deps.getMcpConfigsByScope('project'),
+    user: deps.getMcpConfigsByScope('user'),
+  }
+  const { servers: activeServers } = await deps.getAllMcpConfigs()
+  const serverDisabled = deps.isMcpServerDisabled(name)
+  const runtimeConfig = activeServers[name] ?? undefined
+  const activeConfig = serverDisabled ? undefined : runtimeConfig
+
+  const definitions = [
+    ...(shouldIncludeScope('enterprise', options.scopeFilter)
+      ? buildScopeDefinitions(name, 'enterprise', scopeResults.enterprise.servers, activeConfig, deps)
+      : []),
+    ...(shouldIncludeScope('local', options.scopeFilter)
+      ? buildScopeDefinitions(name, 'local', scopeResults.local.servers, activeConfig, deps)
+      : []),
+    ...(shouldIncludeScope('project', options.scopeFilter)
+      ? buildScopeDefinitions(name, 'project', scopeResults.project.servers, activeConfig, deps)
+      : []),
+    ...(shouldIncludeScope('user', options.scopeFilter)
+      ? buildScopeDefinitions(name, 'user', scopeResults.user.servers, activeConfig, deps)
+      : []),
+  ]
+
+  const shouldAddObservedDefinition =
+    !!runtimeConfig &&
+    !hasDefinitionForRuntimeSource(definitions, runtimeConfig, deps) &&
+    ((definitions.length === 0 && !options.scopeFilter) ||
+      (definitions.length > 0 && definitions.every(definition => !definition.runtimeActive)))
+
+  if (runtimeConfig && shouldAddObservedDefinition) {
+    definitions.push(
+      buildObservedDefinition(name, runtimeConfig, {
+        disabled: serverDisabled,
+        runtimeActive: !serverDisabled,
+        runtimeVisible: !serverDisabled,
+      }),
+    )
+  }
+
+  const visibleRuntimeConfig =
+    definitions.some(definition => definition.runtimeActive) || shouldAddObservedDefinition
+      ? activeConfig
+      : undefined
+
+  const findings: McpDoctorFinding[] = [
+    ...(validationFindingsByName.get(name) ?? []),
+    ...buildShadowingFindings(definitions),
+    ...buildStateFindings(definitions),
+  ]
+
+  if (definitions.length === 0 && !shouldAddObservedDefinition) {
+    findings.push({
+      blocking: true,
+      code: 'state.not_found',
+      message: `${name} was not found in the selected MCP configuration sources.`,
+      remediation: 'Check the server name and scope, or add the MCP server before retrying.',
+      serverName: name,
+      severity: 'error',
+    })
+  }
+
+  const liveCheck = await getLiveCheck(name, visibleRuntimeConfig, options.configOnly, definitions, deps)
+  findings.push(...buildLiveFindings(name, definitions, liveCheck))
+
+  return {
+    serverName: name,
+    requestedByUser: options.requestedByUser,
+    definitions,
+    liveCheck,
+    findings,
+  }
+}
+
+function getServerNames(
+  scopeServers: Array>,
+  activeServers: Record,
+  includeActiveServers: boolean,
+): string[] {
+  const names = new Set(includeActiveServers ? Object.keys(activeServers) : [])
+  for (const servers of scopeServers) {
+    for (const name of Object.keys(servers)) {
+      names.add(name)
+    }
+  }
+  return [...names].sort()
+}
+
+export async function doctorAllServers(
+  options: { configOnly: boolean; scopeFilter?: McpDoctorScopeFilter } = {
+    configOnly: false,
+  },
+  deps: McpDoctorDependencies = DEFAULT_DEPENDENCIES,
+): Promise<McpDoctorReport> {
+  const report = buildEmptyDoctorReport(options)
+  const scopeResults = {
+    enterprise: deps.getMcpConfigsByScope('enterprise'),
+    local: deps.getMcpConfigsByScope('local'),
+    project: deps.getMcpConfigsByScope('project'),
+    user: deps.getMcpConfigsByScope('user'),
+  }
+  const validationFindings = findingsFromValidationErrors(
+    getValidationErrorsForSelectedScopes(scopeResults, options.scopeFilter),
+  )
+  const { globalFindings, serverFindingsByName } = splitValidationFindings(validationFindings)
+  const { servers: activeServers } = await deps.getAllMcpConfigs()
+  const names = getServerNames(
+    [
+      ...(shouldIncludeScope('enterprise', options.scopeFilter) ? [scopeResults.enterprise.servers] : []),
+      ...(shouldIncludeScope('local', options.scopeFilter) ? [scopeResults.local.servers] : []),
+      ...(shouldIncludeScope('project', options.scopeFilter) ? [scopeResults.project.servers] : []),
+      ...(shouldIncludeScope('user', options.scopeFilter) ? [scopeResults.user.servers] : []),
+    ],
+    activeServers,
+    !options.scopeFilter,
+  )
+
+  const servers = await Promise.all(
+    names.map(name =>
+      buildServerReport(
+        name,
+        {
+          configOnly: options.configOnly,
+          requestedByUser: false,
+          scopeFilter: options.scopeFilter,
+        },
+        serverFindingsByName,
+        deps,
+      ),
+    ),
+  )
+
+  report.servers = servers
+  report.findings = globalFindings
+  return summarizeReport(report)
+}
+
+export async function doctorServer(
+  name: string,
+  options: { configOnly: boolean; scopeFilter?: McpDoctorScopeFilter },
+  deps: McpDoctorDependencies = DEFAULT_DEPENDENCIES,
+): Promise<McpDoctorReport> {
+  const report = buildEmptyDoctorReport({ ...options, targetName: name })
+  const scopeResults = {
+    enterprise: deps.getMcpConfigsByScope('enterprise'),
+    local: deps.getMcpConfigsByScope('local'),
+    project: deps.getMcpConfigsByScope('project'),
+    user: deps.getMcpConfigsByScope('user'),
+  }
+  const validationFindings = findingsFromValidationErrors(
+    getValidationErrorsForSelectedScopes(scopeResults, options.scopeFilter),
+  )
+  const { globalFindings, serverFindingsByName } = splitValidationFindings(validationFindings)
+  const server = await buildServerReport(
+    name,
+    {
+      configOnly: options.configOnly,
+      requestedByUser: true,
+      scopeFilter: options.scopeFilter,
+    },
+    serverFindingsByName,
+    deps,
+  )
+  report.servers = [server]
+  report.findings = globalFindings
+  return summarizeReport(report)
+}

From ad1f328672ecc9046c772507e771cdc37f8050b2 Mon Sep 17 00:00:00 2001
From: gnanam1990
Date: Thu, 2 Apr 2026 14:55:05 +0530
Subject: [PATCH 31/35] feat(mcp): add doctor command

Add the MCP doctor subcommand with text and JSON output, config-only
mode, and scope filtering so users can diagnose MCP issues from the CLI.

Co-Authored-By: Claude
---
 src/cli/handlers/mcp.tsx               | 99 +++++++++++++++++++++-
 src/commands/mcp/doctorCommand.test.ts | 19 +++++
 src/commands/mcp/doctorCommand.ts      | 25 +++++++
 src/main.tsx                           |  2 +
 4 files changed, 144 insertions(+), 1 deletion(-)
 create mode 100644 src/commands/mcp/doctorCommand.test.ts
 create mode 100644 src/commands/mcp/doctorCommand.ts

diff --git a/src/cli/handlers/mcp.tsx b/src/cli/handlers/mcp.tsx
index e530c268..bf43d490 100644
--- a/src/cli/handlers/mcp.tsx
+++ b/src/cli/handlers/mcp.tsx
@@ -12,6 +12,7 @@ import { render } from '../../ink.js';
 import { KeybindingSetup } from '../../keybindings/KeybindingProviderSetup.js';
 import { type AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS, logEvent } from '../../services/analytics/index.js';
 import { clearMcpClientConfig, clearServerTokensFromLocalStorage, getMcpClientConfig, readClientSecret, saveMcpClientSecret } from '../../services/mcp/auth.js';
+import { doctorAllServers, doctorServer, type McpDoctorReport, type McpDoctorScopeFilter } from '../../services/mcp/doctor.js';
 import { connectToServer, getMcpServerConnectionBatchSize } from '../../services/mcp/client.js';
 import { addMcpConfig, getAllMcpConfigs, getMcpConfigByName, getMcpConfigsByScope, removeMcpConfig } from '../../services/mcp/config.js';
 import type { ConfigScope, ScopedMcpServerConfig } from '../../services/mcp/types.js';
@@ -23,6 +24,102 @@ import { gracefulShutdown } from '../../utils/gracefulShutdown.js';
 import { safeParseJSON } from '../../utils/json.js';
 import { getPlatform } from '../../utils/platform.js';
 import { cliError, cliOk } from '../exit.js';
+
+function formatDoctorReport(report: McpDoctorReport): string {
+  const lines: string[] = []
+  lines.push('MCP Doctor')
+  lines.push('')
+  lines.push('Summary')
+  lines.push(`- ${report.summary.totalReports} server reports generated`)
+  lines.push(`- ${report.summary.healthy} healthy`)
+  lines.push(`- ${report.summary.warnings} warnings`)
+  lines.push(`- ${report.summary.blocking} blocking issues`)
+
+  if (report.targetName) {
+    lines.push(`- target: ${report.targetName}`)
+  }
+
+  for (const server of report.servers) {
+    lines.push('')
+    lines.push(server.serverName)
+
+    const activeDefinition = server.definitions.find(definition => definition.runtimeActive)
+    if (activeDefinition) {
+      lines.push(`- Active source: ${activeDefinition.sourceType}`)
+      lines.push(`- Transport: ${activeDefinition.transport ?? 'unknown'}`)
+    }
+
+    if (server.definitions.length > 1) {
+      const extraDefinitions = server.definitions
+        .filter(definition => !definition.runtimeActive)
+        .map(definition => definition.sourceType)
+      if (extraDefinitions.length > 0) {
+        lines.push(`- Additional definitions: ${extraDefinitions.join(', ')}`)
+      }
+    }
+
+    if (server.liveCheck.result) {
+      const stateLikeResults = new Set(['disabled', 'pending', 'skipped'])
+      const label = stateLikeResults.has(server.liveCheck.result)
+        ? 'State'
+        : 'Live check'
+      lines.push(`- ${label}: ${server.liveCheck.result}`)
+    }
+
+    if (server.liveCheck.error) {
+      lines.push(`- Error: ${server.liveCheck.error}`)
+    }
+
+    for (const finding of server.findings) {
+      lines.push(`- ${finding.message}`)
+      if (finding.remediation) {
+        lines.push(`- Fix: ${finding.remediation}`)
+      }
+    }
+  }
+
+  if (report.findings.length > 0) {
+    lines.push('')
+    lines.push('Global findings')
+    for (const finding of report.findings) {
+      lines.push(`- ${finding.message}`)
+      if (finding.remediation) {
+        lines.push(`- Fix: ${finding.remediation}`)
+      }
+    }
+  }
+
+  return lines.join('\n')
+}
+
+export async function mcpDoctorHandler(name: string | undefined, options: {
+  scope?: string;
+  configOnly?: boolean;
+  json?: boolean;
+}): Promise<void> {
+  try {
+    const scopeFilter = options.scope ? ensureConfigScope(options.scope) as McpDoctorScopeFilter : undefined
+    const configOnly = !!options.configOnly
+    const report = name
+      ? await doctorServer(name, { configOnly, scopeFilter })
+      : await doctorAllServers({ configOnly, scopeFilter })
+
+    if (options.json) {
+      process.stdout.write(`${JSON.stringify(report, null, 2)}\n`)
+    } else {
+      process.stdout.write(`${formatDoctorReport(report)}\n`)
+    }
+
+    // On Windows, exiting immediately after a single failed HTTP MCP health check
+    // can trip a libuv assertion while async handle shutdown is still settling.
+    // Let the event loop drain briefly before exiting this one-shot command.
+    await new Promise(resolve => setTimeout(resolve, 50))
+    process.exit(report.summary.blocking > 0 ? 
1 : 0) + return + } catch (error) { + cliError((error as Error).message) + } +} async function checkMcpServerHealth(name: string, server: ScopedMcpServerConfig): Promise { try { const result = await connectToServer(name, server); @@ -359,4 +456,4 @@ export async function mcpResetChoicesHandler(): Promise { })); cliOk('All project-scoped (.mcp.json) server approvals and rejections have been reset.\n' + 'You will be prompted for approval next time you start Claude Code.'); } -//# sourceMappingURL=data:application/json;charset=utf-8;base64,{"version":3,"names":["stat","pMap","cwd","React","MCPServerDesktopImportDialog","render","KeybindingSetup","AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS","logEvent","clearMcpClientConfig","clearServerTokensFromLocalStorage","getMcpClientConfig","readClientSecret","saveMcpClientSecret","connectToServer","getMcpServerConnectionBatchSize","addMcpConfig","getAllMcpConfigs","getMcpConfigByName","getMcpConfigsByScope","removeMcpConfig","ConfigScope","ScopedMcpServerConfig","describeMcpConfigFilePath","ensureConfigScope","getScopeLabel","AppStateProvider","getCurrentProjectConfig","getGlobalConfig","saveCurrentProjectConfig","isFsInaccessible","gracefulShutdown","safeParseJSON","getPlatform","cliError","cliOk","checkMcpServerHealth","name","server","Promise","result","type","_error","mcpServeHandler","debug","verbose","providedCwd","error","setup","undefined","startMCPServer","mcpRemoveHandler","options","scope","serverBeforeRemoval","cleanupSecureStorage","process","stdout","write","projectConfig","globalConfig","servers","projectServers","mcpJsonExists","scopes","Array","Exclude","mcpServers","push","length","stderr","forEach","Error","message","mcpListHandler","configs","Object","keys","console","log","entries","results","status","concurrency","url","args","isArray","command","join","mcpGetHandler","headers","key","value","oauth","clientId","callbackPort","parts","clientConfig","clientSecret","env","mcpAddJsonHandler","json","parsedJson","needsSecret","transportType","String","source","mcpAddFromDesktopHandler","platform","readClaudeDesktopMcpServers","unmount","exitOnCtrlC","mcpResetChoicesHandler","current","enabledMcpjsonServers","disabledMcpjsonServers","enableAllProjectMcpServers"],"sources":["mcp.tsx"],"sourcesContent":["/**\n * MCP subcommand handlers — extracted from main.tsx for lazy loading.\n * These are dynamically imported only when the corresponding `claude mcp *` command runs.\n */\n\nimport { stat } from 'fs/promises'\nimport pMap from 'p-map'\nimport { cwd } from 'process'\nimport React from 'react'\nimport { MCPServerDesktopImportDialog } from '../../components/MCPServerDesktopImportDialog.js'\nimport { render } from '../../ink.js'\nimport { KeybindingSetup } from '../../keybindings/KeybindingProviderSetup.js'\nimport {\n  type AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS,\n  logEvent,\n} from '../../services/analytics/index.js'\nimport {\n  clearMcpClientConfig,\n  clearServerTokensFromLocalStorage,\n  getMcpClientConfig,\n  readClientSecret,\n  saveMcpClientSecret,\n} from '../../services/mcp/auth.js'\nimport {\n  connectToServer,\n  getMcpServerConnectionBatchSize,\n} from '../../services/mcp/client.js'\nimport {\n  addMcpConfig,\n  getAllMcpConfigs,\n  getMcpConfigByName,\n  getMcpConfigsByScope,\n  removeMcpConfig,\n} from '../../services/mcp/config.js'\nimport type {\n  ConfigScope,\n  ScopedMcpServerConfig,\n} from '../../services/mcp/types.js'\nimport {\n  describeMcpConfigFilePath,\n  ensureConfigScope,\n  
getScopeLabel,\n} from '../../services/mcp/utils.js'\nimport { AppStateProvider } from '../../state/AppState.js'\nimport {\n  getCurrentProjectConfig,\n  getGlobalConfig,\n  saveCurrentProjectConfig,\n} from '../../utils/config.js'\nimport { isFsInaccessible } from '../../utils/errors.js'\nimport { gracefulShutdown } from '../../utils/gracefulShutdown.js'\nimport { safeParseJSON } from '../../utils/json.js'\nimport { getPlatform } from '../../utils/platform.js'\nimport { cliError, cliOk } from '../exit.js'\n\nasync function checkMcpServerHealth(\n  name: string,\n  server: ScopedMcpServerConfig,\n): Promise<string> {\n  try {\n    const result = await connectToServer(name, server)\n    if (result.type === 'connected') {\n      return '✓ Connected'\n    } else if (result.type === 'needs-auth') {\n      return '! Needs authentication'\n    } else {\n      return '✗ Failed to connect'\n    }\n  } catch (_error) {\n    return '✗ Connection error'\n  }\n}\n\n// mcp serve (lines 4512–4532)\nexport async function mcpServeHandler({\n  debug,\n  verbose,\n}: {\n  debug?: boolean\n  verbose?: boolean\n}): Promise<void> {\n  const providedCwd = cwd()\n  logEvent('tengu_mcp_start', {})\n\n  try {\n    await stat(providedCwd)\n  } catch (error) {\n    if (isFsInaccessible(error)) {\n      cliError(`Error: Directory ${providedCwd} does not exist`)\n    }\n    throw error\n  }\n\n  try {\n    const { setup } = await import('../../setup.js')\n    await setup(providedCwd, 'default', false, false, undefined, false)\n    const { startMCPServer } = await import('../../entrypoints/mcp.js')\n    await startMCPServer(providedCwd, debug ?? false, verbose ?? false)\n  } catch (error) {\n    cliError(`Error: Failed to start MCP server: ${error}`)\n  }\n}\n\n// mcp remove (lines 4545–4635)\nexport async function mcpRemoveHandler(\n  name: string,\n  options: { scope?: string },\n): Promise<void> {\n  // Look up config before removing so we can clean up secure storage\n  const serverBeforeRemoval = getMcpConfigByName(name)\n\n  const cleanupSecureStorage = () => {\n    if (\n      serverBeforeRemoval &&\n      (serverBeforeRemoval.type === 'sse' ||\n        serverBeforeRemoval.type === 'http')\n    ) {\n      clearServerTokensFromLocalStorage(name, serverBeforeRemoval)\n      clearMcpClientConfig(name, serverBeforeRemoval)\n    }\n  }\n\n  try {\n    if (options.scope) {\n      const scope = ensureConfigScope(options.scope)\n      logEvent('tengu_mcp_delete', {\n        name: name as AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS,\n        scope:\n          scope as AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS,\n      })\n\n      await removeMcpConfig(name, scope)\n      cleanupSecureStorage()\n      process.stdout.write(`Removed MCP server ${name} from ${scope} config\\n`)\n      cliOk(`File modified: ${describeMcpConfigFilePath(scope)}`)\n    }\n\n    // If no scope specified, check where the server exists\n    const projectConfig = getCurrentProjectConfig()\n    const globalConfig = getGlobalConfig()\n\n    // Check if server exists in project scope (.mcp.json)\n    const { servers: projectServers } = getMcpConfigsByScope('project')\n    const mcpJsonExists = !!projectServers[name]\n\n    // Count how many scopes contain this server\n    const scopes: Array<Exclude<ConfigScope, 'dynamic'>> = []\n    if (projectConfig.mcpServers?.[name]) scopes.push('local')\n    if (mcpJsonExists) scopes.push('project')\n    if (globalConfig.mcpServers?.[name]) scopes.push('user')\n\n    if (scopes.length 
=== 0) {\n      cliError(`No MCP server found with name: \"${name}\"`)\n    } else if (scopes.length === 1) {\n      // Server exists in only one scope, remove it\n      const scope = scopes[0]!\n      logEvent('tengu_mcp_delete', {\n        name: name as AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS,\n        scope:\n          scope as AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS,\n      })\n\n      await removeMcpConfig(name, scope)\n      cleanupSecureStorage()\n      process.stdout.write(\n        `Removed MCP server \"${name}\" from ${scope} config\\n`,\n      )\n      cliOk(`File modified: ${describeMcpConfigFilePath(scope)}`)\n    } else {\n      // Server exists in multiple scopes\n      process.stderr.write(`MCP server \"${name}\" exists in multiple scopes:\\n`)\n      scopes.forEach(scope => {\n        process.stderr.write(\n          `  - ${getScopeLabel(scope)} (${describeMcpConfigFilePath(scope)})\\n`,\n        )\n      })\n      process.stderr.write('\\nTo remove from a specific scope, use:\\n')\n      scopes.forEach(scope => {\n        process.stderr.write(`  claude mcp remove \"${name}\" -s ${scope}\\n`)\n      })\n      cliError()\n    }\n  } catch (error) {\n    cliError((error as Error).message)\n  }\n}\n\n// mcp list (lines 4641–4688)\nexport async function mcpListHandler(): Promise<void> {\n  logEvent('tengu_mcp_list', {})\n  const { servers: configs } = await getAllMcpConfigs()\n  if (Object.keys(configs).length === 0) {\n    // biome-ignore lint/suspicious/noConsole:: intentional console output\n    console.log(\n      'No MCP servers configured. Use `claude mcp add` to add a server.',\n    )\n  } else {\n    // biome-ignore lint/suspicious/noConsole:: intentional console output\n    console.log('Checking MCP server health...\\n')\n\n    // Check servers concurrently\n    const entries = Object.entries(configs)\n    const results = await pMap(\n      entries,\n      async ([name, server]) => ({\n        name,\n        server,\n        status: await checkMcpServerHealth(name, server),\n      }),\n      { concurrency: getMcpServerConnectionBatchSize() },\n    )\n\n    for (const { name, server, status } of results) {\n      // Intentionally excluding sse-ide servers here since they're internal\n      if (server.type === 'sse') {\n        // biome-ignore lint/suspicious/noConsole:: intentional console output\n        console.log(`${name}: ${server.url} (SSE) - ${status}`)\n      } else if (server.type === 'http') {\n        // biome-ignore lint/suspicious/noConsole:: intentional console output\n        console.log(`${name}: ${server.url} (HTTP) - ${status}`)\n      } else if (server.type === 'claudeai-proxy') {\n        // biome-ignore lint/suspicious/noConsole:: intentional console output\n        console.log(`${name}: ${server.url} - ${status}`)\n      } else if (!server.type || server.type === 'stdio') {\n        const args = Array.isArray(server.args) ? 
server.args : []\n        // biome-ignore lint/suspicious/noConsole:: intentional console output\n        console.log(`${name}: ${server.command} ${args.join(' ')} - ${status}`)\n      }\n    }\n  }\n  // Use gracefulShutdown to properly clean up MCP server connections\n  // (process.exit bypasses cleanup handlers, leaving child processes orphaned)\n  await gracefulShutdown(0)\n}\n\n// mcp get (lines 4694–4786)\nexport async function mcpGetHandler(name: string): Promise<void> {\n  logEvent('tengu_mcp_get', {\n    name: name as AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS,\n  })\n  const server = getMcpConfigByName(name)\n  if (!server) {\n    cliError(`No MCP server found with name: ${name}`)\n  }\n\n  // biome-ignore lint/suspicious/noConsole:: intentional console output\n  console.log(`${name}:`)\n  // biome-ignore lint/suspicious/noConsole:: intentional console output\n  console.log(`  Scope: ${getScopeLabel(server.scope)}`)\n\n  // Check server health\n  const status = await checkMcpServerHealth(name, server)\n  // biome-ignore lint/suspicious/noConsole:: intentional console output\n  console.log(`  Status: ${status}`)\n\n  // Intentionally excluding sse-ide servers here since they're internal\n  if (server.type === 'sse') {\n    // biome-ignore lint/suspicious/noConsole:: intentional console output\n    console.log(`  Type: sse`)\n    // biome-ignore lint/suspicious/noConsole:: intentional console output\n    console.log(`  URL: ${server.url}`)\n    if (server.headers) {\n      // biome-ignore lint/suspicious/noConsole:: intentional console output\n      console.log('  Headers:')\n      for (const [key, value] of Object.entries(server.headers)) {\n        // biome-ignore lint/suspicious/noConsole:: intentional console output\n        console.log(`    ${key}: ${value}`)\n      }\n    }\n    if (server.oauth?.clientId || server.oauth?.callbackPort) {\n      const parts: string[] = []\n      if (server.oauth.clientId) {\n        parts.push('client_id configured')\n        const clientConfig = getMcpClientConfig(name, server)\n        if (clientConfig?.clientSecret) parts.push('client_secret configured')\n      }\n      if (server.oauth.callbackPort)\n        parts.push(`callback_port ${server.oauth.callbackPort}`)\n      // biome-ignore lint/suspicious/noConsole:: intentional console output\n      console.log(`  OAuth: ${parts.join(', ')}`)\n    }\n  } else if (server.type === 'http') {\n    // biome-ignore lint/suspicious/noConsole:: intentional console output\n    console.log(`  Type: http`)\n    // biome-ignore lint/suspicious/noConsole:: intentional console output\n    console.log(`  URL: ${server.url}`)\n    if (server.headers) {\n      // biome-ignore lint/suspicious/noConsole:: intentional console output\n      console.log('  Headers:')\n      for (const [key, value] of Object.entries(server.headers)) {\n        // biome-ignore lint/suspicious/noConsole:: intentional console output\n        console.log(`    ${key}: ${value}`)\n      }\n    }\n    if (server.oauth?.clientId || server.oauth?.callbackPort) {\n      const parts: string[] = []\n      if (server.oauth.clientId) {\n        parts.push('client_id configured')\n        const clientConfig = getMcpClientConfig(name, server)\n        if (clientConfig?.clientSecret) parts.push('client_secret configured')\n      }\n      if (server.oauth.callbackPort)\n        parts.push(`callback_port ${server.oauth.callbackPort}`)\n      // biome-ignore lint/suspicious/noConsole:: intentional console output\n      console.log(`  
OAuth: ${parts.join(', ')}`)\n    }\n  } else if (server.type === 'stdio') {\n    // biome-ignore lint/suspicious/noConsole:: intentional console output\n    console.log(`  Type: stdio`)\n    // biome-ignore lint/suspicious/noConsole:: intentional console output\n    console.log(`  Command: ${server.command}`)\n    const args = Array.isArray(server.args) ? server.args : []\n    // biome-ignore lint/suspicious/noConsole:: intentional console output\n    console.log(`  Args: ${args.join(' ')}`)\n    if (server.env) {\n      // biome-ignore lint/suspicious/noConsole:: intentional console output\n      console.log('  Environment:')\n      for (const [key, value] of Object.entries(server.env)) {\n        // biome-ignore lint/suspicious/noConsole:: intentional console output\n        console.log(`    ${key}=${value}`)\n      }\n    }\n  }\n  // biome-ignore lint/suspicious/noConsole:: intentional console output\n  console.log(\n    `\\nTo remove this server, run: claude mcp remove \"${name}\" -s ${server.scope}`,\n  )\n  // Use gracefulShutdown to properly clean up MCP server connections\n  // (process.exit bypasses cleanup handlers, leaving child processes orphaned)\n  await gracefulShutdown(0)\n}\n\n// mcp add-json (lines 4801–4870)\nexport async function mcpAddJsonHandler(\n  name: string,\n  json: string,\n  options: { scope?: string; clientSecret?: true },\n): Promise<void> {\n  try {\n    const scope = ensureConfigScope(options.scope)\n    const parsedJson = safeParseJSON(json)\n\n    // Read secret before writing config so cancellation doesn't leave partial state\n    const needsSecret =\n      options.clientSecret &&\n      parsedJson &&\n      typeof parsedJson === 'object' &&\n      'type' in parsedJson &&\n      (parsedJson.type === 'sse' || parsedJson.type === 'http') &&\n      'url' in parsedJson &&\n      typeof parsedJson.url === 'string' &&\n      'oauth' in parsedJson &&\n      parsedJson.oauth &&\n      typeof parsedJson.oauth === 'object' &&\n      'clientId' in parsedJson.oauth\n    const clientSecret = needsSecret ? await readClientSecret() : undefined\n\n    await addMcpConfig(name, parsedJson, scope)\n\n    const transportType =\n      parsedJson && typeof parsedJson === 'object' && 'type' in parsedJson\n        ? 
String(parsedJson.type || 'stdio')\n        : 'stdio'\n\n    if (\n      clientSecret &&\n      parsedJson &&\n      typeof parsedJson === 'object' &&\n      'type' in parsedJson &&\n      (parsedJson.type === 'sse' || parsedJson.type === 'http') &&\n      'url' in parsedJson &&\n      typeof parsedJson.url === 'string'\n    ) {\n      saveMcpClientSecret(\n        name,\n        { type: parsedJson.type, url: parsedJson.url },\n        clientSecret,\n      )\n    }\n\n    logEvent('tengu_mcp_add', {\n      scope:\n        scope as AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS,\n      source:\n        'json' as AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS,\n      type: transportType as AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS,\n    })\n\n    cliOk(`Added ${transportType} MCP server ${name} to ${scope} config`)\n  } catch (error) {\n    cliError((error as Error).message)\n  }\n}\n\n// mcp add-from-claude-desktop (lines 4881–4927)\nexport async function mcpAddFromDesktopHandler(options: {\n  scope?: string\n}): Promise<void> {\n  try {\n    const scope = ensureConfigScope(options.scope)\n    const platform = getPlatform()\n\n    logEvent('tengu_mcp_add', {\n      scope:\n        scope as AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS,\n      platform:\n        platform as AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS,\n      source:\n        'desktop' as AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS,\n    })\n\n    const { readClaudeDesktopMcpServers } = await import(\n      '../../utils/claudeDesktop.js'\n    )\n    const servers = await readClaudeDesktopMcpServers()\n\n    if (Object.keys(servers).length === 0) {\n      cliOk(\n        'No MCP servers found in Claude Desktop configuration or configuration file does not exist.',\n      )\n    }\n\n    const { unmount } = await render(\n      <AppStateProvider>\n        <KeybindingSetup>\n          <MCPServerDesktopImportDialog\n            servers={servers}\n            scope={scope}\n            onDone={() => {\n              unmount()\n            }}\n          />\n        </KeybindingSetup>\n      </AppStateProvider>,\n      { exitOnCtrlC: true },\n    )\n  } catch (error) {\n    cliError((error as Error).message)\n  }\n}\n\n// mcp reset-project-choices (lines 4935–4952)\nexport async function mcpResetChoicesHandler(): Promise<void> {\n  logEvent('tengu_mcp_reset_mcpjson_choices', {})\n  saveCurrentProjectConfig(current => ({\n    ...current,\n    enabledMcpjsonServers: [],\n    disabledMcpjsonServers: [],\n    enableAllProjectMcpServers: false,\n  }))\n  cliOk(\n    'All project-scoped (.mcp.json) server approvals and rejections have been reset.\\n' +\n      'You will be prompted for approval next time you start Claude Code.',\n  
)\n}\n"],"mappings":"AAAA;AACA;AACA;AACA;;AAEA,SAASA,IAAI,QAAQ,aAAa;AAClC,OAAOC,IAAI,MAAM,OAAO;AACxB,SAASC,GAAG,QAAQ,SAAS;AAC7B,OAAOC,KAAK,MAAM,OAAO;AACzB,SAASC,4BAA4B,QAAQ,kDAAkD;AAC/F,SAASC,MAAM,QAAQ,cAAc;AACrC,SAASC,eAAe,QAAQ,8CAA8C;AAC9E,SACE,KAAKC,0DAA0D,EAC/DC,QAAQ,QACH,mCAAmC;AAC1C,SACEC,oBAAoB,EACpBC,iCAAiC,EACjCC,kBAAkB,EAClBC,gBAAgB,EAChBC,mBAAmB,QACd,4BAA4B;AACnC,SACEC,eAAe,EACfC,+BAA+B,QAC1B,8BAA8B;AACrC,SACEC,YAAY,EACZC,gBAAgB,EAChBC,kBAAkB,EAClBC,oBAAoB,EACpBC,eAAe,QACV,8BAA8B;AACrC,cACEC,WAAW,EACXC,qBAAqB,QAChB,6BAA6B;AACpC,SACEC,yBAAyB,EACzBC,iBAAiB,EACjBC,aAAa,QACR,6BAA6B;AACpC,SAASC,gBAAgB,QAAQ,yBAAyB;AAC1D,SACEC,uBAAuB,EACvBC,eAAe,EACfC,wBAAwB,QACnB,uBAAuB;AAC9B,SAASC,gBAAgB,QAAQ,uBAAuB;AACxD,SAASC,gBAAgB,QAAQ,iCAAiC;AAClE,SAASC,aAAa,QAAQ,qBAAqB;AACnD,SAASC,WAAW,QAAQ,yBAAyB;AACrD,SAASC,QAAQ,EAAEC,KAAK,QAAQ,YAAY;AAE5C,eAAeC,oBAAoBA,CACjCC,IAAI,EAAE,MAAM,EACZC,MAAM,EAAEhB,qBAAqB,CAC9B,EAAEiB,OAAO,CAAC,MAAM,CAAC,CAAC;EACjB,IAAI;IACF,MAAMC,MAAM,GAAG,MAAM1B,eAAe,CAACuB,IAAI,EAAEC,MAAM,CAAC;IAClD,IAAIE,MAAM,CAACC,IAAI,KAAK,WAAW,EAAE;MAC/B,OAAO,aAAa;IACtB,CAAC,MAAM,IAAID,MAAM,CAACC,IAAI,KAAK,YAAY,EAAE;MACvC,OAAO,wBAAwB;IACjC,CAAC,MAAM;MACL,OAAO,qBAAqB;IAC9B;EACF,CAAC,CAAC,OAAOC,MAAM,EAAE;IACf,OAAO,oBAAoB;EAC7B;AACF;;AAEA;AACA,OAAO,eAAeC,eAAeA,CAAC;EACpCC,KAAK;EACLC;AAIF,CAHC,EAAE;EACDD,KAAK,CAAC,EAAE,OAAO;EACfC,OAAO,CAAC,EAAE,OAAO;AACnB,CAAC,CAAC,EAAEN,OAAO,CAAC,IAAI,CAAC,CAAC;EAChB,MAAMO,WAAW,GAAG5C,GAAG,CAAC,CAAC;EACzBM,QAAQ,CAAC,iBAAiB,EAAE,CAAC,CAAC,CAAC;EAE/B,IAAI;IACF,MAAMR,IAAI,CAAC8C,WAAW,CAAC;EACzB,CAAC,CAAC,OAAOC,KAAK,EAAE;IACd,IAAIjB,gBAAgB,CAACiB,KAAK,CAAC,EAAE;MAC3Bb,QAAQ,CAAC,oBAAoBY,WAAW,iBAAiB,CAAC;IAC5D;IACA,MAAMC,KAAK;EACb;EAEA,IAAI;IACF,MAAM;MAAEC;IAAM,CAAC,GAAG,MAAM,MAAM,CAAC,gBAAgB,CAAC;IAChD,MAAMA,KAAK,CAACF,WAAW,EAAE,SAAS,EAAE,KAAK,EAAE,KAAK,EAAEG,SAAS,EAAE,KAAK,CAAC;IACnE,MAAM;MAAEC;IAAe,CAAC,GAAG,MAAM,MAAM,CAAC,0BAA0B,CAAC;IACnE,MAAMA,cAAc,CAACJ,WAAW,EAAEF,KAAK,IAAI,KAAK,EAAEC,OAAO,IAAI,KAAK,CAAC;EACrE,CAAC,CAAC,OAAOE,KAAK,EAAE;IACdb,QAAQ,CAAC,sCAAsCa,KAAK,EAAE,CAAC;EACzD;AACF;;AAEA;AACA,OAAO,eAAeI,gBAAgBA,CACpCd,IAAI,EAAE,MAAM,EACZe,OAAO,EAAE;EAAEC,KAAK,CAAC,EAAE,MAAM;AAAC,CAAC,CAC5B,EAAEd,OAAO,CAAC,IAAI,CAAC,CAAC;EACf;EACA,MAAMe,mBAAmB,GAAGpC,kBAAkB,CAACmB,IAAI,CAAC;EAEpD,MAAMkB,oBAAoB,GAAGA,CAAA,KAAM;IACjC,IACED,mBAAmB,KAClBA,mBAAmB,CAACb,IAAI,KAAK,KAAK,IACjCa,mBAAmB,CAACb,IAAI,KAAK,MAAM,CAAC,EACtC;MACA/B,iCAAiC,CAAC2B,IAAI,EAAEiB,mBAAmB,CAAC;MAC5D7C,oBAAoB,CAAC4B,IAAI,EAAEiB,mBAAmB,CAAC;IACjD;EACF,CAAC;EAED,IAAI;IACF,IAAIF,OAAO,CAACC,KAAK,EAAE;MACjB,MAAMA,KAAK,GAAG7B,iBAAiB,CAAC4B,OAAO,CAACC,KAAK,CAAC;MAC9C7C,QAAQ,CAAC,kBAAkB,EAAE;QAC3B6B,IAAI,EAAEA,IAAI,IAAI9B,0DAA0D;QACxE8C,KAAK,EACHA,KAAK,IAAI9C;MACb,CAAC,CAAC;MAEF,MAAMa,eAAe,CAACiB,IAAI,EAAEgB,KAAK,CAAC;MAClCE,oBAAoB,CAAC,CAAC;MACtBC,OAAO,CAACC,MAAM,CAACC,KAAK,CAAC,sBAAsBrB,IAAI,SAASgB,KAAK,WAAW,CAAC;MACzElB,KAAK,CAAC,kBAAkBZ,yBAAyB,CAAC8B,KAAK,CAAC,EAAE,CAAC;IAC7D;;IAEA;IACA,MAAMM,aAAa,GAAGhC,uBAAuB,CAAC,CAAC;IAC/C,MAAMiC,YAAY,GAAGhC,eAAe,CAAC,CAAC;;IAEtC;IACA,MAAM;MAAEiC,OAAO,EAAEC;IAAe,CAAC,GAAG3C,oBAAoB,CAAC,SAAS,CAAC;IACnE,MAAM4C,aAAa,GAAG,CAAC,CAACD,cAAc,CAACzB,IAAI,CAAC;;IAE5C;IACA,MAAM2B,MAAM,EAAEC,KAAK,CAACC,OAAO,CAAC7C,WAAW,EAAE,SAAS,CAAC,CAAC,GAAG,EAAE;IACzD,IAAIsC,aAAa,CAACQ,UAAU,GAAG9B,IAAI,CAAC,EAAE2B,MAAM,CAACI,IAAI,CAAC,OAAO,CAAC;IAC1D,IAAIL,aAAa,EAAEC,MAAM,CAACI,IAAI,CAAC,SAAS,CAAC;IACzC,IAAIR,YAAY,CAACO,UAAU,GAAG9B,IAAI,CAAC,EAAE2B,MAAM,CAACI,IAAI,CAAC,MAAM,CAAC;IAExD,IAAIJ,MAAM,CAACK,MAAM,KAAK,CAAC,EAAE;MACvBnC,QAAQ,CAAC,mCAAmCG,IAAI,GAAG,CAAC;IACtD,CAAC,MAAM,IAAI2B,MAAM,CAACK,
MAAM,KAAK,CAAC,EAAE;MAC9B;MACA,MAAMhB,KAAK,GAAGW,MAAM,CAAC,CAAC,CAAC,CAAC;MACxBxD,QAAQ,CAAC,kBAAkB,EAAE;QAC3B6B,IAAI,EAAEA,IAAI,IAAI9B,0DAA0D;QACxE8C,KAAK,EACHA,KAAK,IAAI9C;MACb,CAAC,CAAC;MAEF,MAAMa,eAAe,CAACiB,IAAI,EAAEgB,KAAK,CAAC;MAClCE,oBAAoB,CAAC,CAAC;MACtBC,OAAO,CAACC,MAAM,CAACC,KAAK,CAClB,uBAAuBrB,IAAI,UAAUgB,KAAK,WAC5C,CAAC;MACDlB,KAAK,CAAC,kBAAkBZ,yBAAyB,CAAC8B,KAAK,CAAC,EAAE,CAAC;IAC7D,CAAC,MAAM;MACL;MACAG,OAAO,CAACc,MAAM,CAACZ,KAAK,CAAC,eAAerB,IAAI,gCAAgC,CAAC;MACzE2B,MAAM,CAACO,OAAO,CAAClB,KAAK,IAAI;QACtBG,OAAO,CAACc,MAAM,CAACZ,KAAK,CAClB,OAAOjC,aAAa,CAAC4B,KAAK,CAAC,KAAK9B,yBAAyB,CAAC8B,KAAK,CAAC,KAClE,CAAC;MACH,CAAC,CAAC;MACFG,OAAO,CAACc,MAAM,CAACZ,KAAK,CAAC,2CAA2C,CAAC;MACjEM,MAAM,CAACO,OAAO,CAAClB,KAAK,IAAI;QACtBG,OAAO,CAACc,MAAM,CAACZ,KAAK,CAAC,wBAAwBrB,IAAI,QAAQgB,KAAK,IAAI,CAAC;MACrE,CAAC,CAAC;MACFnB,QAAQ,CAAC,CAAC;IACZ;EACF,CAAC,CAAC,OAAOa,KAAK,EAAE;IACdb,QAAQ,CAAC,CAACa,KAAK,IAAIyB,KAAK,EAAEC,OAAO,CAAC;EACpC;AACF;;AAEA;AACA,OAAO,eAAeC,cAAcA,CAAA,CAAE,EAAEnC,OAAO,CAAC,IAAI,CAAC,CAAC;EACpD/B,QAAQ,CAAC,gBAAgB,EAAE,CAAC,CAAC,CAAC;EAC9B,MAAM;IAAEqD,OAAO,EAAEc;EAAQ,CAAC,GAAG,MAAM1D,gBAAgB,CAAC,CAAC;EACrD,IAAI2D,MAAM,CAACC,IAAI,CAACF,OAAO,CAAC,CAACN,MAAM,KAAK,CAAC,EAAE;IACrC;IACAS,OAAO,CAACC,GAAG,CACT,kEACF,CAAC;EACH,CAAC,MAAM;IACL;IACAD,OAAO,CAACC,GAAG,CAAC,iCAAiC,CAAC;;IAE9C;IACA,MAAMC,OAAO,GAAGJ,MAAM,CAACI,OAAO,CAACL,OAAO,CAAC;IACvC,MAAMM,OAAO,GAAG,MAAMhF,IAAI,CACxB+E,OAAO,EACP,OAAO,CAAC3C,IAAI,EAAEC,MAAM,CAAC,MAAM;MACzBD,IAAI;MACJC,MAAM;MACN4C,MAAM,EAAE,MAAM9C,oBAAoB,CAACC,IAAI,EAAEC,MAAM;IACjD,CAAC,CAAC,EACF;MAAE6C,WAAW,EAAEpE,+BAA+B,CAAC;IAAE,CACnD,CAAC;IAED,KAAK,MAAM;MAAEsB,IAAI;MAAEC,MAAM;MAAE4C;IAAO,CAAC,IAAID,OAAO,EAAE;MAC9C;MACA,IAAI3C,MAAM,CAACG,IAAI,KAAK,KAAK,EAAE;QACzB;QACAqC,OAAO,CAACC,GAAG,CAAC,GAAG1C,IAAI,KAAKC,MAAM,CAAC8C,GAAG,YAAYF,MAAM,EAAE,CAAC;MACzD,CAAC,MAAM,IAAI5C,MAAM,CAACG,IAAI,KAAK,MAAM,EAAE;QACjC;QACAqC,OAAO,CAACC,GAAG,CAAC,GAAG1C,IAAI,KAAKC,MAAM,CAAC8C,GAAG,aAAaF,MAAM,EAAE,CAAC;MAC1D,CAAC,MAAM,IAAI5C,MAAM,CAACG,IAAI,KAAK,gBAAgB,EAAE;QAC3C;QACAqC,OAAO,CAACC,GAAG,CAAC,GAAG1C,IAAI,KAAKC,MAAM,CAAC8C,GAAG,MAAMF,MAAM,EAAE,CAAC;MACnD,CAAC,MAAM,IAAI,CAAC5C,MAAM,CAACG,IAAI,IAAIH,MAAM,CAACG,IAAI,KAAK,OAAO,EAAE;QAClD,MAAM4C,IAAI,GAAGpB,KAAK,CAACqB,OAAO,CAAChD,MAAM,CAAC+C,IAAI,CAAC,GAAG/C,MAAM,CAAC+C,IAAI,GAAG,EAAE;QAC1D;QACAP,OAAO,CAACC,GAAG,CAAC,GAAG1C,IAAI,KAAKC,MAAM,CAACiD,OAAO,IAAIF,IAAI,CAACG,IAAI,CAAC,GAAG,CAAC,MAAMN,MAAM,EAAE,CAAC;MACzE;IACF;EACF;EACA;EACA;EACA,MAAMnD,gBAAgB,CAAC,CAAC,CAAC;AAC3B;;AAEA;AACA,OAAO,eAAe0D,aAAaA,CAACpD,IAAI,EAAE,MAAM,CAAC,EAAEE,OAAO,CAAC,IAAI,CAAC,CAAC;EAC/D/B,QAAQ,CAAC,eAAe,EAAE;IACxB6B,IAAI,EAAEA,IAAI,IAAI9B;EAChB,CAAC,CAAC;EACF,MAAM+B,MAAM,GAAGpB,kBAAkB,CAACmB,IAAI,CAAC;EACvC,IAAI,CAACC,MAAM,EAAE;IACXJ,QAAQ,CAAC,kCAAkCG,IAAI,EAAE,CAAC;EACpD;;EAEA;EACAyC,OAAO,CAACC,GAAG,CAAC,GAAG1C,IAAI,GAAG,CAAC;EACvB;EACAyC,OAAO,CAACC,GAAG,CAAC,YAAYtD,aAAa,CAACa,MAAM,CAACe,KAAK,CAAC,EAAE,CAAC;;EAEtD;EACA,MAAM6B,MAAM,GAAG,MAAM9C,oBAAoB,CAACC,IAAI,EAAEC,MAAM,CAAC;EACvD;EACAwC,OAAO,CAACC,GAAG,CAAC,aAAaG,MAAM,EAAE,CAAC;;EAElC;EACA,IAAI5C,MAAM,CAACG,IAAI,KAAK,KAAK,EAAE;IACzB;IACAqC,OAAO,CAACC,GAAG,CAAC,aAAa,CAAC;IAC1B;IACAD,OAAO,CAACC,GAAG,CAAC,UAAUzC,MAAM,CAAC8C,GAAG,EAAE,CAAC;IACnC,IAAI9C,MAAM,CAACoD,OAAO,EAAE;MAClB;MACAZ,OAAO,CAACC,GAAG,CAAC,YAAY,CAAC;MACzB,KAAK,MAAM,CAACY,GAAG,EAAEC,KAAK,CAAC,IAAIhB,MAAM,CAACI,OAAO,CAAC1C,MAAM,CAACoD,OAAO,CAAC,EAAE;QACzD;QACAZ,OAAO,CAACC,GAAG,CAAC,OAAOY,GAAG,KAAKC,KAAK,EAAE,CAAC;MACrC;IACF;IACA,IAAItD,MAAM,CAACuD,KAAK,EAAEC,QAAQ,IAAIxD,MAAM,CAACuD,KAAK,EAAEE,YAAY,EAAE;MACxD,MAAMC,KAAK,EAAE,MAAM,EAAE,GAAG
,EAAE;MAC1B,IAAI1D,MAAM,CAACuD,KAAK,CAACC,QAAQ,EAAE;QACzBE,KAAK,CAAC5B,IAAI,CAAC,sBAAsB,CAAC;QAClC,MAAM6B,YAAY,GAAGtF,kBAAkB,CAAC0B,IAAI,EAAEC,MAAM,CAAC;QACrD,IAAI2D,YAAY,EAAEC,YAAY,EAAEF,KAAK,CAAC5B,IAAI,CAAC,0BAA0B,CAAC;MACxE;MACA,IAAI9B,MAAM,CAACuD,KAAK,CAACE,YAAY,EAC3BC,KAAK,CAAC5B,IAAI,CAAC,iBAAiB9B,MAAM,CAACuD,KAAK,CAACE,YAAY,EAAE,CAAC;MAC1D;MACAjB,OAAO,CAACC,GAAG,CAAC,YAAYiB,KAAK,CAACR,IAAI,CAAC,IAAI,CAAC,EAAE,CAAC;IAC7C;EACF,CAAC,MAAM,IAAIlD,MAAM,CAACG,IAAI,KAAK,MAAM,EAAE;IACjC;IACAqC,OAAO,CAACC,GAAG,CAAC,cAAc,CAAC;IAC3B;IACAD,OAAO,CAACC,GAAG,CAAC,UAAUzC,MAAM,CAAC8C,GAAG,EAAE,CAAC;IACnC,IAAI9C,MAAM,CAACoD,OAAO,EAAE;MAClB;MACAZ,OAAO,CAACC,GAAG,CAAC,YAAY,CAAC;MACzB,KAAK,MAAM,CAACY,GAAG,EAAEC,KAAK,CAAC,IAAIhB,MAAM,CAACI,OAAO,CAAC1C,MAAM,CAACoD,OAAO,CAAC,EAAE;QACzD;QACAZ,OAAO,CAACC,GAAG,CAAC,OAAOY,GAAG,KAAKC,KAAK,EAAE,CAAC;MACrC;IACF;IACA,IAAItD,MAAM,CAACuD,KAAK,EAAEC,QAAQ,IAAIxD,MAAM,CAACuD,KAAK,EAAEE,YAAY,EAAE;MACxD,MAAMC,KAAK,EAAE,MAAM,EAAE,GAAG,EAAE;MAC1B,IAAI1D,MAAM,CAACuD,KAAK,CAACC,QAAQ,EAAE;QACzBE,KAAK,CAAC5B,IAAI,CAAC,sBAAsB,CAAC;QAClC,MAAM6B,YAAY,GAAGtF,kBAAkB,CAAC0B,IAAI,EAAEC,MAAM,CAAC;QACrD,IAAI2D,YAAY,EAAEC,YAAY,EAAEF,KAAK,CAAC5B,IAAI,CAAC,0BAA0B,CAAC;MACxE;MACA,IAAI9B,MAAM,CAACuD,KAAK,CAACE,YAAY,EAC3BC,KAAK,CAAC5B,IAAI,CAAC,iBAAiB9B,MAAM,CAACuD,KAAK,CAACE,YAAY,EAAE,CAAC;MAC1D;MACAjB,OAAO,CAACC,GAAG,CAAC,YAAYiB,KAAK,CAACR,IAAI,CAAC,IAAI,CAAC,EAAE,CAAC;IAC7C;EACF,CAAC,MAAM,IAAIlD,MAAM,CAACG,IAAI,KAAK,OAAO,EAAE;IAClC;IACAqC,OAAO,CAACC,GAAG,CAAC,eAAe,CAAC;IAC5B;IACAD,OAAO,CAACC,GAAG,CAAC,cAAczC,MAAM,CAACiD,OAAO,EAAE,CAAC;IAC3C,MAAMF,IAAI,GAAGpB,KAAK,CAACqB,OAAO,CAAChD,MAAM,CAAC+C,IAAI,CAAC,GAAG/C,MAAM,CAAC+C,IAAI,GAAG,EAAE;IAC1D;IACAP,OAAO,CAACC,GAAG,CAAC,WAAWM,IAAI,CAACG,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC;IACxC,IAAIlD,MAAM,CAAC6D,GAAG,EAAE;MACd;MACArB,OAAO,CAACC,GAAG,CAAC,gBAAgB,CAAC;MAC7B,KAAK,MAAM,CAACY,GAAG,EAAEC,KAAK,CAAC,IAAIhB,MAAM,CAACI,OAAO,CAAC1C,MAAM,CAAC6D,GAAG,CAAC,EAAE;QACrD;QACArB,OAAO,CAACC,GAAG,CAAC,OAAOY,GAAG,IAAIC,KAAK,EAAE,CAAC;MACpC;IACF;EACF;EACA;EACAd,OAAO,CAACC,GAAG,CACT,oDAAoD1C,IAAI,QAAQC,MAAM,CAACe,KAAK,EAC9E,CAAC;EACD;EACA;EACA,MAAMtB,gBAAgB,CAAC,CAAC,CAAC;AAC3B;;AAEA;AACA,OAAO,eAAeqE,iBAAiBA,CACrC/D,IAAI,EAAE,MAAM,EACZgE,IAAI,EAAE,MAAM,EACZjD,OAAO,EAAE;EAAEC,KAAK,CAAC,EAAE,MAAM;EAAE6C,YAAY,CAAC,EAAE,IAAI;AAAC,CAAC,CACjD,EAAE3D,OAAO,CAAC,IAAI,CAAC,CAAC;EACf,IAAI;IACF,MAAMc,KAAK,GAAG7B,iBAAiB,CAAC4B,OAAO,CAACC,KAAK,CAAC;IAC9C,MAAMiD,UAAU,GAAGtE,aAAa,CAACqE,IAAI,CAAC;;IAEtC;IACA,MAAME,WAAW,GACfnD,OAAO,CAAC8C,YAAY,IACpBI,UAAU,IACV,OAAOA,UAAU,KAAK,QAAQ,IAC9B,MAAM,IAAIA,UAAU,KACnBA,UAAU,CAAC7D,IAAI,KAAK,KAAK,IAAI6D,UAAU,CAAC7D,IAAI,KAAK,MAAM,CAAC,IACzD,KAAK,IAAI6D,UAAU,IACnB,OAAOA,UAAU,CAAClB,GAAG,KAAK,QAAQ,IAClC,OAAO,IAAIkB,UAAU,IACrBA,UAAU,CAACT,KAAK,IAChB,OAAOS,UAAU,CAACT,KAAK,KAAK,QAAQ,IACpC,UAAU,IAAIS,UAAU,CAACT,KAAK;IAChC,MAAMK,YAAY,GAAGK,WAAW,GAAG,MAAM3F,gBAAgB,CAAC,CAAC,GAAGqC,SAAS;IAEvE,MAAMjC,YAAY,CAACqB,IAAI,EAAEiE,UAAU,EAAEjD,KAAK,CAAC;IAE3C,MAAMmD,aAAa,GACjBF,UAAU,IAAI,OAAOA,UAAU,KAAK,QAAQ,IAAI,MAAM,IAAIA,UAAU,GAChEG,MAAM,CAACH,UAAU,CAAC7D,IAAI,IAAI,OAAO,CAAC,GAClC,OAAO;IAEb,IACEyD,YAAY,IACZI,UAAU,IACV,OAAOA,UAAU,KAAK,QAAQ,IAC9B,MAAM,IAAIA,UAAU,KACnBA,UAAU,CAAC7D,IAAI,KAAK,KAAK,IAAI6D,UAAU,CAAC7D,IAAI,KAAK,MAAM,CAAC,IACzD,KAAK,IAAI6D,UAAU,IACnB,OAAOA,UAAU,CAAClB,GAAG,KAAK,QAAQ,EAClC;MACAvE,mBAAmB,CACjBwB,IAAI,EACJ;QAAEI,IAAI,EAAE6D,UAAU,CAAC7D,IAAI;QAAE2C,GAAG,EAAEkB,UAAU,CAAClB;MAAI,CAAC,EAC9Cc,YACF,CAAC;IACH;IAEA1F,QAAQ,CAAC,eAAe,EAAE;MACxB6C,KAAK,EACHA,KAAK,IAAI9C,0DAA0D;MACrEmG,MAAM,EACJ,MAAM,IAAInG,0DAA0D;MACtEkC,IAAI,EAAE+D,aAAa,IAAIjG;IAC
zB,CAAC,CAAC;IAEF4B,KAAK,CAAC,SAASqE,aAAa,eAAenE,IAAI,OAAOgB,KAAK,SAAS,CAAC;EACvE,CAAC,CAAC,OAAON,KAAK,EAAE;IACdb,QAAQ,CAAC,CAACa,KAAK,IAAIyB,KAAK,EAAEC,OAAO,CAAC;EACpC;AACF;;AAEA;AACA,OAAO,eAAekC,wBAAwBA,CAACvD,OAAO,EAAE;EACtDC,KAAK,CAAC,EAAE,MAAM;AAChB,CAAC,CAAC,EAAEd,OAAO,CAAC,IAAI,CAAC,CAAC;EAChB,IAAI;IACF,MAAMc,KAAK,GAAG7B,iBAAiB,CAAC4B,OAAO,CAACC,KAAK,CAAC;IAC9C,MAAMuD,QAAQ,GAAG3E,WAAW,CAAC,CAAC;IAE9BzB,QAAQ,CAAC,eAAe,EAAE;MACxB6C,KAAK,EACHA,KAAK,IAAI9C,0DAA0D;MACrEqG,QAAQ,EACNA,QAAQ,IAAIrG,0DAA0D;MACxEmG,MAAM,EACJ,SAAS,IAAInG;IACjB,CAAC,CAAC;IAEF,MAAM;MAAEsG;IAA4B,CAAC,GAAG,MAAM,MAAM,CAClD,8BACF,CAAC;IACD,MAAMhD,OAAO,GAAG,MAAMgD,2BAA2B,CAAC,CAAC;IAEnD,IAAIjC,MAAM,CAACC,IAAI,CAAChB,OAAO,CAAC,CAACQ,MAAM,KAAK,CAAC,EAAE;MACrClC,KAAK,CACH,4FACF,CAAC;IACH;IAEA,MAAM;MAAE2E;IAAQ,CAAC,GAAG,MAAMzG,MAAM,CAC9B,CAAC,gBAAgB;AACvB,QAAQ,CAAC,eAAe;AACxB,UAAU,CAAC,4BAA4B,CAC3B,OAAO,CAAC,CAACwD,OAAO,CAAC,CACjB,KAAK,CAAC,CAACR,KAAK,CAAC,CACb,MAAM,CAAC,CAAC,MAAM;UACZyD,OAAO,CAAC,CAAC;QACX,CAAC,CAAC;AAEd,QAAQ,EAAE,eAAe;AACzB,MAAM,EAAE,gBAAgB,CAAC,EACnB;MAAEC,WAAW,EAAE;IAAK,CACtB,CAAC;EACH,CAAC,CAAC,OAAOhE,KAAK,EAAE;IACdb,QAAQ,CAAC,CAACa,KAAK,IAAIyB,KAAK,EAAEC,OAAO,CAAC;EACpC;AACF;;AAEA;AACA,OAAO,eAAeuC,sBAAsBA,CAAA,CAAE,EAAEzE,OAAO,CAAC,IAAI,CAAC,CAAC;EAC5D/B,QAAQ,CAAC,iCAAiC,EAAE,CAAC,CAAC,CAAC;EAC/CqB,wBAAwB,CAACoF,OAAO,KAAK;IACnC,GAAGA,OAAO;IACVC,qBAAqB,EAAE,EAAE;IACzBC,sBAAsB,EAAE,EAAE;IAC1BC,0BAA0B,EAAE;EAC9B,CAAC,CAAC,CAAC;EACHjF,KAAK,CACH,mFAAmF,GACjF,oEACJ,CAAC;AACH","ignoreList":[]} \ No newline at end of file +//# sourceMappingURL=data:application/json;charset=utf-8;base64,{"version":3,"names":["stat","pMap","cwd","React","MCPServerDesktopImportDialog","render","KeybindingSetup","AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS","logEvent","clearMcpClientConfig","clearServerTokensFromLocalStorage","getMcpClientConfig","readClientSecret","saveMcpClientSecret","connectToServer","getMcpServerConnectionBatchSize","addMcpConfig","getAllMcpConfigs","getMcpConfigByName","getMcpConfigsByScope","removeMcpConfig","ConfigScope","ScopedMcpServerConfig","describeMcpConfigFilePath","ensureConfigScope","getScopeLabel","AppStateProvider","getCurrentProjectConfig","getGlobalConfig","saveCurrentProjectConfig","isFsInaccessible","gracefulShutdown","safeParseJSON","getPlatform","cliError","cliOk","checkMcpServerHealth","name","server","Promise","result","type","_error","mcpServeHandler","debug","verbose","providedCwd","error","setup","undefined","startMCPServer","mcpRemoveHandler","options","scope","serverBeforeRemoval","cleanupSecureStorage","process","stdout","write","projectConfig","globalConfig","servers","projectServers","mcpJsonExists","scopes","Array","Exclude","mcpServers","push","length","stderr","forEach","Error","message","mcpListHandler","configs","Object","keys","console","log","entries","results","status","concurrency","url","args","isArray","command","join","mcpGetHandler","headers","key","value","oauth","clientId","callbackPort","parts","clientConfig","clientSecret","env","mcpAddJsonHandler","json","parsedJson","needsSecret","transportType","String","source","mcpAddFromDesktopHandler","platform","readClaudeDesktopMcpServers","unmount","exitOnCtrlC","mcpResetChoicesHandler","current","enabledMcpjsonServers","disabledMcpjsonServers","enableAllProjectMcpServers"],"sources":["mcp.tsx"],"sourcesContent":["/**\n * MCP subcommand handlers — extracted from main.tsx for lazy loading.\n * These are dynamically imported only when the corresponding `claude mcp *` command runs.\n */\n\nimport { stat } 
from 'fs/promises'\nimport pMap from 'p-map'\nimport { cwd } from 'process'\nimport React from 'react'\nimport { MCPServerDesktopImportDialog } from '../../components/MCPServerDesktopImportDialog.js'\nimport { render } from '../../ink.js'\nimport { KeybindingSetup } from '../../keybindings/KeybindingProviderSetup.js'\nimport {\n  type AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS,\n  logEvent,\n} from '../../services/analytics/index.js'\nimport {\n  clearMcpClientConfig,\n  clearServerTokensFromLocalStorage,\n  getMcpClientConfig,\n  readClientSecret,\n  saveMcpClientSecret,\n} from '../../services/mcp/auth.js'\nimport {\n  connectToServer,\n  getMcpServerConnectionBatchSize,\n} from '../../services/mcp/client.js'\nimport {\n  addMcpConfig,\n  getAllMcpConfigs,\n  getMcpConfigByName,\n  getMcpConfigsByScope,\n  removeMcpConfig,\n} from '../../services/mcp/config.js'\nimport type {\n  ConfigScope,\n  ScopedMcpServerConfig,\n} from '../../services/mcp/types.js'\nimport {\n  describeMcpConfigFilePath,\n  ensureConfigScope,\n  getScopeLabel,\n} from '../../services/mcp/utils.js'\nimport { AppStateProvider } from '../../state/AppState.js'\nimport {\n  getCurrentProjectConfig,\n  getGlobalConfig,\n  saveCurrentProjectConfig,\n} from '../../utils/config.js'\nimport { isFsInaccessible } from '../../utils/errors.js'\nimport { gracefulShutdown } from '../../utils/gracefulShutdown.js'\nimport { safeParseJSON } from '../../utils/json.js'\nimport { getPlatform } from '../../utils/platform.js'\nimport { cliError, cliOk } from '../exit.js'\n\nasync function checkMcpServerHealth(\n  name: string,\n  server: ScopedMcpServerConfig,\n): Promise<string> {\n  try {\n    const result = await connectToServer(name, server)\n    if (result.type === 'connected') {\n      return '✓ Connected'\n    } else if (result.type === 'needs-auth') {\n      return '! Needs authentication'\n    } else {\n      return '✗ Failed to connect'\n    }\n  } catch (_error) {\n    return '✗ Connection error'\n  }\n}\n\n// mcp serve (lines 4512–4532)\nexport async function mcpServeHandler({\n  debug,\n  verbose,\n}: {\n  debug?: boolean\n  verbose?: boolean\n}): Promise<void> {\n  const providedCwd = cwd()\n  logEvent('tengu_mcp_start', {})\n\n  try {\n    await stat(providedCwd)\n  } catch (error) {\n    if (isFsInaccessible(error)) {\n      cliError(`Error: Directory ${providedCwd} does not exist`)\n    }\n    throw error\n  }\n\n  try {\n    const { setup } = await import('../../setup.js')\n    await setup(providedCwd, 'default', false, false, undefined, false)\n    const { startMCPServer } = await import('../../entrypoints/mcp.js')\n    await startMCPServer(providedCwd, debug ?? false, verbose ?? 
false)\n  } catch (error) {\n    cliError(`Error: Failed to start MCP server: ${error}`)\n  }\n}\n\n// mcp remove (lines 4545–4635)\nexport async function mcpRemoveHandler(\n  name: string,\n  options: { scope?: string },\n): Promise<void> {\n  // Look up config before removing so we can clean up secure storage\n  const serverBeforeRemoval = getMcpConfigByName(name)\n\n  const cleanupSecureStorage = () => {\n    if (\n      serverBeforeRemoval &&\n      (serverBeforeRemoval.type === 'sse' ||\n        serverBeforeRemoval.type === 'http')\n    ) {\n      clearServerTokensFromLocalStorage(name, serverBeforeRemoval)\n      clearMcpClientConfig(name, serverBeforeRemoval)\n    }\n  }\n\n  try {\n    if (options.scope) {\n      const scope = ensureConfigScope(options.scope)\n      logEvent('tengu_mcp_delete', {\n        name: name as AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS,\n        scope:\n          scope as AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS,\n      })\n\n      await removeMcpConfig(name, scope)\n      cleanupSecureStorage()\n      process.stdout.write(`Removed MCP server ${name} from ${scope} config\\n`)\n      cliOk(`File modified: ${describeMcpConfigFilePath(scope)}`)\n    }\n\n    // If no scope specified, check where the server exists\n    const projectConfig = getCurrentProjectConfig()\n    const globalConfig = getGlobalConfig()\n\n    // Check if server exists in project scope (.mcp.json)\n    const { servers: projectServers } = getMcpConfigsByScope('project')\n    const mcpJsonExists = !!projectServers[name]\n\n    // Count how many scopes contain this server\n    const scopes: Array<Exclude<ConfigScope, 'dynamic'>> = []\n    if (projectConfig.mcpServers?.[name]) scopes.push('local')\n    if (mcpJsonExists) scopes.push('project')\n    if (globalConfig.mcpServers?.[name]) scopes.push('user')\n\n    if (scopes.length === 0) {\n      cliError(`No MCP server found with name: \"${name}\"`)\n    } else if (scopes.length === 1) {\n      // Server exists in only one scope, remove it\n      const scope = scopes[0]!\n      logEvent('tengu_mcp_delete', {\n        name: name as AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS,\n        scope:\n          scope as AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS,\n      })\n\n      await removeMcpConfig(name, scope)\n      cleanupSecureStorage()\n      process.stdout.write(\n        `Removed MCP server \"${name}\" from ${scope} config\\n`,\n      )\n      cliOk(`File modified: ${describeMcpConfigFilePath(scope)}`)\n    } else {\n      // Server exists in multiple scopes\n      process.stderr.write(`MCP server \"${name}\" exists in multiple scopes:\\n`)\n      scopes.forEach(scope => {\n        process.stderr.write(\n          `  - ${getScopeLabel(scope)} (${describeMcpConfigFilePath(scope)})\\n`,\n        )\n      })\n      process.stderr.write('\\nTo remove from a specific scope, use:\\n')\n      scopes.forEach(scope => {\n        process.stderr.write(`  claude mcp remove \"${name}\" -s ${scope}\\n`)\n      })\n      cliError()\n    }\n  } catch (error) {\n    cliError((error as Error).message)\n  }\n}\n\n// mcp list (lines 4641–4688)\nexport async function mcpListHandler(): Promise<void> {\n  logEvent('tengu_mcp_list', {})\n  const { servers: configs } = await getAllMcpConfigs()\n  if (Object.keys(configs).length === 0) {\n    // biome-ignore lint/suspicious/noConsole:: intentional console output\n    console.log(\n      'No MCP servers configured. 
Use `claude mcp add` to add a server.',\n    )\n  } else {\n    // biome-ignore lint/suspicious/noConsole:: intentional console output\n    console.log('Checking MCP server health...\\n')\n\n    // Check servers concurrently\n    const entries = Object.entries(configs)\n    const results = await pMap(\n      entries,\n      async ([name, server]) => ({\n        name,\n        server,\n        status: await checkMcpServerHealth(name, server),\n      }),\n      { concurrency: getMcpServerConnectionBatchSize() },\n    )\n\n    for (const { name, server, status } of results) {\n      // Intentionally excluding sse-ide servers here since they're internal\n      if (server.type === 'sse') {\n        // biome-ignore lint/suspicious/noConsole:: intentional console output\n        console.log(`${name}: ${server.url} (SSE) - ${status}`)\n      } else if (server.type === 'http') {\n        // biome-ignore lint/suspicious/noConsole:: intentional console output\n        console.log(`${name}: ${server.url} (HTTP) - ${status}`)\n      } else if (server.type === 'claudeai-proxy') {\n        // biome-ignore lint/suspicious/noConsole:: intentional console output\n        console.log(`${name}: ${server.url} - ${status}`)\n      } else if (!server.type || server.type === 'stdio') {\n        const args = Array.isArray(server.args) ? server.args : []\n        // biome-ignore lint/suspicious/noConsole:: intentional console output\n        console.log(`${name}: ${server.command} ${args.join(' ')} - ${status}`)\n      }\n    }\n  }\n  // Use gracefulShutdown to properly clean up MCP server connections\n  // (process.exit bypasses cleanup handlers, leaving child processes orphaned)\n  await gracefulShutdown(0)\n}\n\n// mcp get (lines 4694–4786)\nexport async function mcpGetHandler(name: string): Promise<void> {\n  logEvent('tengu_mcp_get', {\n    name: name as AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS,\n  })\n  const server = getMcpConfigByName(name)\n  if (!server) {\n    cliError(`No MCP server found with name: ${name}`)\n  }\n\n  // biome-ignore lint/suspicious/noConsole:: intentional console output\n  console.log(`${name}:`)\n  // biome-ignore lint/suspicious/noConsole:: intentional console output\n  console.log(`  Scope: ${getScopeLabel(server.scope)}`)\n\n  // Check server health\n  const status = await checkMcpServerHealth(name, server)\n  // biome-ignore lint/suspicious/noConsole:: intentional console output\n  console.log(`  Status: ${status}`)\n\n  // Intentionally excluding sse-ide servers here since they're internal\n  if (server.type === 'sse') {\n    // biome-ignore lint/suspicious/noConsole:: intentional console output\n    console.log(`  Type: sse`)\n    // biome-ignore lint/suspicious/noConsole:: intentional console output\n    console.log(`  URL: ${server.url}`)\n    if (server.headers) {\n      // biome-ignore lint/suspicious/noConsole:: intentional console output\n      console.log('  Headers:')\n      for (const [key, value] of Object.entries(server.headers)) {\n        // biome-ignore lint/suspicious/noConsole:: intentional console output\n        console.log(`    ${key}: ${value}`)\n      }\n    }\n    if (server.oauth?.clientId || server.oauth?.callbackPort) {\n      const parts: string[] = []\n      if (server.oauth.clientId) {\n        parts.push('client_id configured')\n        const clientConfig = getMcpClientConfig(name, server)\n        if (clientConfig?.clientSecret) parts.push('client_secret configured')\n      }\n      if (server.oauth.callbackPort)\n        
parts.push(`callback_port ${server.oauth.callbackPort}`)\n      // biome-ignore lint/suspicious/noConsole:: intentional console output\n      console.log(`  OAuth: ${parts.join(', ')}`)\n    }\n  } else if (server.type === 'http') {\n    // biome-ignore lint/suspicious/noConsole:: intentional console output\n    console.log(`  Type: http`)\n    // biome-ignore lint/suspicious/noConsole:: intentional console output\n    console.log(`  URL: ${server.url}`)\n    if (server.headers) {\n      // biome-ignore lint/suspicious/noConsole:: intentional console output\n      console.log('  Headers:')\n      for (const [key, value] of Object.entries(server.headers)) {\n        // biome-ignore lint/suspicious/noConsole:: intentional console output\n        console.log(`    ${key}: ${value}`)\n      }\n    }\n    if (server.oauth?.clientId || server.oauth?.callbackPort) {\n      const parts: string[] = []\n      if (server.oauth.clientId) {\n        parts.push('client_id configured')\n        const clientConfig = getMcpClientConfig(name, server)\n        if (clientConfig?.clientSecret) parts.push('client_secret configured')\n      }\n      if (server.oauth.callbackPort)\n        parts.push(`callback_port ${server.oauth.callbackPort}`)\n      // biome-ignore lint/suspicious/noConsole:: intentional console output\n      console.log(`  OAuth: ${parts.join(', ')}`)\n    }\n  } else if (server.type === 'stdio') {\n    // biome-ignore lint/suspicious/noConsole:: intentional console output\n    console.log(`  Type: stdio`)\n    // biome-ignore lint/suspicious/noConsole:: intentional console output\n    console.log(`  Command: ${server.command}`)\n    const args = Array.isArray(server.args) ? server.args : []\n    // biome-ignore lint/suspicious/noConsole:: intentional console output\n    console.log(`  Args: ${args.join(' ')}`)\n    if (server.env) {\n      // biome-ignore lint/suspicious/noConsole:: intentional console output\n      console.log('  Environment:')\n      for (const [key, value] of Object.entries(server.env)) {\n        // biome-ignore lint/suspicious/noConsole:: intentional console output\n        console.log(`    ${key}=${value}`)\n      }\n    }\n  }\n  // biome-ignore lint/suspicious/noConsole:: intentional console output\n  console.log(\n    `\\nTo remove this server, run: claude mcp remove \"${name}\" -s ${server.scope}`,\n  )\n  // Use gracefulShutdown to properly clean up MCP server connections\n  // (process.exit bypasses cleanup handlers, leaving child processes orphaned)\n  await gracefulShutdown(0)\n}\n\n// mcp add-json (lines 4801–4870)\nexport async function mcpAddJsonHandler(\n  name: string,\n  json: string,\n  options: { scope?: string; clientSecret?: true },\n): Promise<void> {\n  try {\n    const scope = ensureConfigScope(options.scope)\n    const parsedJson = safeParseJSON(json)\n\n    // Read secret before writing config so cancellation doesn't leave partial state\n    const needsSecret =\n      options.clientSecret &&\n      parsedJson &&\n      typeof parsedJson === 'object' &&\n      'type' in parsedJson &&\n      (parsedJson.type === 'sse' || parsedJson.type === 'http') &&\n      'url' in parsedJson &&\n      typeof parsedJson.url === 'string' &&\n      'oauth' in parsedJson &&\n      parsedJson.oauth &&\n      typeof parsedJson.oauth === 'object' &&\n      'clientId' in parsedJson.oauth\n    const clientSecret = needsSecret ? 
await readClientSecret() : undefined\n\n    await addMcpConfig(name, parsedJson, scope)\n\n    const transportType =\n      parsedJson && typeof parsedJson === 'object' && 'type' in parsedJson\n        ? String(parsedJson.type || 'stdio')\n        : 'stdio'\n\n    if (\n      clientSecret &&\n      parsedJson &&\n      typeof parsedJson === 'object' &&\n      'type' in parsedJson &&\n      (parsedJson.type === 'sse' || parsedJson.type === 'http') &&\n      'url' in parsedJson &&\n      typeof parsedJson.url === 'string'\n    ) {\n      saveMcpClientSecret(\n        name,\n        { type: parsedJson.type, url: parsedJson.url },\n        clientSecret,\n      )\n    }\n\n    logEvent('tengu_mcp_add', {\n      scope:\n        scope as AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS,\n      source:\n        'json' as AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS,\n      type: transportType as AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS,\n    })\n\n    cliOk(`Added ${transportType} MCP server ${name} to ${scope} config`)\n  } catch (error) {\n    cliError((error as Error).message)\n  }\n}\n\n// mcp add-from-claude-desktop (lines 4881–4927)\nexport async function mcpAddFromDesktopHandler(options: {\n  scope?: string\n}): Promise<void> {\n  try {\n    const scope = ensureConfigScope(options.scope)\n    const platform = getPlatform()\n\n    logEvent('tengu_mcp_add', {\n      scope:\n        scope as AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS,\n      platform:\n        platform as AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS,\n      source:\n        'desktop' as AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS,\n    })\n\n    const { readClaudeDesktopMcpServers } = await import(\n      '../../utils/claudeDesktop.js'\n    )\n    const servers = await readClaudeDesktopMcpServers()\n\n    if (Object.keys(servers).length === 0) {\n      cliOk(\n        'No MCP servers found in Claude Desktop configuration or configuration file does not exist.',\n      )\n    }\n\n    const { unmount } = await render(\n      <AppStateProvider>\n        <KeybindingSetup>\n          <MCPServerDesktopImportDialog\n            servers={servers}\n            scope={scope}\n            onDone={() => {\n              unmount()\n            }}\n          />\n        </KeybindingSetup>\n      </AppStateProvider>,\n      { exitOnCtrlC: true },\n    )\n  } catch (error) {\n    cliError((error as Error).message)\n  }\n}\n\n// mcp reset-project-choices (lines 4935–4952)\nexport async function mcpResetChoicesHandler(): Promise<void> {\n  logEvent('tengu_mcp_reset_mcpjson_choices', {})\n  saveCurrentProjectConfig(current => ({\n    ...current,\n    enabledMcpjsonServers: [],\n    disabledMcpjsonServers: [],\n    enableAllProjectMcpServers: false,\n  }))\n  cliOk(\n    'All project-scoped (.mcp.json) server approvals and rejections have been reset.\\n' +\n      'You will be prompted for approval next time you start Claude Code.',\n  
)\n}\n"],"mappings":"AAAA;AACA;AACA;AACA;;AAEA,SAASA,IAAI,QAAQ,aAAa;AAClC,OAAOC,IAAI,MAAM,OAAO;AACxB,SAASC,GAAG,QAAQ,SAAS;AAC7B,OAAOC,KAAK,MAAM,OAAO;AACzB,SAASC,4BAA4B,QAAQ,kDAAkD;AAC/F,SAASC,MAAM,QAAQ,cAAc;AACrC,SAASC,eAAe,QAAQ,8CAA8C;AAC9E,SACE,KAAKC,0DAA0D,EAC/DC,QAAQ,QACH,mCAAmC;AAC1C,SACEC,oBAAoB,EACpBC,iCAAiC,EACjCC,kBAAkB,EAClBC,gBAAgB,EAChBC,mBAAmB,QACd,4BAA4B;AACnC,SACEC,eAAe,EACfC,+BAA+B,QAC1B,8BAA8B;AACrC,SACEC,YAAY,EACZC,gBAAgB,EAChBC,kBAAkB,EAClBC,oBAAoB,EACpBC,eAAe,QACV,8BAA8B;AACrC,cACEC,WAAW,EACXC,qBAAqB,QAChB,6BAA6B;AACpC,SACEC,yBAAyB,EACzBC,iBAAiB,EACjBC,aAAa,QACR,6BAA6B;AACpC,SAASC,gBAAgB,QAAQ,yBAAyB;AAC1D,SACEC,uBAAuB,EACvBC,eAAe,EACfC,wBAAwB,QACnB,uBAAuB;AAC9B,SAASC,gBAAgB,QAAQ,uBAAuB;AACxD,SAASC,gBAAgB,QAAQ,iCAAiC;AAClE,SAASC,aAAa,QAAQ,qBAAqB;AACnD,SAASC,WAAW,QAAQ,yBAAyB;AACrD,SAASC,QAAQ,EAAEC,KAAK,QAAQ,YAAY;AAE5C,eAAeC,oBAAoBA,CACjCC,IAAI,EAAE,MAAM,EACZC,MAAM,EAAEhB,qBAAqB,CAC9B,EAAEiB,OAAO,CAAC,MAAM,CAAC,CAAC;EACjB,IAAI;IACF,MAAMC,MAAM,GAAG,MAAM1B,eAAe,CAACuB,IAAI,EAAEC,MAAM,CAAC;IAClD,IAAIE,MAAM,CAACC,IAAI,KAAK,WAAW,EAAE;MAC/B,OAAO,aAAa;IACtB,CAAC,MAAM,IAAID,MAAM,CAACC,IAAI,KAAK,YAAY,EAAE;MACvC,OAAO,wBAAwB;IACjC,CAAC,MAAM;MACL,OAAO,qBAAqB;IAC9B;EACF,CAAC,CAAC,OAAOC,MAAM,EAAE;IACf,OAAO,oBAAoB;EAC7B;AACF;;AAEA;AACA,OAAO,eAAeC,eAAeA,CAAC;EACpCC,KAAK;EACLC;AAIF,CAHC,EAAE;EACDD,KAAK,CAAC,EAAE,OAAO;EACfC,OAAO,CAAC,EAAE,OAAO;AACnB,CAAC,CAAC,EAAEN,OAAO,CAAC,IAAI,CAAC,CAAC;EAChB,MAAMO,WAAW,GAAG5C,GAAG,CAAC,CAAC;EACzBM,QAAQ,CAAC,iBAAiB,EAAE,CAAC,CAAC,CAAC;EAE/B,IAAI;IACF,MAAMR,IAAI,CAAC8C,WAAW,CAAC;EACzB,CAAC,CAAC,OAAOC,KAAK,EAAE;IACd,IAAIjB,gBAAgB,CAACiB,KAAK,CAAC,EAAE;MAC3Bb,QAAQ,CAAC,oBAAoBY,WAAW,iBAAiB,CAAC;IAC5D;IACA,MAAMC,KAAK;EACb;EAEA,IAAI;IACF,MAAM;MAAEC;IAAM,CAAC,GAAG,MAAM,MAAM,CAAC,gBAAgB,CAAC;IAChD,MAAMA,KAAK,CAACF,WAAW,EAAE,SAAS,EAAE,KAAK,EAAE,KAAK,EAAEG,SAAS,EAAE,KAAK,CAAC;IACnE,MAAM;MAAEC;IAAe,CAAC,GAAG,MAAM,MAAM,CAAC,0BAA0B,CAAC;IACnE,MAAMA,cAAc,CAACJ,WAAW,EAAEF,KAAK,IAAI,KAAK,EAAEC,OAAO,IAAI,KAAK,CAAC;EACrE,CAAC,CAAC,OAAOE,KAAK,EAAE;IACdb,QAAQ,CAAC,sCAAsCa,KAAK,EAAE,CAAC;EACzD;AACF;;AAEA;AACA,OAAO,eAAeI,gBAAgBA,CACpCd,IAAI,EAAE,MAAM,EACZe,OAAO,EAAE;EAAEC,KAAK,CAAC,EAAE,MAAM;AAAC,CAAC,CAC5B,EAAEd,OAAO,CAAC,IAAI,CAAC,CAAC;EACf;EACA,MAAMe,mBAAmB,GAAGpC,kBAAkB,CAACmB,IAAI,CAAC;EAEpD,MAAMkB,oBAAoB,GAAGA,CAAA,KAAM;IACjC,IACED,mBAAmB,KAClBA,mBAAmB,CAACb,IAAI,KAAK,KAAK,IACjCa,mBAAmB,CAACb,IAAI,KAAK,MAAM,CAAC,EACtC;MACA/B,iCAAiC,CAAC2B,IAAI,EAAEiB,mBAAmB,CAAC;MAC5D7C,oBAAoB,CAAC4B,IAAI,EAAEiB,mBAAmB,CAAC;IACjD;EACF,CAAC;EAED,IAAI;IACF,IAAIF,OAAO,CAACC,KAAK,EAAE;MACjB,MAAMA,KAAK,GAAG7B,iBAAiB,CAAC4B,OAAO,CAACC,KAAK,CAAC;MAC9C7C,QAAQ,CAAC,kBAAkB,EAAE;QAC3B6B,IAAI,EAAEA,IAAI,IAAI9B,0DAA0D;QACxE8C,KAAK,EACHA,KAAK,IAAI9C;MACb,CAAC,CAAC;MAEF,MAAMa,eAAe,CAACiB,IAAI,EAAEgB,KAAK,CAAC;MAClCE,oBAAoB,CAAC,CAAC;MACtBC,OAAO,CAACC,MAAM,CAACC,KAAK,CAAC,sBAAsBrB,IAAI,SAASgB,KAAK,WAAW,CAAC;MACzElB,KAAK,CAAC,kBAAkBZ,yBAAyB,CAAC8B,KAAK,CAAC,EAAE,CAAC;IAC7D;;IAEA;IACA,MAAMM,aAAa,GAAGhC,uBAAuB,CAAC,CAAC;IAC/C,MAAMiC,YAAY,GAAGhC,eAAe,CAAC,CAAC;;IAEtC;IACA,MAAM;MAAEiC,OAAO,EAAEC;IAAe,CAAC,GAAG3C,oBAAoB,CAAC,SAAS,CAAC;IACnE,MAAM4C,aAAa,GAAG,CAAC,CAACD,cAAc,CAACzB,IAAI,CAAC;;IAE5C;IACA,MAAM2B,MAAM,EAAEC,KAAK,CAACC,OAAO,CAAC7C,WAAW,EAAE,SAAS,CAAC,CAAC,GAAG,EAAE;IACzD,IAAIsC,aAAa,CAACQ,UAAU,GAAG9B,IAAI,CAAC,EAAE2B,MAAM,CAACI,IAAI,CAAC,OAAO,CAAC;IAC1D,IAAIL,aAAa,EAAEC,MAAM,CAACI,IAAI,CAAC,SAAS,CAAC;IACzC,IAAIR,YAAY,CAACO,UAAU,GAAG9B,IAAI,CAAC,EAAE2B,MAAM,CAACI,IAAI,CAAC,MAAM,CAAC;IAExD,IAAIJ,MAAM,CAACK,MAAM,KAAK,CAAC,EAAE;MACvBnC,QAAQ,CAAC,mCAAmCG,IAAI,GAAG,CAAC;IACtD,CAAC,MAAM,IAAI2B,MAAM,CAACK,
MAAM,KAAK,CAAC,EAAE;MAC9B;MACA,MAAMhB,KAAK,GAAGW,MAAM,CAAC,CAAC,CAAC,CAAC;MACxBxD,QAAQ,CAAC,kBAAkB,EAAE;QAC3B6B,IAAI,EAAEA,IAAI,IAAI9B,0DAA0D;QACxE8C,KAAK,EACHA,KAAK,IAAI9C;MACb,CAAC,CAAC;MAEF,MAAMa,eAAe,CAACiB,IAAI,EAAEgB,KAAK,CAAC;MAClCE,oBAAoB,CAAC,CAAC;MACtBC,OAAO,CAACC,MAAM,CAACC,KAAK,CAClB,uBAAuBrB,IAAI,UAAUgB,KAAK,WAC5C,CAAC;MACDlB,KAAK,CAAC,kBAAkBZ,yBAAyB,CAAC8B,KAAK,CAAC,EAAE,CAAC;IAC7D,CAAC,MAAM;MACL;MACAG,OAAO,CAACc,MAAM,CAACZ,KAAK,CAAC,eAAerB,IAAI,gCAAgC,CAAC;MACzE2B,MAAM,CAACO,OAAO,CAAClB,KAAK,IAAI;QACtBG,OAAO,CAACc,MAAM,CAACZ,KAAK,CAClB,OAAOjC,aAAa,CAAC4B,KAAK,CAAC,KAAK9B,yBAAyB,CAAC8B,KAAK,CAAC,KAClE,CAAC;MACH,CAAC,CAAC;MACFG,OAAO,CAACc,MAAM,CAACZ,KAAK,CAAC,2CAA2C,CAAC;MACjEM,MAAM,CAACO,OAAO,CAAClB,KAAK,IAAI;QACtBG,OAAO,CAACc,MAAM,CAACZ,KAAK,CAAC,wBAAwBrB,IAAI,QAAQgB,KAAK,IAAI,CAAC;MACrE,CAAC,CAAC;MACFnB,QAAQ,CAAC,CAAC;IACZ;EACF,CAAC,CAAC,OAAOa,KAAK,EAAE;IACdb,QAAQ,CAAC,CAACa,KAAK,IAAIyB,KAAK,EAAEC,OAAO,CAAC;EACpC;AACF;;AAEA;AACA,OAAO,eAAeC,cAAcA,CAAA,CAAE,EAAEnC,OAAO,CAAC,IAAI,CAAC,CAAC;EACpD/B,QAAQ,CAAC,gBAAgB,EAAE,CAAC,CAAC,CAAC;EAC9B,MAAM;IAAEqD,OAAO,EAAEc;EAAQ,CAAC,GAAG,MAAM1D,gBAAgB,CAAC,CAAC;EACrD,IAAI2D,MAAM,CAACC,IAAI,CAACF,OAAO,CAAC,CAACN,MAAM,KAAK,CAAC,EAAE;IACrC;IACAS,OAAO,CAACC,GAAG,CACT,kEACF,CAAC;EACH,CAAC,MAAM;IACL;IACAD,OAAO,CAACC,GAAG,CAAC,iCAAiC,CAAC;;IAE9C;IACA,MAAMC,OAAO,GAAGJ,MAAM,CAACI,OAAO,CAACL,OAAO,CAAC;IACvC,MAAMM,OAAO,GAAG,MAAMhF,IAAI,CACxB+E,OAAO,EACP,OAAO,CAAC3C,IAAI,EAAEC,MAAM,CAAC,MAAM;MACzBD,IAAI;MACJC,MAAM;MACN4C,MAAM,EAAE,MAAM9C,oBAAoB,CAACC,IAAI,EAAEC,MAAM;IACjD,CAAC,CAAC,EACF;MAAE6C,WAAW,EAAEpE,+BAA+B,CAAC;IAAE,CACnD,CAAC;IAED,KAAK,MAAM;MAAEsB,IAAI;MAAEC,MAAM;MAAE4C;IAAO,CAAC,IAAID,OAAO,EAAE;MAC9C;MACA,IAAI3C,MAAM,CAACG,IAAI,KAAK,KAAK,EAAE;QACzB;QACAqC,OAAO,CAACC,GAAG,CAAC,GAAG1C,IAAI,KAAKC,MAAM,CAAC8C,GAAG,YAAYF,MAAM,EAAE,CAAC;MACzD,CAAC,MAAM,IAAI5C,MAAM,CAACG,IAAI,KAAK,MAAM,EAAE;QACjC;QACAqC,OAAO,CAACC,GAAG,CAAC,GAAG1C,IAAI,KAAKC,MAAM,CAAC8C,GAAG,aAAaF,MAAM,EAAE,CAAC;MAC1D,CAAC,MAAM,IAAI5C,MAAM,CAACG,IAAI,KAAK,gBAAgB,EAAE;QAC3C;QACAqC,OAAO,CAACC,GAAG,CAAC,GAAG1C,IAAI,KAAKC,MAAM,CAAC8C,GAAG,MAAMF,MAAM,EAAE,CAAC;MACnD,CAAC,MAAM,IAAI,CAAC5C,MAAM,CAACG,IAAI,IAAIH,MAAM,CAACG,IAAI,KAAK,OAAO,EAAE;QAClD,MAAM4C,IAAI,GAAGpB,KAAK,CAACqB,OAAO,CAAChD,MAAM,CAAC+C,IAAI,CAAC,GAAG/C,MAAM,CAAC+C,IAAI,GAAG,EAAE;QAC1D;QACAP,OAAO,CAACC,GAAG,CAAC,GAAG1C,IAAI,KAAKC,MAAM,CAACiD,OAAO,IAAIF,IAAI,CAACG,IAAI,CAAC,GAAG,CAAC,MAAMN,MAAM,EAAE,CAAC;MACzE;IACF;EACF;EACA;EACA;EACA,MAAMnD,gBAAgB,CAAC,CAAC,CAAC;AAC3B;;AAEA;AACA,OAAO,eAAe0D,aAAaA,CAACpD,IAAI,EAAE,MAAM,CAAC,EAAEE,OAAO,CAAC,IAAI,CAAC,CAAC;EAC/D/B,QAAQ,CAAC,eAAe,EAAE;IACxB6B,IAAI,EAAEA,IAAI,IAAI9B;EAChB,CAAC,CAAC;EACF,MAAM+B,MAAM,GAAGpB,kBAAkB,CAACmB,IAAI,CAAC;EACvC,IAAI,CAACC,MAAM,EAAE;IACXJ,QAAQ,CAAC,kCAAkCG,IAAI,EAAE,CAAC;EACpD;;EAEA;EACAyC,OAAO,CAACC,GAAG,CAAC,GAAG1C,IAAI,GAAG,CAAC;EACvB;EACAyC,OAAO,CAACC,GAAG,CAAC,YAAYtD,aAAa,CAACa,MAAM,CAACe,KAAK,CAAC,EAAE,CAAC;;EAEtD;EACA,MAAM6B,MAAM,GAAG,MAAM9C,oBAAoB,CAACC,IAAI,EAAEC,MAAM,CAAC;EACvD;EACAwC,OAAO,CAACC,GAAG,CAAC,aAAaG,MAAM,EAAE,CAAC;;EAElC;EACA,IAAI5C,MAAM,CAACG,IAAI,KAAK,KAAK,EAAE;IACzB;IACAqC,OAAO,CAACC,GAAG,CAAC,aAAa,CAAC;IAC1B;IACAD,OAAO,CAACC,GAAG,CAAC,UAAUzC,MAAM,CAAC8C,GAAG,EAAE,CAAC;IACnC,IAAI9C,MAAM,CAACoD,OAAO,EAAE;MAClB;MACAZ,OAAO,CAACC,GAAG,CAAC,YAAY,CAAC;MACzB,KAAK,MAAM,CAACY,GAAG,EAAEC,KAAK,CAAC,IAAIhB,MAAM,CAACI,OAAO,CAAC1C,MAAM,CAACoD,OAAO,CAAC,EAAE;QACzD;QACAZ,OAAO,CAACC,GAAG,CAAC,OAAOY,GAAG,KAAKC,KAAK,EAAE,CAAC;MACrC;IACF;IACA,IAAItD,MAAM,CAACuD,KAAK,EAAEC,QAAQ,IAAIxD,MAAM,CAACuD,KAAK,EAAEE,YAAY,EAAE;MACxD,MAAMC,KAAK,EAAE,MAAM,EAAE,GAAG
,EAAE;MAC1B,IAAI1D,MAAM,CAACuD,KAAK,CAACC,QAAQ,EAAE;QACzBE,KAAK,CAAC5B,IAAI,CAAC,sBAAsB,CAAC;QAClC,MAAM6B,YAAY,GAAGtF,kBAAkB,CAAC0B,IAAI,EAAEC,MAAM,CAAC;QACrD,IAAI2D,YAAY,EAAEC,YAAY,EAAEF,KAAK,CAAC5B,IAAI,CAAC,0BAA0B,CAAC;MACxE;MACA,IAAI9B,MAAM,CAACuD,KAAK,CAACE,YAAY,EAC3BC,KAAK,CAAC5B,IAAI,CAAC,iBAAiB9B,MAAM,CAACuD,KAAK,CAACE,YAAY,EAAE,CAAC;MAC1D;MACAjB,OAAO,CAACC,GAAG,CAAC,YAAYiB,KAAK,CAACR,IAAI,CAAC,IAAI,CAAC,EAAE,CAAC;IAC7C;EACF,CAAC,MAAM,IAAIlD,MAAM,CAACG,IAAI,KAAK,MAAM,EAAE;IACjC;IACAqC,OAAO,CAACC,GAAG,CAAC,cAAc,CAAC;IAC3B;IACAD,OAAO,CAACC,GAAG,CAAC,UAAUzC,MAAM,CAAC8C,GAAG,EAAE,CAAC;IACnC,IAAI9C,MAAM,CAACoD,OAAO,EAAE;MAClB;MACAZ,OAAO,CAACC,GAAG,CAAC,YAAY,CAAC;MACzB,KAAK,MAAM,CAACY,GAAG,EAAEC,KAAK,CAAC,IAAIhB,MAAM,CAACI,OAAO,CAAC1C,MAAM,CAACoD,OAAO,CAAC,EAAE;QACzD;QACAZ,OAAO,CAACC,GAAG,CAAC,OAAOY,GAAG,KAAKC,KAAK,EAAE,CAAC;MACrC;IACF;IACA,IAAItD,MAAM,CAACuD,KAAK,EAAEC,QAAQ,IAAIxD,MAAM,CAACuD,KAAK,EAAEE,YAAY,EAAE;MACxD,MAAMC,KAAK,EAAE,MAAM,EAAE,GAAG,EAAE;MAC1B,IAAI1D,MAAM,CAACuD,KAAK,CAACC,QAAQ,EAAE;QACzBE,KAAK,CAAC5B,IAAI,CAAC,sBAAsB,CAAC;QAClC,MAAM6B,YAAY,GAAGtF,kBAAkB,CAAC0B,IAAI,EAAEC,MAAM,CAAC;QACrD,IAAI2D,YAAY,EAAEC,YAAY,EAAEF,KAAK,CAAC5B,IAAI,CAAC,0BAA0B,CAAC;MACxE;MACA,IAAI9B,MAAM,CAACuD,KAAK,CAACE,YAAY,EAC3BC,KAAK,CAAC5B,IAAI,CAAC,iBAAiB9B,MAAM,CAACuD,KAAK,CAACE,YAAY,EAAE,CAAC;MAC1D;MACAjB,OAAO,CAACC,GAAG,CAAC,YAAYiB,KAAK,CAACR,IAAI,CAAC,IAAI,CAAC,EAAE,CAAC;IAC7C;EACF,CAAC,MAAM,IAAIlD,MAAM,CAACG,IAAI,KAAK,OAAO,EAAE;IAClC;IACAqC,OAAO,CAACC,GAAG,CAAC,eAAe,CAAC;IAC5B;IACAD,OAAO,CAACC,GAAG,CAAC,cAAczC,MAAM,CAACiD,OAAO,EAAE,CAAC;IAC3C,MAAMF,IAAI,GAAGpB,KAAK,CAACqB,OAAO,CAAChD,MAAM,CAAC+C,IAAI,CAAC,GAAG/C,MAAM,CAAC+C,IAAI,GAAG,EAAE;IAC1D;IACAP,OAAO,CAACC,GAAG,CAAC,WAAWM,IAAI,CAACG,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC;IACxC,IAAIlD,MAAM,CAAC6D,GAAG,EAAE;MACd;MACArB,OAAO,CAACC,GAAG,CAAC,gBAAgB,CAAC;MAC7B,KAAK,MAAM,CAACY,GAAG,EAAEC,KAAK,CAAC,IAAIhB,MAAM,CAACI,OAAO,CAAC1C,MAAM,CAAC6D,GAAG,CAAC,EAAE;QACrD;QACArB,OAAO,CAACC,GAAG,CAAC,OAAOY,GAAG,IAAIC,KAAK,EAAE,CAAC;MACpC;IACF;EACF;EACA;EACAd,OAAO,CAACC,GAAG,CACT,oDAAoD1C,IAAI,QAAQC,MAAM,CAACe,KAAK,EAC9E,CAAC;EACD;EACA;EACA,MAAMtB,gBAAgB,CAAC,CAAC,CAAC;AAC3B;;AAEA;AACA,OAAO,eAAeqE,iBAAiBA,CACrC/D,IAAI,EAAE,MAAM,EACZgE,IAAI,EAAE,MAAM,EACZjD,OAAO,EAAE;EAAEC,KAAK,CAAC,EAAE,MAAM;EAAE6C,YAAY,CAAC,EAAE,IAAI;AAAC,CAAC,CACjD,EAAE3D,OAAO,CAAC,IAAI,CAAC,CAAC;EACf,IAAI;IACF,MAAMc,KAAK,GAAG7B,iBAAiB,CAAC4B,OAAO,CAACC,KAAK,CAAC;IAC9C,MAAMiD,UAAU,GAAGtE,aAAa,CAACqE,IAAI,CAAC;;IAEtC;IACA,MAAME,WAAW,GACfnD,OAAO,CAAC8C,YAAY,IACpBI,UAAU,IACV,OAAOA,UAAU,KAAK,QAAQ,IAC9B,MAAM,IAAIA,UAAU,KACnBA,UAAU,CAAC7D,IAAI,KAAK,KAAK,IAAI6D,UAAU,CAAC7D,IAAI,KAAK,MAAM,CAAC,IACzD,KAAK,IAAI6D,UAAU,IACnB,OAAOA,UAAU,CAAClB,GAAG,KAAK,QAAQ,IAClC,OAAO,IAAIkB,UAAU,IACrBA,UAAU,CAACT,KAAK,IAChB,OAAOS,UAAU,CAACT,KAAK,KAAK,QAAQ,IACpC,UAAU,IAAIS,UAAU,CAACT,KAAK;IAChC,MAAMK,YAAY,GAAGK,WAAW,GAAG,MAAM3F,gBAAgB,CAAC,CAAC,GAAGqC,SAAS;IAEvE,MAAMjC,YAAY,CAACqB,IAAI,EAAEiE,UAAU,EAAEjD,KAAK,CAAC;IAE3C,MAAMmD,aAAa,GACjBF,UAAU,IAAI,OAAOA,UAAU,KAAK,QAAQ,IAAI,MAAM,IAAIA,UAAU,GAChEG,MAAM,CAACH,UAAU,CAAC7D,IAAI,IAAI,OAAO,CAAC,GAClC,OAAO;IAEb,IACEyD,YAAY,IACZI,UAAU,IACV,OAAOA,UAAU,KAAK,QAAQ,IAC9B,MAAM,IAAIA,UAAU,KACnBA,UAAU,CAAC7D,IAAI,KAAK,KAAK,IAAI6D,UAAU,CAAC7D,IAAI,KAAK,MAAM,CAAC,IACzD,KAAK,IAAI6D,UAAU,IACnB,OAAOA,UAAU,CAAClB,GAAG,KAAK,QAAQ,EAClC;MACAvE,mBAAmB,CACjBwB,IAAI,EACJ;QAAEI,IAAI,EAAE6D,UAAU,CAAC7D,IAAI;QAAE2C,GAAG,EAAEkB,UAAU,CAAClB;MAAI,CAAC,EAC9Cc,YACF,CAAC;IACH;IAEA1F,QAAQ,CAAC,eAAe,EAAE;MACxB6C,KAAK,EACHA,KAAK,IAAI9C,0DAA0D;MACrEmG,MAAM,EACJ,MAAM,IAAInG,0DAA0D;MACtEkC,IAAI,EAAE+D,aAAa,IAAIjG;IAC
zB,CAAC,CAAC;IAEF4B,KAAK,CAAC,SAASqE,aAAa,eAAenE,IAAI,OAAOgB,KAAK,SAAS,CAAC;EACvE,CAAC,CAAC,OAAON,KAAK,EAAE;IACdb,QAAQ,CAAC,CAACa,KAAK,IAAIyB,KAAK,EAAEC,OAAO,CAAC;EACpC;AACF;;AAEA;AACA,OAAO,eAAekC,wBAAwBA,CAACvD,OAAO,EAAE;EACtDC,KAAK,CAAC,EAAE,MAAM;AAChB,CAAC,CAAC,EAAEd,OAAO,CAAC,IAAI,CAAC,CAAC;EAChB,IAAI;IACF,MAAMc,KAAK,GAAG7B,iBAAiB,CAAC4B,OAAO,CAACC,KAAK,CAAC;IAC9C,MAAMuD,QAAQ,GAAG3E,WAAW,CAAC,CAAC;IAE9BzB,QAAQ,CAAC,eAAe,EAAE;MACxB6C,KAAK,EACHA,KAAK,IAAI9C,0DAA0D;MACrEqG,QAAQ,EACNA,QAAQ,IAAIrG,0DAA0D;MACxEmG,MAAM,EACJ,SAAS,IAAInG;IACjB,CAAC,CAAC;IAEF,MAAM;MAAEsG;IAA4B,CAAC,GAAG,MAAM,MAAM,CAClD,8BACF,CAAC;IACD,MAAMhD,OAAO,GAAG,MAAMgD,2BAA2B,CAAC,CAAC;IAEnD,IAAIjC,MAAM,CAACC,IAAI,CAAChB,OAAO,CAAC,CAACQ,MAAM,KAAK,CAAC,EAAE;MACrClC,KAAK,CACH,4FACF,CAAC;IACH;IAEA,MAAM;MAAE2E;IAAQ,CAAC,GAAG,MAAMzG,MAAM,CAC9B,CAAC,gBAAgB;AACvB,QAAQ,CAAC,eAAe;AACxB,UAAU,CAAC,4BAA4B,CAC3B,OAAO,CAAC,CAACwD,OAAO,CAAC,CACjB,KAAK,CAAC,CAACR,KAAK,CAAC,CACb,MAAM,CAAC,CAAC,MAAM;UACZyD,OAAO,CAAC,CAAC;QACX,CAAC,CAAC;AAEd,QAAQ,EAAE,eAAe;AACzB,MAAM,EAAE,gBAAgB,CAAC,EACnB;MAAEC,WAAW,EAAE;IAAK,CACtB,CAAC;EACH,CAAC,CAAC,OAAOhE,KAAK,EAAE;IACdb,QAAQ,CAAC,CAACa,KAAK,IAAIyB,KAAK,EAAEC,OAAO,CAAC;EACpC;AACF;;AAEA;AACA,OAAO,eAAeuC,sBAAsBA,CAAA,CAAE,EAAEzE,OAAO,CAAC,IAAI,CAAC,CAAC;EAC5D/B,QAAQ,CAAC,iCAAiC,EAAE,CAAC,CAAC,CAAC;EAC/CqB,wBAAwB,CAACoF,OAAO,KAAK;IACnC,GAAGA,OAAO;IACVC,qBAAqB,EAAE,EAAE;IACzBC,sBAAsB,EAAE,EAAE;IAC1BC,0BAA0B,EAAE;EAC9B,CAAC,CAAC,CAAC;EACHjF,KAAK,CACH,mFAAmF,GACjF,oEACJ,CAAC;AACH","ignoreList":[]} diff --git a/src/commands/mcp/doctorCommand.test.ts b/src/commands/mcp/doctorCommand.test.ts new file mode 100644 index 00000000..8e4754c4 --- /dev/null +++ b/src/commands/mcp/doctorCommand.test.ts @@ -0,0 +1,19 @@ +import assert from 'node:assert/strict' +import test from 'node:test' + +import { Command } from '@commander-js/extra-typings' + +import { registerMcpDoctorCommand } from './doctorCommand.js' + +test('registerMcpDoctorCommand adds the doctor subcommand with expected options', () => { + const mcp = new Command('mcp') + + registerMcpDoctorCommand(mcp) + + const doctor = mcp.commands.find(command => command.name() === 'doctor') + assert.ok(doctor) + assert.equal(doctor?.usage(), '[options] [name]') + + const optionFlags = doctor?.options.map(option => option.long) + assert.deepEqual(optionFlags, ['--scope', '--config-only', '--json']) +}) diff --git a/src/commands/mcp/doctorCommand.ts b/src/commands/mcp/doctorCommand.ts new file mode 100644 index 00000000..75ed6a10 --- /dev/null +++ b/src/commands/mcp/doctorCommand.ts @@ -0,0 +1,25 @@ +/** + * MCP doctor CLI subcommand. + */ +import { type Command } from '@commander-js/extra-typings' + +export function registerMcpDoctorCommand(mcp: Command): void { + mcp + .command('doctor [name]') + .description( + 'Diagnose MCP configuration, precedence, disabled/pending state, and connection health. ' + + 'Note: unless --config-only is used, stdio servers may be spawned and remote servers may be contacted. 
' + + 'Only use this command in directories you trust.', + ) + .option('-s, --scope ', 'Restrict config analysis to a specific scope (local, project, user, or enterprise)') + .option('--config-only', 'Skip live connection checks and only analyze configuration state') + .option('--json', 'Output the diagnostics report as JSON') + .action(async (name: string | undefined, options: { + scope?: string + configOnly?: boolean + json?: boolean + }) => { + const { mcpDoctorHandler } = await import('../../cli/handlers/mcp.js') + await mcpDoctorHandler(name, options) + }) +} diff --git a/src/main.tsx b/src/main.tsx index 07a3a3d2..9988dea7 100644 --- a/src/main.tsx +++ b/src/main.tsx @@ -139,6 +139,7 @@ import { validateUuid } from './utils/uuid.js'; // Plugin startup checks are now handled non-blockingly in REPL.tsx import { registerMcpAddCommand } from 'src/commands/mcp/addCommand.js'; +import { registerMcpDoctorCommand } from 'src/commands/mcp/doctorCommand.js'; import { registerMcpXaaIdpCommand } from 'src/commands/mcp/xaaIdpCommand.js'; import { logPermissionContextForAnts } from 'src/services/internalLogging.js'; import { fetchClaudeAIMcpConfigsIfEligible } from 'src/services/mcp/claudeai.js'; @@ -3887,6 +3888,7 @@ async function run(): Promise { // Register the mcp add subcommand (extracted for testability) registerMcpAddCommand(mcp); + registerMcpDoctorCommand(mcp); if (isXaaEnabled()) { registerMcpXaaIdpCommand(mcp); } From fb27164ddfb9d4260bda7e1bd45f7c5361927073 Mon Sep 17 00:00:00 2001 From: gnanam1990 Date: Thu, 2 Apr 2026 14:55:05 +0530 Subject: [PATCH 32/35] fix(mcp): await failed transport cleanup on Windows Wait for failed MCP transport cleanup before command exit so targeted live checks do not crash on Windows. Co-Authored-By: Claude --- src/services/mcp/client.test.ts | 48 +++++++++++++++++++++++++++++++++ src/services/mcp/client.ts | 25 +++++++++++++---- 2 files changed, 68 insertions(+), 5 deletions(-) create mode 100644 src/services/mcp/client.test.ts diff --git a/src/services/mcp/client.test.ts b/src/services/mcp/client.test.ts new file mode 100644 index 00000000..6f69ee7b --- /dev/null +++ b/src/services/mcp/client.test.ts @@ -0,0 +1,48 @@ +import assert from 'node:assert/strict' +import test from 'node:test' + +import { cleanupFailedConnection } from './client.js' + +test('cleanupFailedConnection awaits transport close before resolving', async () => { + let closed = false + let resolveClose: (() => void) | undefined + + const transport = { + close: async () => + await new Promise(resolve => { + resolveClose = () => { + closed = true + resolve() + } + }), + } + + const cleanupPromise = cleanupFailedConnection(transport) + + assert.equal(closed, false) + resolveClose?.() + await cleanupPromise + assert.equal(closed, true) +}) + +test('cleanupFailedConnection closes in-process server and transport', async () => { + let inProcessClosed = false + let transportClosed = false + + const inProcessServer = { + close: async () => { + inProcessClosed = true + }, + } + + const transport = { + close: async () => { + transportClosed = true + }, + } + + await cleanupFailedConnection(transport, inProcessServer) + + assert.equal(inProcessClosed, true) + assert.equal(transportClosed, true) +}) diff --git a/src/services/mcp/client.ts b/src/services/mcp/client.ts index b053dbb6..8857b56c 100644 --- a/src/services/mcp/client.ts +++ b/src/services/mcp/client.ts @@ -560,6 +560,22 @@ function getRemoteMcpServerConnectionBatchSize(): number { ) } +type InProcessMcpServer = { + connect(t: Transport): 
Promise<void>
+  close(): Promise<void>
+}
+
+export async function cleanupFailedConnection(
+  transport: Pick<Transport, 'close'>,
+  inProcessServer?: Pick<InProcessMcpServer, 'close'>,
+): Promise<void> {
+  if (inProcessServer) {
+    await inProcessServer.close().catch(() => {})
+  }
+
+  await transport.close().catch(() => {})
+}
+
 function isLocalMcpServer(config: ScopedMcpServerConfig): boolean {
   return !config.type || config.type === 'stdio' || config.type === 'sdk'
 }
@@ -606,9 +622,7 @@ export const connectToServer = memoize(
     },
   ): Promise => {
     const connectStartTime = Date.now()
-    let inProcessServer:
-      | { connect(t: Transport): Promise<void>; close(): Promise<void> }
-      | undefined
+    let inProcessServer: InProcessMcpServer | undefined

     try {
       let transport
@@ -1145,9 +1159,10 @@
       })
     }
     if (inProcessServer) {
-      inProcessServer.close().catch(() => { })
+      await cleanupFailedConnection(transport, inProcessServer)
+    } else {
+      await cleanupFailedConnection(transport)
     }
-    transport.close().catch(() => { })
     if (stderrOutput) {
       logMCPError(name, `Server stderr: ${stderrOutput}`)
     }

From 64ba7fdb9af883d714e963df4459962be297bb3d Mon Sep 17 00:00:00 2001
From: Misha Skvortsov
Date: Thu, 2 Apr 2026 12:27:12 +0300
Subject: [PATCH 33/35] refactor: enhance Atomic Chat API URL handling

- Updated the `getAtomicChatApiBaseUrl` function to parse the base URL
  correctly and ensure the pathname is formatted without trailing version
  segments.
- Cleared search and hash components from the URL to standardize the output.

This change improves the robustness of the URL handling for the Atomic Chat
provider.
---
 scripts/provider-discovery.ts | 12 ++++++++++--
 1 file changed, 10 insertions(+), 2 deletions(-)

diff --git a/scripts/provider-discovery.ts b/scripts/provider-discovery.ts
index 9c463f2f..e65d2e8f 100644
--- a/scripts/provider-discovery.ts
+++ b/scripts/provider-discovery.ts
@@ -97,8 +97,16 @@ export async function listOllamaModels(
 
 // ── Atomic Chat discovery (Apple Silicon local LLMs at 127.0.0.1:1337) ──────
 export function getAtomicChatApiBaseUrl(baseUrl?: string): string {
-  const raw = baseUrl || process.env.ATOMIC_CHAT_BASE_URL || DEFAULT_ATOMIC_CHAT_BASE_URL
-  return trimTrailingSlash(raw)
+  const parsed = new URL(
+    baseUrl || process.env.ATOMIC_CHAT_BASE_URL || DEFAULT_ATOMIC_CHAT_BASE_URL,
+  )
+  const pathname = trimTrailingSlash(parsed.pathname)
+  parsed.pathname = pathname.endsWith('/v1')
+    ? 
pathname.slice(0, -3) || '/' + : pathname || '/' + parsed.search = '' + parsed.hash = '' + return trimTrailingSlash(parsed.toString()) } export function getAtomicChatChatBaseUrl(baseUrl?: string): string { From 0a428394757cd918efbff6ff8cc78054c76d85d2 Mon Sep 17 00:00:00 2001 From: Rithul Kamesh Date: Thu, 2 Apr 2026 15:38:54 +0530 Subject: [PATCH 34/35] fix(github): address PR feedback for onboarding flow - Set competing provider flags to undefined in updateSettingsForSource to ensure clean GitHub boot - Fix resolveProviderRequest to default to github:copilot when OPENAI_MODEL is unset - Hydrate secure tokens and managed settings in system-check.ts to prevent false negatives - Add models:read scope to GitHub device flow --- scripts/system-check.ts | 7 +++++++ src/commands/onboard-github/onboard-github.tsx | 15 ++++++++++++--- src/services/api/providerConfig.ts | 3 ++- src/services/github/deviceFlow.ts | 2 +- 4 files changed, 22 insertions(+), 5 deletions(-) diff --git a/scripts/system-check.ts b/scripts/system-check.ts index 6626149a..34990b04 100644 --- a/scripts/system-check.ts +++ b/scripts/system-check.ts @@ -447,6 +447,13 @@ async function main(): Promise { const options = parseOptions(process.argv.slice(2)) const results: CheckResult[] = [] + const { enableConfigs } = await import('../src/utils/config.js') + enableConfigs() + const { applySafeConfigEnvironmentVariables } = await import('../src/utils/managedEnv.js') + applySafeConfigEnvironmentVariables() + const { hydrateGithubModelsTokenFromSecureStorage } = await import('../src/utils/githubModelsCredentials.js') + hydrateGithubModelsTokenFromSecureStorage() + results.push(checkNodeVersion()) results.push(checkBunRuntime()) results.push(checkBuildArtifacts()) diff --git a/src/commands/onboard-github/onboard-github.tsx b/src/commands/onboard-github/onboard-github.tsx index 26088392..66326957 100644 --- a/src/commands/onboard-github/onboard-github.tsx +++ b/src/commands/onboard-github/onboard-github.tsx @@ -29,6 +29,11 @@ function mergeUserSettingsEnv(model: string): { ok: boolean; detail?: string } { env: { CLAUDE_CODE_USE_GITHUB: '1', OPENAI_MODEL: model, + CLAUDE_CODE_USE_OPENAI: undefined as any, + CLAUDE_CODE_USE_GEMINI: undefined as any, + CLAUDE_CODE_USE_BEDROCK: undefined as any, + CLAUDE_CODE_USE_VERTEX: undefined as any, + CLAUDE_CODE_USE_FOUNDRY: undefined as any, }, }) if (error) { @@ -49,6 +54,7 @@ function OnboardGithub(props: { verification_uri: string } | null>(null) const [patDraft, setPatDraft] = useState('') + const [cursorOffset, setCursorOffset] = useState(0) const finalize = useCallback( async (token: string, model: string = DEFAULT_MODEL) => { @@ -117,7 +123,7 @@ function OnboardGithub(props: { {errorMsg} { + onChange={(v: string) => { if (v === 'cancel') { onDone('GitHub onboard cancelled', { display: 'system' }) return diff --git a/src/services/api/providerConfig.ts b/src/services/api/providerConfig.ts index bbbc2cb9..90643aa1 100644 --- a/src/services/api/providerConfig.ts +++ b/src/services/api/providerConfig.ts @@ -194,11 +194,12 @@ export function resolveProviderRequest(options?: { baseUrl?: string fallbackModel?: string }): ResolvedProviderRequest { + const isGithubMode = isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB) const requestedModel = options?.model?.trim() || process.env.OPENAI_MODEL?.trim() || options?.fallbackModel?.trim() || - 'gpt-4o' + (isGithubMode ? 'github:copilot' : 'gpt-4o') const descriptor = parseModelDescriptor(requestedModel) const rawBaseUrl = options?.baseUrl ?? 
diff --git a/src/services/github/deviceFlow.ts b/src/services/github/deviceFlow.ts index 0e207b7f..379d757e 100644 --- a/src/services/github/deviceFlow.ts +++ b/src/services/github/deviceFlow.ts @@ -11,7 +11,7 @@ export const GITHUB_DEVICE_ACCESS_TOKEN_URL = 'https://github.com/login/oauth/access_token' /** Match runtime devsper github_oauth DEFAULT_SCOPE */ -export const DEFAULT_GITHUB_DEVICE_SCOPE = 'read:user' +export const DEFAULT_GITHUB_DEVICE_SCOPE = 'read:user,models:read' export class GitHubDeviceFlowError extends Error { constructor(message: string) { From 7c09b1f01cd062db42d6462e429623335c781f7b Mon Sep 17 00:00:00 2001 From: Misha Skvortsov Date: Thu, 2 Apr 2026 13:58:50 +0300 Subject: [PATCH 35/35] docs: add Atomic Chat to README provider examples and launch profiles Made-with: Cursor --- README.md | 30 +++++++++++++++++++++++++++--- 1 file changed, 27 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 8a0690f9..17c0e2f3 100644 --- a/README.md +++ b/README.md @@ -2,7 +2,7 @@ Use Claude Code with **any LLM** — not just Claude. -OpenClaude is a fork of the [Claude Code source leak](https://gitlawb.com/node/repos/z6MkgKkb/instructkr-claude-code) (exposed via npm source maps on March 31, 2026). We added an OpenAI-compatible provider shim so you can plug in GPT-4o, DeepSeek, Gemini, Llama, Mistral, or any model that speaks the OpenAI chat completions API. It now also supports the ChatGPT Codex backend for `codexplan` and `codexspark`. +OpenClaude is a fork of the [Claude Code source leak](https://gitlawb.com/node/repos/z6MkgKkb/instructkr-claude-code) (exposed via npm source maps on March 31, 2026). We added an OpenAI-compatible provider shim so you can plug in GPT-4o, DeepSeek, Gemini, Llama, Mistral, or any model that speaks the OpenAI chat completions API. It now also supports the ChatGPT Codex backend for `codexplan` and `codexspark`, and local inference via [Atomic Chat](https://atomic.chat/) on Apple Silicon. All of Claude Code's tools work — bash, file read/write/edit, grep, glob, agents, tasks, MCP — just powered by whatever model you choose. @@ -130,6 +130,23 @@ export OPENAI_MODEL=llama3.3:70b # no API key needed for local models ``` +### Atomic Chat (local, Apple Silicon) + +```bash +export CLAUDE_CODE_USE_OPENAI=1 +export OPENAI_BASE_URL=http://127.0.0.1:1337/v1 +export OPENAI_MODEL=your-model-name +# no API key needed for local models +``` + +Or use the profile launcher: + +```bash +bun run dev:atomic-chat +``` + +Download Atomic Chat from [atomic.chat](https://atomic.chat/). The app must be running with a model loaded before launching. + ### LM Studio (local) ```bash @@ -181,7 +198,7 @@ export OPENAI_MODEL=gpt-4o | Variable | Required | Description | |----------|----------|-------------| | `CLAUDE_CODE_USE_OPENAI` | Yes | Set to `1` to enable the OpenAI provider | -| `OPENAI_API_KEY` | Yes* | Your API key (*not needed for local models like Ollama) | +| `OPENAI_API_KEY` | Yes* | Your API key (*not needed for local models like Ollama/Atomic Chat) | | `OPENAI_MODEL` | Yes | Model name (e.g. `gpt-4o`, `deepseek-chat`, `llama3.3:70b`) | | `OPENAI_BASE_URL` | No | API endpoint (defaults to `https://api.openai.com/v1`) | | `CODEX_API_KEY` | Codex only | Codex/ChatGPT access token override | @@ -241,6 +258,9 @@ bun run profile:codex # openai bootstrap with explicit key bun run profile:init -- --provider openai --api-key sk-... 
+# atomic-chat bootstrap (auto-detects running model) +bun run profile:init -- --provider atomic-chat + # ollama bootstrap with custom model bun run profile:init -- --provider ollama --model llama3.1:8b @@ -261,6 +281,9 @@ bun run dev:openai # Ollama profile (defaults: localhost:11434, llama3.1:8b) bun run dev:ollama + +# Atomic Chat profile (Apple Silicon local LLMs at 127.0.0.1:1337) +bun run dev:atomic-chat ``` `profile:recommend` ranks installed Ollama models for `latency`, `balanced`, or `coding`, and `profile:auto` can persist the recommendation directly. @@ -271,8 +294,9 @@ Goal-based Ollama selection only recommends among models that are already instal Use `profile:codex` or `--provider codex` when you want the ChatGPT Codex backend. -`dev:openai`, `dev:ollama`, and `dev:codex` run `doctor:runtime` first and only launch the app if checks pass. +`dev:openai`, `dev:ollama`, `dev:atomic-chat`, and `dev:codex` run `doctor:runtime` first and only launch the app if checks pass. For `dev:ollama`, make sure Ollama is running locally before launch. +For `dev:atomic-chat`, make sure Atomic Chat is running with a model loaded. ---