Merge upstream/main into docs/non-technical-setup-guide
This commit is contained in:
6
.github/workflows/pr-checks.yml
vendored
6
.github/workflows/pr-checks.yml
vendored
@@ -12,15 +12,15 @@ jobs:
|
||||
|
||||
steps:
|
||||
- name: Check out repository
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
|
||||
- name: Set up Node.js
|
||||
uses: actions/setup-node@v4
|
||||
uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4.4.0
|
||||
with:
|
||||
node-version: 22
|
||||
|
||||
- name: Set up Bun
|
||||
uses: oven-sh/setup-bun@v2
|
||||
uses: oven-sh/setup-bun@4bc047ad259df6fc24a6c9b0f9a0cb08cf17fbe5 # v2.0.1
|
||||
with:
|
||||
bun-version: 1.3.11
|
||||
|
||||
|
||||
10
README.md
10
README.md
@@ -2,7 +2,7 @@
|
||||
|
||||
Use Claude Code with **any LLM** — not just Claude.
|
||||
|
||||
OpenClaude is a fork of the [Claude Code source leak](https://gitlawb.com/node/repos/z6MkgKkb/instructkr-claude-code) (exposed via npm source maps on March 31, 2026). We added an OpenAI-compatible provider shim so you can plug in GPT-4o, DeepSeek, Gemini, Llama, Mistral, or any model that speaks the OpenAI chat completions API. It now also supports the ChatGPT Codex backend for `codexplan` and `codexspark`.
|
||||
OpenClaude is a fork of the [Claude Code source leak](https://gitlawb.com/node/repos/z6MkgKkb/instructkr-claude-code) (exposed via npm source maps on March 31, 2026). We added an OpenAI-compatible provider shim so you can plug in GPT-4o, DeepSeek, Gemini, Llama, Mistral, or any model that speaks the OpenAI chat completions API. It now also supports the ChatGPT Codex backend for `codexplan` and `codexspark`, and local inference via [Atomic Chat](https://atomic.chat/) on Apple Silicon.
|
||||
|
||||
All of Claude Code's tools work — bash, file read/write/edit, grep, glob, agents, tasks, MCP — just powered by whatever model you choose.
|
||||
|
||||
@@ -36,6 +36,8 @@ The package name is `@gitlawb/openclaude`, but the command you run is:
|
||||
openclaude
|
||||
```
|
||||
|
||||
If you install via npm and later see `ripgrep not found`, install ripgrep system-wide and confirm `rg --version` works in the same terminal before starting OpenClaude.
|
||||
|
||||
---
|
||||
|
||||
## Fastest Setup
|
||||
@@ -78,7 +80,7 @@ That is enough to start with OpenAI.
|
||||
|
||||
### Advanced
|
||||
|
||||
- Want source builds, Bun, local profiles, and runtime checks: [Advanced Setup](docs/advanced-setup.md)
|
||||
- Want source builds, Bun, local profiles, runtime checks, or more provider choices: [Advanced Setup](docs/advanced-setup.md)
|
||||
|
||||
---
|
||||
|
||||
@@ -96,7 +98,9 @@ Best if you want to run models locally on your own machine.
|
||||
|
||||
Best if you already use the Codex CLI or ChatGPT Codex backend.
|
||||
|
||||
---
|
||||
### Atomic Chat
|
||||
|
||||
Best if you want local inference on Apple Silicon with Atomic Chat. See [Advanced Setup](docs/advanced-setup.md).
|
||||
|
||||
---
|
||||
|
||||
|
||||
146
atomic_chat_provider.py
Normal file
146
atomic_chat_provider.py
Normal file
@@ -0,0 +1,146 @@
|
||||
"""
|
||||
atomic_chat_provider.py
|
||||
-----------------------
|
||||
Adds native Atomic Chat support to openclaude.
|
||||
Lets Claude Code route requests to any locally-running model via
|
||||
Atomic Chat (Apple Silicon only) at 127.0.0.1:1337.
|
||||
|
||||
Atomic Chat exposes an OpenAI-compatible API, so messages are forwarded
|
||||
directly without translation.
|
||||
|
||||
Usage (.env):
|
||||
PREFERRED_PROVIDER=atomic-chat
|
||||
ATOMIC_CHAT_BASE_URL=http://127.0.0.1:1337
|
||||
"""
|
||||
|
||||
import httpx
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
from typing import AsyncIterator
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
ATOMIC_CHAT_BASE_URL = os.getenv("ATOMIC_CHAT_BASE_URL", "http://127.0.0.1:1337")
|
||||
|
||||
|
||||
def _api_url(path: str) -> str:
|
||||
return f"{ATOMIC_CHAT_BASE_URL}/v1{path}"
|
||||
|
||||
|
||||
async def check_atomic_chat_running() -> bool:
    """Return True iff a local Atomic Chat server answers GET /v1/models.

    Any failure at all — connection refused, timeout, DNS error — is
    treated as "not running" rather than raised to the caller.
    """
    try:
        async with httpx.AsyncClient(timeout=3.0) as client:
            status = (await client.get(_api_url("/models"))).status_code
            return status == 200
    except Exception:
        return False
|
||||
|
||||
|
||||
async def list_atomic_chat_models() -> list[str]:
    """Return the ids of every model the local Atomic Chat server exposes.

    On any failure (server not running, HTTP error, bad JSON) a warning is
    logged and an empty list is returned instead of raising.
    """
    try:
        async with httpx.AsyncClient(timeout=5.0) as client:
            resp = await client.get(_api_url("/models"))
            resp.raise_for_status()
            payload = resp.json()
            return [model["id"] for model in payload.get("data", [])]
    except Exception as e:
        logger.warning(f"Could not list Atomic Chat models: {e}")
        return []
|
||||
|
||||
|
||||
async def atomic_chat(
    model: str,
    messages: list[dict],
    system: str | None = None,
    max_tokens: int = 4096,
    temperature: float = 1.0,
) -> dict:
    """Send a non-streaming chat completion to Atomic Chat.

    Forwards *messages* (with an optional *system* prompt prepended as an
    OpenAI system message) to the local OpenAI-compatible endpoint and
    converts the response into an Anthropic-style message dict.

    Args:
        model: Model id as reported by the Atomic Chat /models endpoint.
        messages: OpenAI-format chat messages; the list is not mutated.
        system: Optional system prompt.
        max_tokens: Completion token limit forwarded to the server.
        temperature: Sampling temperature forwarded to the server.

    Returns:
        An Anthropic-style message dict with a single text content block.

    Raises:
        httpx.HTTPStatusError: if the server responds with a non-2xx status.
    """
    chat_messages = list(messages)
    if system:
        chat_messages.insert(0, {"role": "system", "content": system})

    payload = {
        "model": model,
        "messages": chat_messages,
        "max_tokens": max_tokens,
        "temperature": temperature,
        "stream": False,
    }

    async with httpx.AsyncClient(timeout=120.0) as client:
        resp = await client.post(_api_url("/chat/completions"), json=payload)
        resp.raise_for_status()
        data = resp.json()

    # `or [{}]` also guards a present-but-EMPTY "choices" list, which
    # `data.get("choices", [{}])[0]` would turn into an IndexError.
    choice = (data.get("choices") or [{}])[0]
    # "message" and "content" can each be null (e.g. tool-call-only
    # replies); coerce to "" so the Anthropic text block stays a str.
    assistant_text = (choice.get("message") or {}).get("content") or ""
    usage = data.get("usage") or {}

    return {
        "id": data.get("id", "msg_atomic_chat"),
        "type": "message",
        "role": "assistant",
        "content": [{"type": "text", "text": assistant_text}],
        "model": model,
        "stop_reason": "end_turn",
        "stop_sequence": None,
        "usage": {
            "input_tokens": usage.get("prompt_tokens", 0),
            "output_tokens": usage.get("completion_tokens", 0),
        },
    }
|
||||
|
||||
|
||||
async def atomic_chat_stream(
    model: str,
    messages: list[dict],
    system: str | None = None,
    max_tokens: int = 4096,
    temperature: float = 1.0,
) -> AsyncIterator[str]:
    """Stream a chat completion from Atomic Chat as Anthropic-style SSE.

    Forwards the request to the local OpenAI-compatible endpoint with
    stream=True and re-emits each OpenAI delta chunk as Anthropic
    `content_block_delta` events, framed by `message_start` ...
    `message_stop`.

    Args:
        model: Model id as reported by the Atomic Chat /models endpoint.
        messages: OpenAI-format chat messages; the list is not mutated.
        system: Optional system prompt prepended as a system message.
        max_tokens: Completion token limit forwarded to the server.
        temperature: Sampling temperature forwarded to the server.

    Yields:
        Raw SSE lines (`event: ...` / `data: ...`) in Anthropic's
        streaming message format.

    Raises:
        httpx.HTTPStatusError: if the server responds with a non-2xx status.
    """
    chat_messages = list(messages)
    if system:
        chat_messages.insert(0, {"role": "system", "content": system})

    payload = {
        "model": model,
        "messages": chat_messages,
        "max_tokens": max_tokens,
        "temperature": temperature,
        "stream": True,
    }

    yield "event: message_start\n"
    yield f'data: {json.dumps({"type": "message_start", "message": {"id": "msg_atomic_chat_stream", "type": "message", "role": "assistant", "content": [], "model": model, "stop_reason": None, "usage": {"input_tokens": 0, "output_tokens": 0}}})}\n\n'
    yield "event: content_block_start\n"
    yield f'data: {json.dumps({"type": "content_block_start", "index": 0, "content_block": {"type": "text", "text": ""}})}\n\n'

    finished = False
    async with httpx.AsyncClient(timeout=120.0) as client:
        async with client.stream("POST", _api_url("/chat/completions"), json=payload) as resp:
            resp.raise_for_status()
            async for line in resp.aiter_lines():
                if not line or not line.startswith("data: "):
                    continue
                raw = line[len("data: "):]
                if raw.strip() == "[DONE]":
                    break
                # Keep the try narrow: only json.loads can raise here. The
                # original wrapped the whole chunk handling, so an IndexError
                # from an empty "choices" list escaped uncaught.
                try:
                    chunk = json.loads(raw)
                except json.JSONDecodeError:
                    continue

                # OpenAI-style final usage chunks may carry an EMPTY choices
                # list; `or [{}]` avoids indexing into it.
                choice = (chunk.get("choices") or [{}])[0]
                delta_text = (choice.get("delta") or {}).get("content")
                if delta_text:
                    yield "event: content_block_delta\n"
                    yield f'data: {json.dumps({"type": "content_block_delta", "index": 0, "delta": {"type": "text_delta", "text": delta_text}})}\n\n'

                if choice.get("finish_reason"):
                    usage = chunk.get("usage") or {}
                    yield "event: content_block_stop\n"
                    yield f'data: {json.dumps({"type": "content_block_stop", "index": 0})}\n\n'
                    yield "event: message_delta\n"
                    yield f'data: {json.dumps({"type": "message_delta", "delta": {"stop_reason": "end_turn", "stop_sequence": None}, "usage": {"output_tokens": usage.get("completion_tokens", 0)}})}\n\n'
                    yield "event: message_stop\n"
                    yield f'data: {json.dumps({"type": "message_stop"})}\n\n'
                    finished = True
                    break

    # Some servers emit [DONE] (or just close) without ever setting a
    # finish_reason; close the Anthropic frame anyway so clients that wait
    # for message_stop do not hang.
    if not finished:
        yield "event: content_block_stop\n"
        yield f'data: {json.dumps({"type": "content_block_stop", "index": 0})}\n\n'
        yield "event: message_delta\n"
        yield f'data: {json.dumps({"type": "message_delta", "delta": {"stop_reason": "end_turn", "stop_sequence": None}, "usage": {"output_tokens": 0}})}\n\n'
        yield "event: message_stop\n"
        yield f'data: {json.dumps({"type": "message_stop"})}\n\n'
|
||||
148
bun.lock
148
bun.lock
@@ -5,82 +5,82 @@
|
||||
"": {
|
||||
"name": "openclaude",
|
||||
"dependencies": {
|
||||
"@alcalzone/ansi-tokenize": "^0.3.0",
|
||||
"@anthropic-ai/bedrock-sdk": "^0.26.0",
|
||||
"@anthropic-ai/foundry-sdk": "^0.2.0",
|
||||
"@anthropic-ai/sandbox-runtime": "^0.0.46",
|
||||
"@anthropic-ai/sdk": "^0.81.0",
|
||||
"@anthropic-ai/vertex-sdk": "^0.14.0",
|
||||
"@commander-js/extra-typings": "^12.0.0",
|
||||
"@growthbook/growthbook": "^1.3.0",
|
||||
"@modelcontextprotocol/sdk": "^1.12.0",
|
||||
"@opentelemetry/api": "^1.9.1",
|
||||
"@opentelemetry/api-logs": "^0.214.0",
|
||||
"@opentelemetry/core": "^2.6.1",
|
||||
"@opentelemetry/exporter-logs-otlp-http": "^0.214.0",
|
||||
"@opentelemetry/exporter-trace-otlp-grpc": "^0.57.0",
|
||||
"@opentelemetry/resources": "^2.6.1",
|
||||
"@opentelemetry/sdk-logs": "^0.214.0",
|
||||
"@opentelemetry/sdk-metrics": "^2.6.1",
|
||||
"@opentelemetry/sdk-trace-base": "^2.6.1",
|
||||
"@opentelemetry/sdk-trace-node": "^2.6.1",
|
||||
"@opentelemetry/semantic-conventions": "^1.40.0",
|
||||
"ajv": "^8.17.0",
|
||||
"auto-bind": "^5.0.1",
|
||||
"axios": "^1.14.0",
|
||||
"bidi-js": "^1.0.3",
|
||||
"chalk": "^5.4.0",
|
||||
"chokidar": "^4.0.0",
|
||||
"cli-boxes": "^3.0.0",
|
||||
"cli-highlight": "^2.1.0",
|
||||
"code-excerpt": "^4.0.0",
|
||||
"commander": "^12.0.0",
|
||||
"diff": "^7.0.0",
|
||||
"emoji-regex": "^10.4.0",
|
||||
"env-paths": "^3.0.0",
|
||||
"execa": "^9.5.0",
|
||||
"fflate": "^0.8.2",
|
||||
"figures": "^6.1.0",
|
||||
"fuse.js": "^7.1.0",
|
||||
"get-east-asian-width": "^1.3.0",
|
||||
"google-auth-library": "^9.15.0",
|
||||
"https-proxy-agent": "^7.0.6",
|
||||
"ignore": "^7.0.0",
|
||||
"indent-string": "^5.0.0",
|
||||
"jsonc-parser": "^3.3.1",
|
||||
"lodash-es": "^4.17.21",
|
||||
"lru-cache": "^11.0.0",
|
||||
"marked": "^15.0.0",
|
||||
"p-map": "^7.0.3",
|
||||
"picomatch": "^4.0.0",
|
||||
"proper-lockfile": "^4.1.2",
|
||||
"qrcode": "^1.5.4",
|
||||
"react": "^19.2.4",
|
||||
"react-compiler-runtime": "^1.0.0",
|
||||
"react-reconciler": "^0.33.0",
|
||||
"semver": "^7.6.3",
|
||||
"shell-quote": "^1.8.2",
|
||||
"signal-exit": "^4.1.0",
|
||||
"stack-utils": "^2.0.6",
|
||||
"strip-ansi": "^7.1.0",
|
||||
"supports-hyperlinks": "^3.1.0",
|
||||
"tree-kill": "^1.2.2",
|
||||
"turndown": "^7.2.0",
|
||||
"type-fest": "^4.30.0",
|
||||
"undici": "^7.3.0",
|
||||
"usehooks-ts": "^3.1.1",
|
||||
"vscode-languageserver-protocol": "^3.17.5",
|
||||
"wrap-ansi": "^9.0.0",
|
||||
"ws": "^8.18.0",
|
||||
"xss": "^1.0.15",
|
||||
"yaml": "^2.7.0",
|
||||
"zod": "^3.24.0",
|
||||
"@alcalzone/ansi-tokenize": "0.3.0",
|
||||
"@anthropic-ai/bedrock-sdk": "0.26.4",
|
||||
"@anthropic-ai/foundry-sdk": "0.2.3",
|
||||
"@anthropic-ai/sandbox-runtime": "0.0.46",
|
||||
"@anthropic-ai/sdk": "0.81.0",
|
||||
"@anthropic-ai/vertex-sdk": "0.14.4",
|
||||
"@commander-js/extra-typings": "12.1.0",
|
||||
"@growthbook/growthbook": "1.6.5",
|
||||
"@modelcontextprotocol/sdk": "1.29.0",
|
||||
"@opentelemetry/api": "1.9.1",
|
||||
"@opentelemetry/api-logs": "0.214.0",
|
||||
"@opentelemetry/core": "2.6.1",
|
||||
"@opentelemetry/exporter-logs-otlp-http": "0.214.0",
|
||||
"@opentelemetry/exporter-trace-otlp-grpc": "0.57.2",
|
||||
"@opentelemetry/resources": "2.6.1",
|
||||
"@opentelemetry/sdk-logs": "0.214.0",
|
||||
"@opentelemetry/sdk-metrics": "2.6.1",
|
||||
"@opentelemetry/sdk-trace-base": "2.6.1",
|
||||
"@opentelemetry/sdk-trace-node": "2.6.1",
|
||||
"@opentelemetry/semantic-conventions": "1.40.0",
|
||||
"ajv": "8.18.0",
|
||||
"auto-bind": "5.0.1",
|
||||
"axios": "1.14.0",
|
||||
"bidi-js": "1.0.3",
|
||||
"chalk": "5.6.2",
|
||||
"chokidar": "4.0.3",
|
||||
"cli-boxes": "3.0.0",
|
||||
"cli-highlight": "2.1.11",
|
||||
"code-excerpt": "4.0.0",
|
||||
"commander": "12.1.0",
|
||||
"diff": "7.0.0",
|
||||
"emoji-regex": "10.6.0",
|
||||
"env-paths": "3.0.0",
|
||||
"execa": "9.6.1",
|
||||
"fflate": "0.8.2",
|
||||
"figures": "6.1.0",
|
||||
"fuse.js": "7.1.0",
|
||||
"get-east-asian-width": "1.5.0",
|
||||
"google-auth-library": "9.15.1",
|
||||
"https-proxy-agent": "7.0.6",
|
||||
"ignore": "7.0.5",
|
||||
"indent-string": "5.0.0",
|
||||
"jsonc-parser": "3.3.1",
|
||||
"lodash-es": "4.17.23",
|
||||
"lru-cache": "11.2.7",
|
||||
"marked": "15.0.12",
|
||||
"p-map": "7.0.4",
|
||||
"picomatch": "4.0.4",
|
||||
"proper-lockfile": "4.1.2",
|
||||
"qrcode": "1.5.4",
|
||||
"react": "19.2.4",
|
||||
"react-compiler-runtime": "1.0.0",
|
||||
"react-reconciler": "0.33.0",
|
||||
"semver": "7.7.4",
|
||||
"shell-quote": "1.8.3",
|
||||
"signal-exit": "4.1.0",
|
||||
"stack-utils": "2.0.6",
|
||||
"strip-ansi": "7.2.0",
|
||||
"supports-hyperlinks": "3.2.0",
|
||||
"tree-kill": "1.2.2",
|
||||
"turndown": "7.2.2",
|
||||
"type-fest": "4.41.0",
|
||||
"undici": "7.24.6",
|
||||
"usehooks-ts": "3.1.1",
|
||||
"vscode-languageserver-protocol": "3.17.5",
|
||||
"wrap-ansi": "9.0.2",
|
||||
"ws": "8.20.0",
|
||||
"xss": "1.0.15",
|
||||
"yaml": "2.8.3",
|
||||
"zod": "3.25.76",
|
||||
},
|
||||
"devDependencies": {
|
||||
"@types/bun": "^1.2.0",
|
||||
"@types/node": "^25.5.0",
|
||||
"@types/react": "^19.2.14",
|
||||
"typescript": "^5.7.0",
|
||||
"@types/bun": "1.3.11",
|
||||
"@types/node": "25.5.0",
|
||||
"@types/react": "19.2.14",
|
||||
"typescript": "5.9.3",
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
@@ -90,6 +90,24 @@ export OPENAI_BASE_URL=http://localhost:11434/v1
|
||||
export OPENAI_MODEL=llama3.3:70b
|
||||
```
|
||||
|
||||
### Atomic Chat (local, Apple Silicon)
|
||||
|
||||
```bash
|
||||
export CLAUDE_CODE_USE_OPENAI=1
|
||||
export OPENAI_BASE_URL=http://127.0.0.1:1337/v1
|
||||
export OPENAI_MODEL=your-model-name
|
||||
```
|
||||
|
||||
No API key is needed for Atomic Chat local models.
|
||||
|
||||
Or use the profile launcher:
|
||||
|
||||
```bash
|
||||
bun run dev:atomic-chat
|
||||
```
|
||||
|
||||
Download Atomic Chat from [atomic.chat](https://atomic.chat/). The app must be running with a model loaded before launching.
|
||||
|
||||
### LM Studio
|
||||
|
||||
```bash
|
||||
@@ -139,7 +157,7 @@ export OPENAI_MODEL=gpt-4o
|
||||
| Variable | Required | Description |
|
||||
|----------|----------|-------------|
|
||||
| `CLAUDE_CODE_USE_OPENAI` | Yes | Set to `1` to enable the OpenAI provider |
|
||||
| `OPENAI_API_KEY` | Yes* | Your API key (`*` not needed for local models like Ollama) |
|
||||
| `OPENAI_API_KEY` | Yes* | Your API key (`*` not needed for local models like Ollama or Atomic Chat) |
|
||||
| `OPENAI_MODEL` | Yes | Model name such as `gpt-4o`, `deepseek-chat`, or `llama3.3:70b` |
|
||||
| `OPENAI_BASE_URL` | No | API endpoint, defaulting to `https://api.openai.com/v1` |
|
||||
| `CODEX_API_KEY` | Codex only | Codex or ChatGPT access token override |
|
||||
@@ -176,7 +194,7 @@ bun run hardening:strict
|
||||
Notes:
|
||||
|
||||
- `doctor:runtime` fails fast if `CLAUDE_CODE_USE_OPENAI=1` with a placeholder key or a missing key for non-local providers.
|
||||
- Local providers such as `http://localhost:11434/v1` can run without `OPENAI_API_KEY`.
|
||||
- Local providers such as `http://localhost:11434/v1` and `http://127.0.0.1:1337/v1` can run without `OPENAI_API_KEY`.
|
||||
- Codex profiles validate `CODEX_API_KEY` or the Codex CLI auth file and probe `POST /responses` instead of `GET /models`.
|
||||
|
||||
## Provider Launch Profiles
|
||||
@@ -205,6 +223,9 @@ bun run profile:init -- --provider ollama --model llama3.1:8b
|
||||
# ollama bootstrap with intelligent model auto-selection
|
||||
bun run profile:init -- --provider ollama --goal coding
|
||||
|
||||
# atomic-chat bootstrap (auto-detects running model)
|
||||
bun run profile:init -- --provider atomic-chat
|
||||
|
||||
# codex bootstrap with a fast model alias
|
||||
bun run profile:init -- --provider codex --model codexspark
|
||||
|
||||
@@ -219,6 +240,9 @@ bun run dev:openai
|
||||
|
||||
# Ollama profile (defaults: localhost:11434, llama3.1:8b)
|
||||
bun run dev:ollama
|
||||
|
||||
# Atomic Chat profile (Apple Silicon local LLMs at 127.0.0.1:1337)
|
||||
bun run dev:atomic-chat
|
||||
```
|
||||
|
||||
`profile:recommend` ranks installed Ollama models for `latency`, `balanced`, or `coding`, and `profile:auto` can persist the recommendation directly.
|
||||
@@ -227,8 +251,12 @@ If no profile exists yet, `dev:profile` uses the same goal-aware defaults when p
|
||||
|
||||
Use `--provider ollama` when you want a local-only path. Auto mode falls back to OpenAI when no viable local chat model is installed.
|
||||
|
||||
Use `--provider atomic-chat` when you want Atomic Chat as the local Apple Silicon provider.
|
||||
|
||||
Use `profile:codex` or `--provider codex` when you want the ChatGPT Codex backend.
|
||||
|
||||
`dev:openai`, `dev:ollama`, and `dev:codex` run `doctor:runtime` first and only launch the app if checks pass.
|
||||
`dev:openai`, `dev:ollama`, `dev:atomic-chat`, and `dev:codex` run `doctor:runtime` first and only launch the app if checks pass.
|
||||
|
||||
For `dev:ollama`, make sure Ollama is running locally before launch.
|
||||
|
||||
For `dev:atomic-chat`, make sure Atomic Chat is running with a model loaded before launch.
|
||||
|
||||
151
package.json
151
package.json
@@ -21,6 +21,7 @@
|
||||
"dev:gemini": "bun run scripts/provider-launch.ts gemini",
|
||||
"dev:ollama": "bun run scripts/provider-launch.ts ollama",
|
||||
"dev:ollama:fast": "bun run scripts/provider-launch.ts ollama --fast --bare",
|
||||
"dev:atomic-chat": "bun run scripts/provider-launch.ts atomic-chat",
|
||||
"profile:init": "bun run scripts/provider-bootstrap.ts",
|
||||
"profile:recommend": "bun run scripts/provider-recommend.ts",
|
||||
"profile:auto": "bun run scripts/provider-recommend.ts --apply",
|
||||
@@ -30,7 +31,7 @@
|
||||
"dev:fast": "bun run profile:fast && bun run dev:ollama:fast",
|
||||
"dev:code": "bun run profile:code && bun run dev:profile",
|
||||
"start": "node dist/cli.mjs",
|
||||
"test:provider-recommendation": "node --test --experimental-strip-types src/utils/providerRecommendation.test.ts src/utils/providerProfile.test.ts",
|
||||
"test:provider-recommendation": "bun test src/utils/providerRecommendation.test.ts src/utils/providerProfile.test.ts",
|
||||
"typecheck": "tsc --noEmit",
|
||||
"smoke": "bun run build && node dist/cli.mjs --version",
|
||||
"test:provider": "bun test src/services/api/*.test.ts src/utils/context.test.ts",
|
||||
@@ -42,82 +43,82 @@
|
||||
"prepack": "npm run build"
|
||||
},
|
||||
"dependencies": {
|
||||
"@alcalzone/ansi-tokenize": "^0.3.0",
|
||||
"@anthropic-ai/bedrock-sdk": "^0.26.0",
|
||||
"@anthropic-ai/foundry-sdk": "^0.2.0",
|
||||
"@anthropic-ai/sandbox-runtime": "^0.0.46",
|
||||
"@anthropic-ai/sdk": "^0.81.0",
|
||||
"@anthropic-ai/vertex-sdk": "^0.14.0",
|
||||
"@commander-js/extra-typings": "^12.0.0",
|
||||
"@growthbook/growthbook": "^1.3.0",
|
||||
"@modelcontextprotocol/sdk": "^1.12.0",
|
||||
"@opentelemetry/api": "^1.9.1",
|
||||
"@opentelemetry/api-logs": "^0.214.0",
|
||||
"@opentelemetry/core": "^2.6.1",
|
||||
"@opentelemetry/exporter-logs-otlp-http": "^0.214.0",
|
||||
"@opentelemetry/exporter-trace-otlp-grpc": "^0.57.0",
|
||||
"@opentelemetry/resources": "^2.6.1",
|
||||
"@opentelemetry/sdk-logs": "^0.214.0",
|
||||
"@opentelemetry/sdk-metrics": "^2.6.1",
|
||||
"@opentelemetry/sdk-trace-base": "^2.6.1",
|
||||
"@opentelemetry/sdk-trace-node": "^2.6.1",
|
||||
"@opentelemetry/semantic-conventions": "^1.40.0",
|
||||
"ajv": "^8.17.0",
|
||||
"auto-bind": "^5.0.1",
|
||||
"axios": "^1.14.0",
|
||||
"bidi-js": "^1.0.3",
|
||||
"chalk": "^5.4.0",
|
||||
"chokidar": "^4.0.0",
|
||||
"cli-boxes": "^3.0.0",
|
||||
"cli-highlight": "^2.1.0",
|
||||
"code-excerpt": "^4.0.0",
|
||||
"commander": "^12.0.0",
|
||||
"diff": "^7.0.0",
|
||||
"emoji-regex": "^10.4.0",
|
||||
"env-paths": "^3.0.0",
|
||||
"execa": "^9.5.0",
|
||||
"fflate": "^0.8.2",
|
||||
"figures": "^6.1.0",
|
||||
"fuse.js": "^7.1.0",
|
||||
"get-east-asian-width": "^1.3.0",
|
||||
"google-auth-library": "^9.15.0",
|
||||
"https-proxy-agent": "^7.0.6",
|
||||
"ignore": "^7.0.0",
|
||||
"indent-string": "^5.0.0",
|
||||
"jsonc-parser": "^3.3.1",
|
||||
"lodash-es": "^4.17.21",
|
||||
"lru-cache": "^11.0.0",
|
||||
"marked": "^15.0.0",
|
||||
"p-map": "^7.0.3",
|
||||
"picomatch": "^4.0.0",
|
||||
"proper-lockfile": "^4.1.2",
|
||||
"qrcode": "^1.5.4",
|
||||
"react": "^19.2.4",
|
||||
"react-compiler-runtime": "^1.0.0",
|
||||
"react-reconciler": "^0.33.0",
|
||||
"semver": "^7.6.3",
|
||||
"shell-quote": "^1.8.2",
|
||||
"signal-exit": "^4.1.0",
|
||||
"stack-utils": "^2.0.6",
|
||||
"strip-ansi": "^7.1.0",
|
||||
"supports-hyperlinks": "^3.1.0",
|
||||
"tree-kill": "^1.2.2",
|
||||
"turndown": "^7.2.0",
|
||||
"type-fest": "^4.30.0",
|
||||
"undici": "^7.3.0",
|
||||
"usehooks-ts": "^3.1.1",
|
||||
"vscode-languageserver-protocol": "^3.17.5",
|
||||
"wrap-ansi": "^9.0.0",
|
||||
"ws": "^8.18.0",
|
||||
"xss": "^1.0.15",
|
||||
"yaml": "^2.7.0",
|
||||
"zod": "^3.24.0"
|
||||
"@alcalzone/ansi-tokenize": "0.3.0",
|
||||
"@anthropic-ai/bedrock-sdk": "0.26.4",
|
||||
"@anthropic-ai/foundry-sdk": "0.2.3",
|
||||
"@anthropic-ai/sandbox-runtime": "0.0.46",
|
||||
"@anthropic-ai/sdk": "0.81.0",
|
||||
"@anthropic-ai/vertex-sdk": "0.14.4",
|
||||
"@commander-js/extra-typings": "12.1.0",
|
||||
"@growthbook/growthbook": "1.6.5",
|
||||
"@modelcontextprotocol/sdk": "1.29.0",
|
||||
"@opentelemetry/api": "1.9.1",
|
||||
"@opentelemetry/api-logs": "0.214.0",
|
||||
"@opentelemetry/core": "2.6.1",
|
||||
"@opentelemetry/exporter-logs-otlp-http": "0.214.0",
|
||||
"@opentelemetry/exporter-trace-otlp-grpc": "0.57.2",
|
||||
"@opentelemetry/resources": "2.6.1",
|
||||
"@opentelemetry/sdk-logs": "0.214.0",
|
||||
"@opentelemetry/sdk-metrics": "2.6.1",
|
||||
"@opentelemetry/sdk-trace-base": "2.6.1",
|
||||
"@opentelemetry/sdk-trace-node": "2.6.1",
|
||||
"@opentelemetry/semantic-conventions": "1.40.0",
|
||||
"ajv": "8.18.0",
|
||||
"auto-bind": "5.0.1",
|
||||
"axios": "1.14.0",
|
||||
"bidi-js": "1.0.3",
|
||||
"chalk": "5.6.2",
|
||||
"chokidar": "4.0.3",
|
||||
"cli-boxes": "3.0.0",
|
||||
"cli-highlight": "2.1.11",
|
||||
"code-excerpt": "4.0.0",
|
||||
"commander": "12.1.0",
|
||||
"diff": "7.0.0",
|
||||
"emoji-regex": "10.6.0",
|
||||
"env-paths": "3.0.0",
|
||||
"execa": "9.6.1",
|
||||
"fflate": "0.8.2",
|
||||
"figures": "6.1.0",
|
||||
"fuse.js": "7.1.0",
|
||||
"get-east-asian-width": "1.5.0",
|
||||
"google-auth-library": "9.15.1",
|
||||
"https-proxy-agent": "7.0.6",
|
||||
"ignore": "7.0.5",
|
||||
"indent-string": "5.0.0",
|
||||
"jsonc-parser": "3.3.1",
|
||||
"lodash-es": "4.17.23",
|
||||
"lru-cache": "11.2.7",
|
||||
"marked": "15.0.12",
|
||||
"p-map": "7.0.4",
|
||||
"picomatch": "4.0.4",
|
||||
"proper-lockfile": "4.1.2",
|
||||
"qrcode": "1.5.4",
|
||||
"react": "19.2.4",
|
||||
"react-compiler-runtime": "1.0.0",
|
||||
"react-reconciler": "0.33.0",
|
||||
"semver": "7.7.4",
|
||||
"shell-quote": "1.8.3",
|
||||
"signal-exit": "4.1.0",
|
||||
"stack-utils": "2.0.6",
|
||||
"strip-ansi": "7.2.0",
|
||||
"supports-hyperlinks": "3.2.0",
|
||||
"tree-kill": "1.2.2",
|
||||
"turndown": "7.2.2",
|
||||
"type-fest": "4.41.0",
|
||||
"undici": "7.24.6",
|
||||
"usehooks-ts": "3.1.1",
|
||||
"vscode-languageserver-protocol": "3.17.5",
|
||||
"wrap-ansi": "9.0.2",
|
||||
"ws": "8.20.0",
|
||||
"xss": "1.0.15",
|
||||
"yaml": "2.8.3",
|
||||
"zod": "3.25.76"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@types/bun": "^1.2.0",
|
||||
"@types/node": "^25.5.0",
|
||||
"@types/react": "^19.2.14",
|
||||
"typescript": "^5.7.0"
|
||||
"@types/bun": "1.3.11",
|
||||
"@types/node": "25.5.0",
|
||||
"@types/react": "19.2.14",
|
||||
"typescript": "5.9.3"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=20.0.0"
|
||||
|
||||
@@ -9,6 +9,7 @@
|
||||
*/
|
||||
|
||||
import { readFileSync } from 'fs'
|
||||
import { noTelemetryPlugin } from './no-telemetry-plugin'
|
||||
|
||||
const pkg = JSON.parse(readFileSync('./package.json', 'utf-8'))
|
||||
const version = pkg.version
|
||||
@@ -64,6 +65,7 @@ const result = await Bun.build({
|
||||
'MACRO.NATIVE_PACKAGE_URL': 'undefined',
|
||||
},
|
||||
plugins: [
|
||||
noTelemetryPlugin,
|
||||
{
|
||||
name: 'bun-bundle-shim',
|
||||
setup(build) {
|
||||
|
||||
221
scripts/no-telemetry-plugin.ts
Normal file
221
scripts/no-telemetry-plugin.ts
Normal file
@@ -0,0 +1,221 @@
|
||||
/**
|
||||
* No-Telemetry Build Plugin for OpenClaude
|
||||
*
|
||||
* Replaces all analytics, telemetry, and phone-home modules with no-op stubs
|
||||
* at compile time. Zero runtime cost, zero network calls to Anthropic.
|
||||
*
|
||||
* This file is NOT tracked upstream — merge conflicts are impossible.
|
||||
* Only build.ts needs a one-line import + one-line array entry.
|
||||
*
|
||||
* Kills:
|
||||
* - GrowthBook remote feature flags (api.anthropic.com)
|
||||
* - Datadog event intake
|
||||
* - 1P event logging (api.anthropic.com/api/event_logging/batch)
|
||||
* - BigQuery metrics exporter (api.anthropic.com/api/claude_code/metrics)
|
||||
* - Perfetto / OpenTelemetry session tracing
|
||||
* - Auto-updater (storage.googleapis.com, npm registry)
|
||||
* - Plugin fetch telemetry
|
||||
* - Transcript / feedback sharing
|
||||
*/
|
||||
|
||||
import type { BunPlugin } from 'bun'
|
||||
|
||||
// Module path (relative to src/, without extension) → stub source
|
||||
const stubs: Record<string, string> = {
|
||||
|
||||
// ─── Analytics core ─────────────────────────────────────────────
|
||||
|
||||
'services/analytics/index': `
|
||||
export function stripProtoFields(metadata) { return metadata; }
|
||||
export function attachAnalyticsSink() {}
|
||||
export function logEvent() {}
|
||||
export async function logEventAsync() {}
|
||||
export function _resetForTesting() {}
|
||||
`,
|
||||
|
||||
'services/analytics/growthbook': `
|
||||
const noop = () => {};
|
||||
export function onGrowthBookRefresh() { return noop; }
|
||||
export function hasGrowthBookEnvOverride() { return false; }
|
||||
export function getAllGrowthBookFeatures() { return {}; }
|
||||
export function getGrowthBookConfigOverrides() { return {}; }
|
||||
export function setGrowthBookConfigOverride() {}
|
||||
export function clearGrowthBookConfigOverrides() {}
|
||||
export function getApiBaseUrlHost() { return undefined; }
|
||||
export const initializeGrowthBook = async () => null;
|
||||
export async function getFeatureValue_DEPRECATED(feature, defaultValue) { return defaultValue; }
|
||||
export function getFeatureValue_CACHED_MAY_BE_STALE(feature, defaultValue) { return defaultValue; }
|
||||
export function getFeatureValue_CACHED_WITH_REFRESH(feature, defaultValue) { return defaultValue; }
|
||||
export function checkStatsigFeatureGate_CACHED_MAY_BE_STALE() { return false; }
|
||||
export async function checkSecurityRestrictionGate() { return false; }
|
||||
export async function checkGate_CACHED_OR_BLOCKING() { return false; }
|
||||
export function refreshGrowthBookAfterAuthChange() {}
|
||||
export function resetGrowthBook() {}
|
||||
export async function refreshGrowthBookFeatures() {}
|
||||
export function setupPeriodicGrowthBookRefresh() {}
|
||||
export function stopPeriodicGrowthBookRefresh() {}
|
||||
export async function getDynamicConfig_BLOCKS_ON_INIT(configName, defaultValue) { return defaultValue; }
|
||||
export function getDynamicConfig_CACHED_MAY_BE_STALE(configName, defaultValue) { return defaultValue; }
|
||||
`,
|
||||
|
||||
'services/analytics/sink': `
|
||||
export function initializeAnalyticsGates() {}
|
||||
export function initializeAnalyticsSink() {}
|
||||
`,
|
||||
|
||||
'services/analytics/config': `
|
||||
export function isAnalyticsDisabled() { return true; }
|
||||
export function isFeedbackSurveyDisabled() { return true; }
|
||||
`,
|
||||
|
||||
'services/analytics/datadog': `
|
||||
export const initializeDatadog = async () => false;
|
||||
export async function shutdownDatadog() {}
|
||||
export async function trackDatadogEvent() {}
|
||||
`,
|
||||
|
||||
'services/analytics/firstPartyEventLogger': `
|
||||
export function getEventSamplingConfig() { return {}; }
|
||||
export function shouldSampleEvent() { return null; }
|
||||
export async function shutdown1PEventLogging() {}
|
||||
export function is1PEventLoggingEnabled() { return false; }
|
||||
export function logEventTo1P() {}
|
||||
export function logGrowthBookExperimentTo1P() {}
|
||||
export function initialize1PEventLogging() {}
|
||||
export async function reinitialize1PEventLoggingIfConfigChanged() {}
|
||||
`,
|
||||
|
||||
'services/analytics/firstPartyEventLoggingExporter': `
|
||||
export class FirstPartyEventLoggingExporter {
|
||||
constructor() {}
|
||||
async export(logs, resultCallback) { resultCallback({ code: 0 }); }
|
||||
async getQueuedEventCount() { return 0; }
|
||||
async shutdown() {}
|
||||
async forceFlush() {}
|
||||
}
|
||||
`,
|
||||
|
||||
'services/analytics/metadata': `
|
||||
export function sanitizeToolNameForAnalytics(toolName) { return toolName; }
|
||||
export function isToolDetailsLoggingEnabled() { return false; }
|
||||
export function isAnalyticsToolDetailsLoggingEnabled() { return false; }
|
||||
export function mcpToolDetailsForAnalytics() { return {}; }
|
||||
export function extractMcpToolDetails() { return undefined; }
|
||||
export function extractSkillName() { return undefined; }
|
||||
export function extractToolInputForTelemetry() { return undefined; }
|
||||
export function getFileExtensionForAnalytics() { return undefined; }
|
||||
export function getFileExtensionsFromBashCommand() { return undefined; }
|
||||
export async function getEventMetadata() { return {}; }
|
||||
export function to1PEventFormat() { return {}; }
|
||||
`,
|
||||
|
||||
// ─── Telemetry subsystems ───────────────────────────────────────
|
||||
|
||||
'utils/telemetry/bigqueryExporter': `
|
||||
export class BigQueryMetricsExporter {
|
||||
constructor() {}
|
||||
async export(metrics, resultCallback) { resultCallback({ code: 0 }); }
|
||||
async shutdown() {}
|
||||
async forceFlush() {}
|
||||
selectAggregationTemporality() { return 0; }
|
||||
}
|
||||
`,
|
||||
|
||||
'utils/telemetry/perfettoTracing': `
|
||||
export function initializePerfettoTracing() {}
|
||||
export function isPerfettoTracingEnabled() { return false; }
|
||||
export function registerAgent() {}
|
||||
export function unregisterAgent() {}
|
||||
export function startLLMRequestPerfettoSpan() { return ''; }
|
||||
export function endLLMRequestPerfettoSpan() {}
|
||||
export function startToolPerfettoSpan() { return ''; }
|
||||
export function endToolPerfettoSpan() {}
|
||||
export function startUserInputPerfettoSpan() { return ''; }
|
||||
export function endUserInputPerfettoSpan() {}
|
||||
export function emitPerfettoInstant() {}
|
||||
export function emitPerfettoCounter() {}
|
||||
export function startInteractionPerfettoSpan() { return ''; }
|
||||
export function endInteractionPerfettoSpan() {}
|
||||
export function getPerfettoEvents() { return []; }
|
||||
export function resetPerfettoTracer() {}
|
||||
export async function triggerPeriodicWriteForTesting() {}
|
||||
export function evictStaleSpansForTesting() {}
|
||||
export const MAX_EVENTS_FOR_TESTING = 0;
|
||||
export function evictOldestEventsForTesting() {}
|
||||
`,
|
||||
|
||||
'utils/telemetry/sessionTracing': `
|
||||
const noopSpan = {
|
||||
end() {}, setAttribute() {}, setStatus() {},
|
||||
recordException() {}, addEvent() {}, isRecording() { return false; },
|
||||
};
|
||||
export function isBetaTracingEnabled() { return false; }
|
||||
export function isEnhancedTelemetryEnabled() { return false; }
|
||||
export function startInteractionSpan() { return noopSpan; }
|
||||
export function endInteractionSpan() {}
|
||||
export function startLLMRequestSpan() { return noopSpan; }
|
||||
export function endLLMRequestSpan() {}
|
||||
export function startToolSpan() { return noopSpan; }
|
||||
export function startToolBlockedOnUserSpan() { return noopSpan; }
|
||||
export function endToolBlockedOnUserSpan() {}
|
||||
export function startToolExecutionSpan() { return noopSpan; }
|
||||
export function endToolExecutionSpan() {}
|
||||
export function endToolSpan() {}
|
||||
export function addToolContentEvent() {}
|
||||
export function getCurrentSpan() { return null; }
|
||||
export async function executeInSpan(spanName, fn) { return fn(noopSpan); }
|
||||
export function startHookSpan() { return noopSpan; }
|
||||
export function endHookSpan() {}
|
||||
`,
|
||||
|
||||
// ─── Auto-updater (phones home to GCS + npm) ──────────────────
|
||||
|
||||
'utils/autoUpdater': `
|
||||
export async function assertMinVersion() {}
|
||||
export async function getMaxVersion() { return undefined; }
|
||||
export async function getMaxVersionMessage() { return undefined; }
|
||||
export function shouldSkipVersion() { return true; }
|
||||
export function getLockFilePath() { return '/tmp/openclaude-update.lock'; }
|
||||
export async function checkGlobalInstallPermissions() { return { hasPermissions: false, npmPrefix: null }; }
|
||||
export async function getLatestVersion() { return null; }
|
||||
export async function getNpmDistTags() { return { latest: null, stable: null }; }
|
||||
export async function getLatestVersionFromGcs() { return null; }
|
||||
export async function getGcsDistTags() { return { latest: null, stable: null }; }
|
||||
export async function getVersionHistory() { return []; }
|
||||
export async function installGlobalPackage() { return 'success'; }
|
||||
`,
|
||||
|
||||
// ─── Plugin fetch telemetry (not the marketplace itself) ───────
|
||||
|
||||
'utils/plugins/fetchTelemetry': `
|
||||
export function logPluginFetch() {}
|
||||
export function classifyFetchError() { return 'disabled'; }
|
||||
`,
|
||||
|
||||
// ─── Transcript / feedback sharing ─────────────────────────────
|
||||
|
||||
'components/FeedbackSurvey/submitTranscriptShare': `
|
||||
export async function submitTranscriptShare() { return { success: false }; }
|
||||
`,
|
||||
}
|
||||
|
||||
export const noTelemetryPlugin: BunPlugin = {
|
||||
name: 'no-telemetry',
|
||||
setup(build) {
|
||||
for (const [modulePath, contents] of Object.entries(stubs)) {
|
||||
// Build regex that matches the resolved file path on any OS
|
||||
// e.g. "services/analytics/growthbook" → /services[/\\]analytics[/\\]growthbook\.(ts|js)$/
|
||||
const escaped = modulePath
|
||||
.replace(/\//g, '[/\\\\]')
|
||||
.replace(/\./g, '\\.')
|
||||
const filter = new RegExp(`${escaped}\\.(ts|js)$`)
|
||||
|
||||
build.onLoad({ filter }, () => ({
|
||||
contents,
|
||||
loader: 'js',
|
||||
}))
|
||||
}
|
||||
|
||||
console.log(` 🔇 no-telemetry: stubbed ${Object.keys(stubs).length} modules`)
|
||||
},
|
||||
}
|
||||
@@ -10,6 +10,7 @@ import {
|
||||
recommendOllamaModel,
|
||||
} from '../src/utils/providerRecommendation.ts'
|
||||
import {
|
||||
buildAtomicChatProfileEnv,
|
||||
buildCodexProfileEnv,
|
||||
buildGeminiProfileEnv,
|
||||
buildOllamaProfileEnv,
|
||||
@@ -20,8 +21,11 @@ import {
|
||||
type ProviderProfile,
|
||||
} from '../src/utils/providerProfile.ts'
|
||||
import {
|
||||
getAtomicChatChatBaseUrl,
|
||||
getOllamaChatBaseUrl,
|
||||
hasLocalAtomicChat,
|
||||
hasLocalOllama,
|
||||
listAtomicChatModels,
|
||||
listOllamaModels,
|
||||
} from './provider-discovery.ts'
|
||||
|
||||
@@ -34,7 +38,7 @@ function parseArg(name: string): string | null {
|
||||
|
||||
function parseProviderArg(): ProviderProfile | 'auto' {
|
||||
const p = parseArg('--provider')?.toLowerCase()
|
||||
if (p === 'openai' || p === 'ollama' || p === 'codex' || p === 'gemini') return p
|
||||
if (p === 'openai' || p === 'ollama' || p === 'codex' || p === 'gemini' || p === 'atomic-chat') return p
|
||||
return 'auto'
|
||||
}
|
||||
|
||||
@@ -102,6 +106,21 @@ async function main(): Promise<void> {
|
||||
getOllamaChatBaseUrl,
|
||||
},
|
||||
)
|
||||
} else if (selected === 'atomic-chat') {
|
||||
const model = argModel || (await listAtomicChatModels(argBaseUrl || undefined))[0]
|
||||
if (!model) {
|
||||
if (!(await hasLocalAtomicChat(argBaseUrl || undefined))) {
|
||||
console.error('Atomic Chat is not running (could not connect to 127.0.0.1:1337).\n Download from https://atomic.chat/ and launch the application.')
|
||||
} else {
|
||||
console.error('Atomic Chat is running but no model is loaded. Open Atomic Chat and download or start a model first.')
|
||||
}
|
||||
process.exit(1)
|
||||
}
|
||||
|
||||
env = buildAtomicChatProfileEnv(model, {
|
||||
baseUrl: argBaseUrl,
|
||||
getAtomicChatChatBaseUrl,
|
||||
})
|
||||
} else if (selected === 'codex') {
|
||||
const builtEnv = buildCodexProfileEnv({
|
||||
model: argModel,
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
import type { OllamaModelDescriptor } from '../src/utils/providerRecommendation.ts'
|
||||
|
||||
export const DEFAULT_OLLAMA_BASE_URL = 'http://localhost:11434'
|
||||
export const DEFAULT_ATOMIC_CHAT_BASE_URL = 'http://127.0.0.1:1337'
|
||||
|
||||
function withTimeoutSignal(timeoutMs: number): {
|
||||
signal: AbortSignal
|
||||
@@ -93,6 +94,69 @@ export async function listOllamaModels(
|
||||
}
|
||||
}
|
||||
|
||||
// ── Atomic Chat discovery (Apple Silicon local LLMs at 127.0.0.1:1337) ──────
|
||||
|
||||
export function getAtomicChatApiBaseUrl(baseUrl?: string): string {
|
||||
const parsed = new URL(
|
||||
baseUrl || process.env.ATOMIC_CHAT_BASE_URL || DEFAULT_ATOMIC_CHAT_BASE_URL,
|
||||
)
|
||||
const pathname = trimTrailingSlash(parsed.pathname)
|
||||
parsed.pathname = pathname.endsWith('/v1')
|
||||
? pathname.slice(0, -3) || '/'
|
||||
: pathname || '/'
|
||||
parsed.search = ''
|
||||
parsed.hash = ''
|
||||
return trimTrailingSlash(parsed.toString())
|
||||
}
|
||||
|
||||
export function getAtomicChatChatBaseUrl(baseUrl?: string): string {
|
||||
return `${getAtomicChatApiBaseUrl(baseUrl)}/v1`
|
||||
}
|
||||
|
||||
export async function hasLocalAtomicChat(baseUrl?: string): Promise<boolean> {
|
||||
const { signal, clear } = withTimeoutSignal(1200)
|
||||
try {
|
||||
const response = await fetch(`${getAtomicChatChatBaseUrl(baseUrl)}/models`, {
|
||||
method: 'GET',
|
||||
signal,
|
||||
})
|
||||
return response.ok
|
||||
} catch {
|
||||
return false
|
||||
} finally {
|
||||
clear()
|
||||
}
|
||||
}
|
||||
|
||||
export async function listAtomicChatModels(
|
||||
baseUrl?: string,
|
||||
): Promise<string[]> {
|
||||
const { signal, clear } = withTimeoutSignal(5000)
|
||||
try {
|
||||
const response = await fetch(`${getAtomicChatChatBaseUrl(baseUrl)}/models`, {
|
||||
method: 'GET',
|
||||
signal,
|
||||
})
|
||||
if (!response.ok) {
|
||||
return []
|
||||
}
|
||||
|
||||
const data = await response.json() as {
|
||||
data?: Array<{ id?: string }>
|
||||
}
|
||||
|
||||
return (data.data ?? [])
|
||||
.filter(model => Boolean(model.id))
|
||||
.map(model => model.id!)
|
||||
} catch {
|
||||
return []
|
||||
} finally {
|
||||
clear()
|
||||
}
|
||||
}
|
||||
|
||||
// ── Ollama benchmarking ─────────────────────────────────────────────────────
|
||||
|
||||
export async function benchmarkOllamaModel(
|
||||
modelName: string,
|
||||
baseUrl?: string,
|
||||
|
||||
@@ -16,8 +16,11 @@ import {
|
||||
type ProviderProfile,
|
||||
} from '../src/utils/providerProfile.ts'
|
||||
import {
|
||||
getAtomicChatChatBaseUrl,
|
||||
getOllamaChatBaseUrl,
|
||||
hasLocalAtomicChat,
|
||||
hasLocalOllama,
|
||||
listAtomicChatModels,
|
||||
listOllamaModels,
|
||||
} from './provider-discovery.ts'
|
||||
|
||||
@@ -48,7 +51,7 @@ function parseLaunchOptions(argv: string[]): LaunchOptions {
|
||||
continue
|
||||
}
|
||||
|
||||
if ((lower === 'auto' || lower === 'openai' || lower === 'ollama' || lower === 'codex' || lower === 'gemini') && requestedProfile === 'auto') {
|
||||
if ((lower === 'auto' || lower === 'openai' || lower === 'ollama' || lower === 'codex' || lower === 'gemini' || lower === 'atomic-chat') && requestedProfile === 'auto') {
|
||||
requestedProfile = lower as ProviderProfile | 'auto'
|
||||
continue
|
||||
}
|
||||
@@ -79,7 +82,7 @@ function loadPersistedProfile(): ProfileFile | null {
|
||||
if (!existsSync(path)) return null
|
||||
try {
|
||||
const parsed = JSON.parse(readFileSync(path, 'utf8')) as ProfileFile
|
||||
if (parsed.profile === 'openai' || parsed.profile === 'ollama' || parsed.profile === 'codex' || parsed.profile === 'gemini') {
|
||||
if (parsed.profile === 'openai' || parsed.profile === 'ollama' || parsed.profile === 'codex' || parsed.profile === 'gemini' || parsed.profile === 'atomic-chat') {
|
||||
return parsed
|
||||
}
|
||||
return null
|
||||
@@ -96,6 +99,11 @@ async function resolveOllamaDefaultModel(
|
||||
return recommended?.name ?? null
|
||||
}
|
||||
|
||||
async function resolveAtomicChatDefaultModel(): Promise<string | null> {
|
||||
const models = await listAtomicChatModels()
|
||||
return models[0] ?? null
|
||||
}
|
||||
|
||||
function runCommand(command: string, env: NodeJS.ProcessEnv): Promise<number> {
|
||||
return runProcess(command, [], env)
|
||||
}
|
||||
@@ -132,6 +140,10 @@ function printSummary(profile: ProviderProfile, env: NodeJS.ProcessEnv): void {
|
||||
console.log(`OPENAI_BASE_URL=${env.OPENAI_BASE_URL}`)
|
||||
console.log(`OPENAI_MODEL=${env.OPENAI_MODEL}`)
|
||||
console.log(`CODEX_API_KEY_SET=${Boolean(resolveCodexApiCredentials(env).apiKey)}`)
|
||||
} else if (profile === 'atomic-chat') {
|
||||
console.log(`OPENAI_BASE_URL=${env.OPENAI_BASE_URL}`)
|
||||
console.log(`OPENAI_MODEL=${env.OPENAI_MODEL}`)
|
||||
console.log('OPENAI_API_KEY_SET=false (local provider, no key required)')
|
||||
} else {
|
||||
console.log(`OPENAI_BASE_URL=${env.OPENAI_BASE_URL}`)
|
||||
console.log(`OPENAI_MODEL=${env.OPENAI_MODEL}`)
|
||||
@@ -143,7 +155,7 @@ async function main(): Promise<void> {
|
||||
const options = parseLaunchOptions(process.argv.slice(2))
|
||||
const requestedProfile = options.requestedProfile
|
||||
if (!requestedProfile) {
|
||||
console.error('Usage: bun run scripts/provider-launch.ts [openai|ollama|codex|gemini|auto] [--fast] [--goal <latency|balanced|coding>] [-- <cli args>]')
|
||||
console.error('Usage: bun run scripts/provider-launch.ts [openai|ollama|codex|gemini|atomic-chat|auto] [--fast] [--goal <latency|balanced|coding>] [-- <cli args>]')
|
||||
process.exit(1)
|
||||
}
|
||||
|
||||
@@ -175,12 +187,30 @@ async function main(): Promise<void> {
|
||||
}
|
||||
}
|
||||
|
||||
let resolvedAtomicChatModel: string | null = null
|
||||
if (
|
||||
profile === 'atomic-chat' &&
|
||||
(persisted?.profile !== 'atomic-chat' || !persisted?.env?.OPENAI_MODEL)
|
||||
) {
|
||||
if (!(await hasLocalAtomicChat())) {
|
||||
console.error('Atomic Chat is not running (could not connect to 127.0.0.1:1337).\n Download from https://atomic.chat/ and launch the application.')
|
||||
process.exit(1)
|
||||
}
|
||||
resolvedAtomicChatModel = await resolveAtomicChatDefaultModel()
|
||||
if (!resolvedAtomicChatModel) {
|
||||
console.error('Atomic Chat is running but no model is loaded. Open Atomic Chat and download or start a model first.')
|
||||
process.exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
const env = await buildLaunchEnv({
|
||||
profile,
|
||||
persisted,
|
||||
goal: options.goal,
|
||||
getOllamaChatBaseUrl,
|
||||
resolveOllamaDefaultModel: async () => resolvedOllamaModel || 'llama3.1:8b',
|
||||
getAtomicChatChatBaseUrl,
|
||||
resolveAtomicChatDefaultModel: async () => resolvedAtomicChatModel,
|
||||
})
|
||||
if (options.fast) {
|
||||
applyFastFlags(env)
|
||||
|
||||
@@ -93,11 +93,15 @@ function isLocalBaseUrl(baseUrl: string): boolean {
|
||||
}
|
||||
|
||||
const GEMINI_DEFAULT_BASE_URL = 'https://generativelanguage.googleapis.com/v1beta/openai'
|
||||
const GITHUB_MODELS_DEFAULT_BASE = 'https://models.github.ai/inference'
|
||||
|
||||
function currentBaseUrl(): string {
|
||||
if (isTruthy(process.env.CLAUDE_CODE_USE_GEMINI)) {
|
||||
return process.env.GEMINI_BASE_URL ?? GEMINI_DEFAULT_BASE_URL
|
||||
}
|
||||
if (isTruthy(process.env.CLAUDE_CODE_USE_GITHUB)) {
|
||||
return process.env.OPENAI_BASE_URL ?? GITHUB_MODELS_DEFAULT_BASE
|
||||
}
|
||||
return process.env.OPENAI_BASE_URL ?? 'https://api.openai.com/v1'
|
||||
}
|
||||
|
||||
@@ -126,15 +130,47 @@ function checkGeminiEnv(): CheckResult[] {
|
||||
return results
|
||||
}
|
||||
|
||||
function checkGithubEnv(): CheckResult[] {
|
||||
const results: CheckResult[] = []
|
||||
const baseUrl = process.env.OPENAI_BASE_URL ?? GITHUB_MODELS_DEFAULT_BASE
|
||||
results.push(pass('Provider mode', 'GitHub Models provider enabled.'))
|
||||
|
||||
const token = process.env.GITHUB_TOKEN ?? process.env.GH_TOKEN
|
||||
if (!token?.trim()) {
|
||||
results.push(fail('GITHUB_TOKEN', 'Missing. Set GITHUB_TOKEN or GH_TOKEN.'))
|
||||
} else {
|
||||
results.push(pass('GITHUB_TOKEN', 'Configured.'))
|
||||
}
|
||||
|
||||
if (!process.env.OPENAI_MODEL) {
|
||||
results.push(
|
||||
pass(
|
||||
'OPENAI_MODEL',
|
||||
'Not set. Default github:copilot → openai/gpt-4.1 at runtime.',
|
||||
),
|
||||
)
|
||||
} else {
|
||||
results.push(pass('OPENAI_MODEL', process.env.OPENAI_MODEL))
|
||||
}
|
||||
|
||||
results.push(pass('OPENAI_BASE_URL', baseUrl))
|
||||
return results
|
||||
}
|
||||
|
||||
function checkOpenAIEnv(): CheckResult[] {
|
||||
const results: CheckResult[] = []
|
||||
const useGemini = isTruthy(process.env.CLAUDE_CODE_USE_GEMINI)
|
||||
const useGithub = isTruthy(process.env.CLAUDE_CODE_USE_GITHUB)
|
||||
const useOpenAI = isTruthy(process.env.CLAUDE_CODE_USE_OPENAI)
|
||||
|
||||
if (useGemini) {
|
||||
return checkGeminiEnv()
|
||||
}
|
||||
|
||||
if (useGithub && !useOpenAI) {
|
||||
return checkGithubEnv()
|
||||
}
|
||||
|
||||
if (!useOpenAI) {
|
||||
results.push(pass('Provider mode', 'Anthropic login flow enabled (CLAUDE_CODE_USE_OPENAI is off).'))
|
||||
return results
|
||||
@@ -181,12 +217,21 @@ function checkOpenAIEnv(): CheckResult[] {
|
||||
}
|
||||
|
||||
const key = process.env.OPENAI_API_KEY
|
||||
const githubToken = process.env.GITHUB_TOKEN ?? process.env.GH_TOKEN
|
||||
if (key === 'SUA_CHAVE') {
|
||||
results.push(fail('OPENAI_API_KEY', 'Placeholder value detected: SUA_CHAVE.'))
|
||||
} else if (!key && !isLocalBaseUrl(request.baseUrl)) {
|
||||
} else if (
|
||||
!key &&
|
||||
!isLocalBaseUrl(request.baseUrl) &&
|
||||
!(useGithub && githubToken?.trim())
|
||||
) {
|
||||
results.push(fail('OPENAI_API_KEY', 'Missing key for non-local provider URL.'))
|
||||
} else if (!key && useGithub && githubToken?.trim()) {
|
||||
results.push(
|
||||
pass('OPENAI_API_KEY', 'Not set; GITHUB_TOKEN/GH_TOKEN will be used for GitHub Models.'),
|
||||
)
|
||||
} else if (!key) {
|
||||
results.push(pass('OPENAI_API_KEY', 'Not set (allowed for local providers like Ollama/LM Studio).'))
|
||||
results.push(pass('OPENAI_API_KEY', 'Not set (allowed for local providers like Atomic Chat/Ollama/LM Studio).'))
|
||||
} else {
|
||||
results.push(pass('OPENAI_API_KEY', 'Configured.'))
|
||||
}
|
||||
@@ -197,11 +242,19 @@ function checkOpenAIEnv(): CheckResult[] {
|
||||
async function checkBaseUrlReachability(): Promise<CheckResult> {
|
||||
const useGemini = isTruthy(process.env.CLAUDE_CODE_USE_GEMINI)
|
||||
const useOpenAI = isTruthy(process.env.CLAUDE_CODE_USE_OPENAI)
|
||||
const useGithub = isTruthy(process.env.CLAUDE_CODE_USE_GITHUB)
|
||||
|
||||
if (!useGemini && !useOpenAI) {
|
||||
if (!useGemini && !useOpenAI && !useGithub) {
|
||||
return pass('Provider reachability', 'Skipped (OpenAI-compatible mode disabled).')
|
||||
}
|
||||
|
||||
if (useGithub) {
|
||||
return pass(
|
||||
'Provider reachability',
|
||||
'Skipped for GitHub Models (inference endpoint differs from OpenAI /models probe).',
|
||||
)
|
||||
}
|
||||
|
||||
const geminiBaseUrl = 'https://generativelanguage.googleapis.com/v1beta/openai'
|
||||
const resolvedBaseUrl = useGemini
|
||||
? (process.env.GEMINI_BASE_URL ?? geminiBaseUrl)
|
||||
@@ -271,8 +324,21 @@ async function checkBaseUrlReachability(): Promise<CheckResult> {
|
||||
}
|
||||
}
|
||||
|
||||
function isAtomicChatUrl(baseUrl: string): boolean {
|
||||
try {
|
||||
const parsed = new URL(baseUrl)
|
||||
return parsed.port === '1337' && isLocalBaseUrl(baseUrl)
|
||||
} catch {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
function checkOllamaProcessorMode(): CheckResult {
|
||||
if (!isTruthy(process.env.CLAUDE_CODE_USE_OPENAI) || isTruthy(process.env.CLAUDE_CODE_USE_GEMINI)) {
|
||||
if (
|
||||
!isTruthy(process.env.CLAUDE_CODE_USE_OPENAI) ||
|
||||
isTruthy(process.env.CLAUDE_CODE_USE_GEMINI) ||
|
||||
isTruthy(process.env.CLAUDE_CODE_USE_GITHUB)
|
||||
) {
|
||||
return pass('Ollama processor mode', 'Skipped (OpenAI-compatible mode disabled).')
|
||||
}
|
||||
|
||||
@@ -281,6 +347,10 @@ function checkOllamaProcessorMode(): CheckResult {
|
||||
return pass('Ollama processor mode', 'Skipped (provider URL is not local).')
|
||||
}
|
||||
|
||||
if (isAtomicChatUrl(baseUrl)) {
|
||||
return pass('Ollama processor mode', 'Skipped (Atomic Chat local provider detected, not Ollama).')
|
||||
}
|
||||
|
||||
const result = spawnSync('ollama', ['ps'], {
|
||||
cwd: process.cwd(),
|
||||
encoding: 'utf8',
|
||||
@@ -289,7 +359,7 @@ function checkOllamaProcessorMode(): CheckResult {
|
||||
|
||||
if (result.status !== 0) {
|
||||
const detail = (result.stderr || result.stdout || 'Unable to run ollama ps').trim()
|
||||
return fail('Ollama processor mode', detail)
|
||||
return pass('Ollama processor mode', `Native CLI check failed (${detail}). Assuming valid Docker/remote backend since HTTP ping passed.`)
|
||||
}
|
||||
|
||||
const output = (result.stdout || '').trim()
|
||||
@@ -319,6 +389,22 @@ function serializeSafeEnvSummary(): Record<string, string | boolean> {
|
||||
GEMINI_API_KEY_SET: Boolean(process.env.GEMINI_API_KEY ?? process.env.GOOGLE_API_KEY),
|
||||
}
|
||||
}
|
||||
if (
|
||||
isTruthy(process.env.CLAUDE_CODE_USE_GITHUB) &&
|
||||
!isTruthy(process.env.CLAUDE_CODE_USE_OPENAI)
|
||||
) {
|
||||
return {
|
||||
CLAUDE_CODE_USE_GITHUB: true,
|
||||
OPENAI_MODEL:
|
||||
process.env.OPENAI_MODEL ??
|
||||
'(unset, default: github:copilot → openai/gpt-4.1)',
|
||||
OPENAI_BASE_URL:
|
||||
process.env.OPENAI_BASE_URL ?? GITHUB_MODELS_DEFAULT_BASE,
|
||||
GITHUB_TOKEN_SET: Boolean(
|
||||
process.env.GITHUB_TOKEN ?? process.env.GH_TOKEN,
|
||||
),
|
||||
}
|
||||
}
|
||||
const request = resolveProviderRequest({
|
||||
model: process.env.OPENAI_MODEL,
|
||||
baseUrl: process.env.OPENAI_BASE_URL,
|
||||
@@ -374,6 +460,13 @@ async function main(): Promise<void> {
|
||||
const options = parseOptions(process.argv.slice(2))
|
||||
const results: CheckResult[] = []
|
||||
|
||||
const { enableConfigs } = await import('../src/utils/config.js')
|
||||
enableConfigs()
|
||||
const { applySafeConfigEnvironmentVariables } = await import('../src/utils/managedEnv.js')
|
||||
applySafeConfigEnvironmentVariables()
|
||||
const { hydrateGithubModelsTokenFromSecureStorage } = await import('../src/utils/githubModelsCredentials.js')
|
||||
hydrateGithubModelsTokenFromSecureStorage()
|
||||
|
||||
results.push(checkNodeVersion())
|
||||
results.push(checkBunRuntime())
|
||||
results.push(checkBuildArtifacts())
|
||||
|
||||
@@ -57,8 +57,8 @@ class Provider:
|
||||
@property
|
||||
def is_configured(self) -> bool:
|
||||
"""True if the provider has an API key set."""
|
||||
if self.name == "ollama":
|
||||
return True # Ollama needs no API key
|
||||
if self.name in ("ollama", "atomic-chat"):
|
||||
return True # Local providers need no API key
|
||||
return bool(self.api_key)
|
||||
|
||||
@property
|
||||
@@ -93,6 +93,7 @@ def build_default_providers() -> list[Provider]:
|
||||
big = os.getenv("BIG_MODEL", "gpt-4.1")
|
||||
small = os.getenv("SMALL_MODEL", "gpt-4.1-mini")
|
||||
ollama_url = os.getenv("OLLAMA_BASE_URL", "http://localhost:11434")
|
||||
atomic_chat_url = os.getenv("ATOMIC_CHAT_BASE_URL", "http://127.0.0.1:1337")
|
||||
|
||||
return [
|
||||
Provider(
|
||||
@@ -119,6 +120,14 @@ def build_default_providers() -> list[Provider]:
|
||||
big_model=big if "gemini" not in big and "gpt" not in big else "llama3:8b",
|
||||
small_model=small if "gemini" not in small and "gpt" not in small else "llama3:8b",
|
||||
),
|
||||
Provider(
|
||||
name="atomic-chat",
|
||||
ping_url=f"{atomic_chat_url}/v1/models",
|
||||
api_key_env="",
|
||||
cost_per_1k_tokens=0.0, # free — local (Apple Silicon)
|
||||
big_model=big if "gemini" not in big and "gpt" not in big else "llama3:8b",
|
||||
small_model=small if "gemini" not in small and "gpt" not in small else "llama3:8b",
|
||||
),
|
||||
]
|
||||
|
||||
|
||||
|
||||
File diff suppressed because one or more lines are too long
@@ -19,6 +19,7 @@ import cost from './commands/cost/index.js'
|
||||
import diff from './commands/diff/index.js'
|
||||
import ctx_viz from './commands/ctx_viz/index.js'
|
||||
import doctor from './commands/doctor/index.js'
|
||||
import onboardGithub from './commands/onboard-github/index.js'
|
||||
import memory from './commands/memory/index.js'
|
||||
import help from './commands/help/index.js'
|
||||
import ide from './commands/ide/index.js'
|
||||
@@ -288,6 +289,7 @@ const COMMANDS = memoize((): Command[] => [
|
||||
memory,
|
||||
mobile,
|
||||
model,
|
||||
onboardGithub,
|
||||
outputStyle,
|
||||
remoteEnv,
|
||||
plugin,
|
||||
|
||||
19
src/commands/mcp/doctorCommand.test.ts
Normal file
19
src/commands/mcp/doctorCommand.test.ts
Normal file
@@ -0,0 +1,19 @@
|
||||
import assert from 'node:assert/strict'
|
||||
import test from 'node:test'
|
||||
|
||||
import { Command } from '@commander-js/extra-typings'
|
||||
|
||||
import { registerMcpDoctorCommand } from './doctorCommand.js'
|
||||
|
||||
test('registerMcpDoctorCommand adds the doctor subcommand with expected options', () => {
|
||||
const mcp = new Command('mcp')
|
||||
|
||||
registerMcpDoctorCommand(mcp)
|
||||
|
||||
const doctor = mcp.commands.find(command => command.name() === 'doctor')
|
||||
assert.ok(doctor)
|
||||
assert.equal(doctor?.usage(), '[options] [name]')
|
||||
|
||||
const optionFlags = doctor?.options.map(option => option.long)
|
||||
assert.deepEqual(optionFlags, ['--scope', '--config-only', '--json'])
|
||||
})
|
||||
25
src/commands/mcp/doctorCommand.ts
Normal file
25
src/commands/mcp/doctorCommand.ts
Normal file
@@ -0,0 +1,25 @@
|
||||
/**
|
||||
* MCP doctor CLI subcommand.
|
||||
*/
|
||||
import { type Command } from '@commander-js/extra-typings'
|
||||
|
||||
export function registerMcpDoctorCommand(mcp: Command): void {
|
||||
mcp
|
||||
.command('doctor [name]')
|
||||
.description(
|
||||
'Diagnose MCP configuration, precedence, disabled/pending state, and connection health. ' +
|
||||
'Note: unless --config-only is used, stdio servers may be spawned and remote servers may be contacted. ' +
|
||||
'Only use this command in directories you trust.',
|
||||
)
|
||||
.option('-s, --scope <scope>', 'Restrict config analysis to a specific scope (local, project, user, or enterprise)')
|
||||
.option('--config-only', 'Skip live connection checks and only analyze configuration state')
|
||||
.option('--json', 'Output the diagnostics report as JSON')
|
||||
.action(async (name: string | undefined, options: {
|
||||
scope?: string
|
||||
configOnly?: boolean
|
||||
json?: boolean
|
||||
}) => {
|
||||
const { mcpDoctorHandler } = await import('../../cli/handlers/mcp.js')
|
||||
await mcpDoctorHandler(name, options)
|
||||
})
|
||||
}
|
||||
11
src/commands/onboard-github/index.ts
Normal file
11
src/commands/onboard-github/index.ts
Normal file
@@ -0,0 +1,11 @@
|
||||
import type { Command } from '../../commands.js'
|
||||
|
||||
const onboardGithub: Command = {
|
||||
name: 'onboard-github',
|
||||
description:
|
||||
'Interactive setup for GitHub Models: device login or PAT, saved to secure storage',
|
||||
type: 'local-jsx',
|
||||
load: () => import('./onboard-github.js'),
|
||||
}
|
||||
|
||||
export default onboardGithub
|
||||
237
src/commands/onboard-github/onboard-github.tsx
Normal file
237
src/commands/onboard-github/onboard-github.tsx
Normal file
@@ -0,0 +1,237 @@
|
||||
import * as React from 'react'
|
||||
import { useCallback, useState } from 'react'
|
||||
import { Select } from '../../components/CustomSelect/select.js'
|
||||
import { Spinner } from '../../components/Spinner.js'
|
||||
import TextInput from '../../components/TextInput.js'
|
||||
import { Box, Text } from '../../ink.js'
|
||||
import {
|
||||
openVerificationUri,
|
||||
pollAccessToken,
|
||||
requestDeviceCode,
|
||||
} from '../../services/github/deviceFlow.js'
|
||||
import type { LocalJSXCommandCall } from '../../types/command.js'
|
||||
import {
|
||||
hydrateGithubModelsTokenFromSecureStorage,
|
||||
saveGithubModelsToken,
|
||||
} from '../../utils/githubModelsCredentials.js'
|
||||
import { updateSettingsForSource } from '../../utils/settings/settings.js'
|
||||
|
||||
const DEFAULT_MODEL = 'github:copilot'
|
||||
|
||||
type Step =
|
||||
| 'menu'
|
||||
| 'device-busy'
|
||||
| 'pat'
|
||||
| 'error'
|
||||
|
||||
// Persist GitHub Models mode into user settings: enable
// CLAUDE_CODE_USE_GITHUB, pin OPENAI_MODEL, and clear every competing
// provider toggle so exactly one backend is active after onboarding.
function mergeUserSettingsEnv(model: string): { ok: boolean; detail?: string } {
  // `undefined` removes the key from the merged settings env.
  const cleared = undefined as any
  const { error } = updateSettingsForSource('userSettings', {
    env: {
      CLAUDE_CODE_USE_GITHUB: '1',
      OPENAI_MODEL: model,
      CLAUDE_CODE_USE_OPENAI: cleared,
      CLAUDE_CODE_USE_GEMINI: cleared,
      CLAUDE_CODE_USE_BEDROCK: cleared,
      CLAUDE_CODE_USE_VERTEX: cleared,
      CLAUDE_CODE_USE_FOUNDRY: cleared,
    },
  })
  return error ? { ok: false, detail: error.message } : { ok: true }
}
|
||||
|
||||
/**
 * Interactive GitHub Models onboarding UI.
 *
 * State machine over `step`: 'menu' (choose device flow, PAT paste, or
 * cancel) → 'device-busy' (waiting on GitHub's device-code authorization)
 * or 'pat' (masked token input) → success (calls `onDone`) or 'error'
 * (shows the message with back/exit options).
 *
 * Props:
 * - onDone: host completion callback; ends the command with a message.
 * - onChangeAPIKey: notifies the host that credentials changed so it can
 *   re-resolve the active provider.
 */
function OnboardGithub(props: {
  onDone: Parameters<LocalJSXCommandCall>[0]
  onChangeAPIKey: () => void
}): React.ReactNode {
  const { onDone, onChangeAPIKey } = props
  const [step, setStep] = useState<Step>('menu')
  const [errorMsg, setErrorMsg] = useState<string | null>(null)
  // Code + URL shown to the user while the device flow is pending.
  const [deviceHint, setDeviceHint] = useState<{
    user_code: string
    verification_uri: string
  } | null>(null)
  // In-progress PAT text and its cursor position for the masked input.
  const [patDraft, setPatDraft] = useState('')
  const [cursorOffset, setCursorOffset] = useState(0)

  // Shared success path for both auth methods: store the token, persist
  // settings, update the live process env, then hand control back.
  const finalize = useCallback(
    async (token: string, model: string = DEFAULT_MODEL) => {
      const saved = saveGithubModelsToken(token)
      if (!saved.success) {
        setErrorMsg(saved.warning ?? 'Could not save token to secure storage.')
        setStep('error')
        return
      }
      const merged = mergeUserSettingsEnv(model.trim() || DEFAULT_MODEL)
      if (!merged.ok) {
        // Token is already stored; tell the user how to finish by hand.
        setErrorMsg(
          `Token saved, but settings were not updated: ${merged.detail ?? 'unknown error'}. ` +
            `Add env CLAUDE_CODE_USE_GITHUB=1 and OPENAI_MODEL to ~/.claude/settings.json manually.`,
        )
        setStep('error')
        return
      }
      // Mirror the persisted settings into this process so the switch
      // takes effect without a restart where possible.
      process.env.CLAUDE_CODE_USE_GITHUB = '1'
      process.env.OPENAI_MODEL = model.trim() || DEFAULT_MODEL
      hydrateGithubModelsTokenFromSecureStorage()
      onChangeAPIKey()
      onDone(
        'GitHub Models onboard complete. Token stored in secure storage; user settings updated. Restart if the model does not switch.',
        { display: 'user' },
      )
    },
    [onChangeAPIKey, onDone],
  )

  // Browser-based device-code flow: request a code, open the verification
  // page, poll until authorized, then finalize with the access token.
  const runDeviceFlow = useCallback(async () => {
    setStep('device-busy')
    setErrorMsg(null)
    setDeviceHint(null)
    try {
      const device = await requestDeviceCode()
      setDeviceHint({
        user_code: device.user_code,
        verification_uri: device.verification_uri,
      })
      await openVerificationUri(device.verification_uri)
      const token = await pollAccessToken(device.device_code, {
        initialInterval: device.interval,
        timeoutSeconds: device.expires_in,
      })
      await finalize(token, DEFAULT_MODEL)
    } catch (e) {
      setErrorMsg(e instanceof Error ? e.message : String(e))
      setStep('error')
    }
  }, [finalize])

  // Error screen: show the message and let the user retry or bail out.
  if (step === 'error' && errorMsg) {
    const options = [
      {
        label: 'Back to menu',
        value: 'back' as const,
      },
      {
        label: 'Exit',
        value: 'exit' as const,
      },
    ]
    return (
      <Box flexDirection="column" gap={1}>
        <Text color="red">{errorMsg}</Text>
        <Select
          options={options}
          onChange={(v: string) => {
            if (v === 'back') {
              setStep('menu')
              setErrorMsg(null)
            } else {
              onDone('GitHub onboard cancelled', { display: 'system' })
            }
          }}
        />
      </Box>
    )
  }

  // Device-flow wait screen: show the user code once it arrives.
  if (step === 'device-busy') {
    return (
      <Box flexDirection="column" gap={1}>
        <Text>GitHub device login</Text>
        {deviceHint ? (
          <>
            <Text>
              Enter code <Text bold>{deviceHint.user_code}</Text> at{' '}
              {deviceHint.verification_uri}
            </Text>
            <Text dimColor>
              A browser window may have opened. Waiting for authorization…
            </Text>
          </>
        ) : (
          <Text dimColor>Requesting device code from GitHub…</Text>
        )}
        <Spinner />
      </Box>
    )
  }

  // Manual PAT entry screen: masked input, Enter submits, Esc goes back.
  if (step === 'pat') {
    return (
      <Box flexDirection="column" gap={1}>
        <Text>Paste a GitHub personal access token with access to GitHub Models.</Text>
        <Text dimColor>Input is masked. Enter to submit; Esc to go back.</Text>
        <TextInput
          value={patDraft}
          mask="*"
          onChange={setPatDraft}
          onSubmit={async (value: string) => {
            const t = value.trim()
            if (!t) {
              return
            }
            await finalize(t, DEFAULT_MODEL)
          }}
          onExit={() => {
            setStep('menu')
            setPatDraft('')
          }}
          columns={80}
          cursorOffset={cursorOffset}
          onChangeCursorOffset={setCursorOffset}
        />
      </Box>
    )
  }

  // Default screen: the main menu.
  const menuOptions = [
    {
      label: 'Sign in with browser (device code)',
      value: 'device' as const,
    },
    {
      label: 'Paste personal access token',
      value: 'pat' as const,
    },
    {
      label: 'Cancel',
      value: 'cancel' as const,
    },
  ]

  return (
    <Box flexDirection="column" gap={1}>
      <Text bold>GitHub Models setup</Text>
      <Text dimColor>
        Stores your token in the OS credential store (macOS Keychain when available)
        and enables CLAUDE_CODE_USE_GITHUB in your user settings — no export
        GITHUB_TOKEN needed for future runs.
      </Text>
      <Select
        options={menuOptions}
        onChange={(v: string) => {
          if (v === 'cancel') {
            onDone('GitHub onboard cancelled', { display: 'system' })
            return
          }
          if (v === 'pat') {
            setStep('pat')
            return
          }
          void runDeviceFlow()
        }}
      />
    </Box>
  )
}
|
||||
|
||||
// Command entry point: render the onboarding UI, wiring the host's
// completion callback and API-key-change notifier into the component.
export const call: LocalJSXCommandCall = async (onDone, context) => {
  return <OnboardGithub onDone={onDone} onChangeAPIKey={context.onChangeAPIKey} />
}
|
||||
@@ -1,50 +1,53 @@
|
||||
import { c as _c } from "react-compiler-runtime";
|
||||
import React from 'react';
|
||||
import { Box, Link, Text } from '../ink.js';
|
||||
import { Select } from './CustomSelect/index.js';
|
||||
import { Dialog } from './design-system/Dialog.js';
|
||||
import React from 'react'
|
||||
import { Box, Link, Text } from '../ink.js'
|
||||
import { Select } from './CustomSelect/index.js'
|
||||
import { Dialog } from './design-system/Dialog.js'
|
||||
import { getAPIProvider } from '../utils/model/providers.js'
|
||||
|
||||
type Props = {
|
||||
onDone: () => void;
|
||||
};
|
||||
export function CostThresholdDialog(t0) {
|
||||
const $ = _c(7);
|
||||
const {
|
||||
onDone
|
||||
} = t0;
|
||||
let t1;
|
||||
if ($[0] === Symbol.for("react.memo_cache_sentinel")) {
|
||||
t1 = <Box flexDirection="column"><Text>Learn more about how to monitor your spending:</Text><Link url="https://code.claude.com/docs/en/costs" /></Box>;
|
||||
$[0] = t1;
|
||||
} else {
|
||||
t1 = $[0];
|
||||
}
|
||||
let t2;
|
||||
if ($[1] === Symbol.for("react.memo_cache_sentinel")) {
|
||||
t2 = [{
|
||||
value: "ok",
|
||||
label: "Got it, thanks!"
|
||||
}];
|
||||
$[1] = t2;
|
||||
} else {
|
||||
t2 = $[1];
|
||||
}
|
||||
let t3;
|
||||
if ($[2] !== onDone) {
|
||||
t3 = <Select options={t2} onChange={onDone} />;
|
||||
$[2] = onDone;
|
||||
$[3] = t3;
|
||||
} else {
|
||||
t3 = $[3];
|
||||
}
|
||||
let t4;
|
||||
if ($[4] !== onDone || $[5] !== t3) {
|
||||
t4 = <Dialog title="You've spent $5 on the Anthropic API this session." onCancel={onDone}>{t1}{t3}</Dialog>;
|
||||
$[4] = onDone;
|
||||
$[5] = t3;
|
||||
$[6] = t4;
|
||||
} else {
|
||||
t4 = $[6];
|
||||
}
|
||||
return t4;
|
||||
onDone: () => void
|
||||
}
|
||||
|
||||
function getProviderLabel(): string {
|
||||
const provider = getAPIProvider()
|
||||
switch (provider) {
|
||||
case 'firstParty':
|
||||
return 'Anthropic API'
|
||||
case 'bedrock':
|
||||
return 'AWS Bedrock'
|
||||
case 'vertex':
|
||||
return 'Google Vertex'
|
||||
case 'foundry':
|
||||
return 'Azure Foundry'
|
||||
case 'openai':
|
||||
return 'OpenAI-compatible API'
|
||||
case 'gemini':
|
||||
return 'Gemini API'
|
||||
default:
|
||||
return 'API'
|
||||
}
|
||||
}
|
||||
|
||||
export function CostThresholdDialog({ onDone }: Props): React.ReactNode {
|
||||
const providerLabel = getProviderLabel()
|
||||
return (
|
||||
<Dialog
|
||||
title={`You've spent $5 on the ${providerLabel} this session.`}
|
||||
onCancel={onDone}
|
||||
>
|
||||
<Box flexDirection="column">
|
||||
<Text>Learn more about how to monitor your spending:</Text>
|
||||
<Link url="https://code.claude.com/docs/en/costs" />
|
||||
</Box>
|
||||
<Select
|
||||
options={[
|
||||
{
|
||||
value: 'ok',
|
||||
label: 'Got it, thanks!',
|
||||
},
|
||||
]}
|
||||
onChange={onDone}
|
||||
/>
|
||||
</Dialog>
|
||||
)
|
||||
}
|
||||
//# sourceMappingURL=data:application/json;charset=utf-8;base64,eyJ2ZXJzaW9uIjozLCJuYW1lcyI6WyJSZWFjdCIsIkJveCIsIkxpbmsiLCJUZXh0IiwiU2VsZWN0IiwiRGlhbG9nIiwiUHJvcHMiLCJvbkRvbmUiLCJDb3N0VGhyZXNob2xkRGlhbG9nIiwidDAiLCIkIiwiX2MiLCJ0MSIsIlN5bWJvbCIsImZvciIsInQyIiwidmFsdWUiLCJsYWJlbCIsInQzIiwidDQiXSwic291cmNlcyI6WyJDb3N0VGhyZXNob2xkRGlhbG9nLnRzeCJdLCJzb3VyY2VzQ29udGVudCI6WyJpbXBvcnQgUmVhY3QgZnJvbSAncmVhY3QnXG5pbXBvcnQgeyBCb3gsIExpbmssIFRleHQgfSBmcm9tICcuLi9pbmsuanMnXG5pbXBvcnQgeyBTZWxlY3QgfSBmcm9tICcuL0N1c3RvbVNlbGVjdC9pbmRleC5qcydcbmltcG9ydCB7IERpYWxvZyB9IGZyb20gJy4vZGVzaWduLXN5c3RlbS9EaWFsb2cuanMnXG5cbnR5cGUgUHJvcHMgPSB7XG4gIG9uRG9uZTogKCkgPT4gdm9pZFxufVxuXG5leHBvcnQgZnVuY3Rpb24gQ29zdFRocmVzaG9sZERpYWxvZyh7IG9uRG9uZSB9OiBQcm9wcyk6IFJlYWN0LlJlYWN0Tm9kZSB7XG4gIHJldHVybiAoXG4gICAgPERpYWxvZ1xuICAgICAgdGl0bGU9XCJZb3UndmUgc3BlbnQgJDUgb24gdGhlIEFudGhyb3BpYyBBUEkgdGhpcyBzZXNzaW9uLlwiXG4gICAgICBvbkNhbmNlbD17b25Eb25lfVxuICAgID5cbiAgICAgIDxCb3ggZmxleERpcmVjdGlvbj1cImNvbHVtblwiPlxuICAgICAgICA8VGV4dD5MZWFybiBtb3JlIGFib3V0IGhvdyB0byBtb25pdG9yIHlvdXIgc3BlbmRpbmc6PC9UZXh0PlxuICAgICAgICA8TGluayB1cmw9XCJodHRwczovL2NvZGUuY2xhdWRlLmNvbS9kb2NzL2VuL2Nvc3RzXCIgLz5cbiAgICAgIDwvQm94PlxuICAgICAgPFNlbGVjdFxuICAgICAgICBvcHRpb25zPXtbXG4gICAgICAgICAge1xuICAgICAgICAgICAgdmFsdWU6ICdvaycsXG4gICAgICAgICAgICBsYWJlbDogJ0dvdCBpdCwgdGhhbmtzIScsXG4gICAgICAgICAgfSxcbiAgICAgICAgXX1cbiAgICAgICAgb25DaGFuZ2U9e29uRG9uZX1cbiAgICAgIC8+XG4gICAgPC9EaWFsb2c+XG4gIClcbn1cbiJdLCJtYXBwaW5ncyI6IjtBQUFBLE9BQU9BLEtBQUssTUFBTSxPQUFPO0FBQ3pCLFNBQVNDLEdBQUcsRUFBRUMsSUFBSSxFQUFFQyxJQUFJLFFBQVEsV0FBVztBQUMzQyxTQUFTQyxNQUFNLFFBQVEseUJBQXlCO0FBQ2hELFNBQVNDLE1BQU0sUUFBUSwyQkFBMkI7QUFFbEQsS0FBS0MsS0FBSyxHQUFHO0VBQ1hDLE1BQU0sRUFBRSxHQUFHLEdBQUcsSUFBSTtBQUNwQixDQUFDO0FBRUQsT0FBTyxTQUFBQyxvQkFBQUMsRUFBQTtFQUFBLE1BQUFDLENBQUEsR0FBQUMsRUFBQTtFQUE2QjtJQUFBSjtFQUFBLElBQUFFLEVBQWlCO0VBQUEsSUFBQUcsRUFBQTtFQUFBLElBQUFGLENBQUEsUUFBQUcsTUFBQSxDQUFBQyxHQUFBO0lBTS9DRixFQUFBLElBQUMsR0FBRyxDQUFlLGFBQVEsQ0FBUixRQUFRLENBQ3pCLENBQUMsSUFBSSxDQUFDLDhDQUE4
QyxFQUFuRCxJQUFJLENBQ0wsQ0FBQyxJQUFJLENBQUssR0FBdUMsQ0FBdkMsdUNBQXVDLEdBQ25ELEVBSEMsR0FBRyxDQUdFO0lBQUFGLENBQUEsTUFBQUUsRUFBQTtFQUFBO0lBQUFBLEVBQUEsR0FBQUYsQ0FBQTtFQUFBO0VBQUEsSUFBQUssRUFBQTtFQUFBLElBQUFMLENBQUEsUUFBQUcsTUFBQSxDQUFBQyxHQUFBO0lBRUtDLEVBQUEsSUFDUDtNQUFBQyxLQUFBLEVBQ1MsSUFBSTtNQUFBQyxLQUFBLEVBQ0o7SUFDVCxDQUFDLENBQ0Y7SUFBQVAsQ0FBQSxNQUFBSyxFQUFBO0VBQUE7SUFBQUEsRUFBQSxHQUFBTCxDQUFBO0VBQUE7RUFBQSxJQUFBUSxFQUFBO0VBQUEsSUFBQVIsQ0FBQSxRQUFBSCxNQUFBO0lBTkhXLEVBQUEsSUFBQyxNQUFNLENBQ0ksT0FLUixDQUxRLENBQUFILEVBS1QsQ0FBQyxDQUNTUixRQUFNLENBQU5BLE9BQUssQ0FBQyxHQUNoQjtJQUFBRyxDQUFBLE1BQUFILE1BQUE7SUFBQUcsQ0FBQSxNQUFBUSxFQUFBO0VBQUE7SUFBQUEsRUFBQSxHQUFBUixDQUFBO0VBQUE7RUFBQSxJQUFBUyxFQUFBO0VBQUEsSUFBQVQsQ0FBQSxRQUFBSCxNQUFBLElBQUFHLENBQUEsUUFBQVEsRUFBQTtJQWhCSkMsRUFBQSxJQUFDLE1BQU0sQ0FDQyxLQUFvRCxDQUFwRCxvREFBb0QsQ0FDaERaLFFBQU0sQ0FBTkEsT0FBSyxDQUFDLENBRWhCLENBQUFLLEVBR0ssQ0FDTCxDQUFBTSxFQVFDLENBQ0gsRUFqQkMsTUFBTSxDQWlCRTtJQUFBUixDQUFBLE1BQUFILE1BQUE7SUFBQUcsQ0FBQSxNQUFBUSxFQUFBO0lBQUFSLENBQUEsTUFBQVMsRUFBQTtFQUFBO0lBQUFBLEVBQUEsR0FBQVQsQ0FBQTtFQUFBO0VBQUEsT0FqQlRTLEVBaUJTO0FBQUEiLCJpZ25vcmVMaXN0IjpbXX0=
|
||||
@@ -5,6 +5,8 @@
|
||||
* Addresses: https://github.com/Gitlawb/openclaude/issues/55
|
||||
*/
|
||||
|
||||
declare const MACRO: { VERSION: string; DISPLAY_VERSION?: string }
|
||||
|
||||
const ESC = '\x1b['
|
||||
const RESET = `${ESC}0m`
|
||||
const DIM = `${ESC}2m`
|
||||
@@ -78,6 +80,7 @@ const LOGO_CLAUDE = [
|
||||
|
||||
function detectProvider(): { name: string; model: string; baseUrl: string; isLocal: boolean } {
|
||||
const useGemini = process.env.CLAUDE_CODE_USE_GEMINI === '1' || process.env.CLAUDE_CODE_USE_GEMINI === 'true'
|
||||
const useGithub = process.env.CLAUDE_CODE_USE_GITHUB === '1' || process.env.CLAUDE_CODE_USE_GITHUB === 'true'
|
||||
const useOpenAI = process.env.CLAUDE_CODE_USE_OPENAI === '1' || process.env.CLAUDE_CODE_USE_OPENAI === 'true'
|
||||
|
||||
if (useGemini) {
|
||||
@@ -86,6 +89,13 @@ function detectProvider(): { name: string; model: string; baseUrl: string; isLoc
|
||||
return { name: 'Google Gemini', model, baseUrl, isLocal: false }
|
||||
}
|
||||
|
||||
if (useGithub) {
|
||||
const model = process.env.OPENAI_MODEL || 'github:copilot'
|
||||
const baseUrl =
|
||||
process.env.OPENAI_BASE_URL || 'https://models.github.ai/inference'
|
||||
return { name: 'GitHub Models', model, baseUrl, isLocal: false }
|
||||
}
|
||||
|
||||
if (useOpenAI) {
|
||||
const model = process.env.OPENAI_MODEL || 'gpt-4o'
|
||||
const baseUrl = process.env.OPENAI_BASE_URL || 'https://api.openai.com/v1'
|
||||
@@ -172,7 +182,7 @@ export function printStartupScreen(): void {
|
||||
out.push(boxRow(sRow, W, sLen))
|
||||
|
||||
out.push(`${rgb(...BORDER)}\u255a${'\u2550'.repeat(W - 2)}\u255d${RESET}`)
|
||||
out.push(` ${DIM}${rgb(...DIMCOL)}openclaude v${(globalThis as Record<string, unknown>)['MACRO_DISPLAY_VERSION'] ?? '0.1.4'}${RESET}`)
|
||||
out.push(` ${DIM}${rgb(...DIMCOL)}openclaude ${RESET}${rgb(...ACCENT)}v${MACRO.DISPLAY_VERSION ?? MACRO.VERSION}${RESET}`)
|
||||
out.push('')
|
||||
|
||||
process.stdout.write(out.join('\n') + '\n')
|
||||
|
||||
@@ -441,3 +441,8 @@ export async function connectRemoteControl(
|
||||
): Promise<RemoteControlHandle | null> {
|
||||
throw new Error('not implemented')
|
||||
}
|
||||
|
||||
// add exit reason types for removing the error within gracefulShutdown file
|
||||
export type ExitReason = {
|
||||
|
||||
}
|
||||
@@ -46,7 +46,22 @@ function isLocalProviderUrl(baseUrl: string | undefined): boolean {
|
||||
}
|
||||
|
||||
function validateProviderEnvOrExit(): void {
|
||||
if (!isEnvTruthy(process.env.CLAUDE_CODE_USE_OPENAI)) {
|
||||
const useOpenAI = isEnvTruthy(process.env.CLAUDE_CODE_USE_OPENAI)
|
||||
const useGithub = isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB)
|
||||
|
||||
if (useGithub && !useOpenAI) {
|
||||
const token =
|
||||
(process.env.GITHUB_TOKEN?.trim() || process.env.GH_TOKEN?.trim()) ?? ''
|
||||
if (!token) {
|
||||
console.error(
|
||||
'GITHUB_TOKEN or GH_TOKEN is required when CLAUDE_CODE_USE_GITHUB=1.',
|
||||
)
|
||||
process.exit(1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
if (!useOpenAI) {
|
||||
return
|
||||
}
|
||||
|
||||
@@ -77,8 +92,15 @@ function validateProviderEnvOrExit(): void {
|
||||
}
|
||||
|
||||
if (!process.env.OPENAI_API_KEY && !isLocalProviderUrl(request.baseUrl)) {
|
||||
console.error('OPENAI_API_KEY is required when CLAUDE_CODE_USE_OPENAI=1 and OPENAI_BASE_URL is not local.')
|
||||
process.exit(1)
|
||||
const hasGithubToken = !!(
|
||||
process.env.GITHUB_TOKEN?.trim() || process.env.GH_TOKEN?.trim()
|
||||
)
|
||||
if (!(useGithub && hasGithubToken)) {
|
||||
console.error(
|
||||
'OPENAI_API_KEY is required when CLAUDE_CODE_USE_OPENAI=1 and OPENAI_BASE_URL is not local. When CLAUDE_CODE_USE_GITHUB=1, GITHUB_TOKEN or GH_TOKEN may be used instead.',
|
||||
)
|
||||
process.exit(1)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -98,6 +120,15 @@ async function main(): Promise<void> {
|
||||
return;
|
||||
}
|
||||
|
||||
{
|
||||
const { enableConfigs } = await import('../utils/config.js')
|
||||
enableConfigs()
|
||||
const { applySafeConfigEnvironmentVariables } = await import('../utils/managedEnv.js')
|
||||
applySafeConfigEnvironmentVariables()
|
||||
const { hydrateGithubModelsTokenFromSecureStorage } = await import('../utils/githubModelsCredentials.js')
|
||||
hydrateGithubModelsTokenFromSecureStorage()
|
||||
}
|
||||
|
||||
validateProviderEnvOrExit()
|
||||
|
||||
// Print the gradient startup screen before the Ink UI loads
|
||||
|
||||
49
src/ink/parse-keypress.test.ts
Normal file
49
src/ink/parse-keypress.test.ts
Normal file
@@ -0,0 +1,49 @@
|
||||
import { expect, test } from 'bun:test'
|
||||
|
||||
import {
|
||||
INITIAL_STATE,
|
||||
parseMultipleKeypresses,
|
||||
type ParsedKey,
|
||||
} from './parse-keypress.ts'
|
||||
import { InputEvent } from './events/input-event.ts'
|
||||
|
||||
function parseInputEvent(sequence: string): InputEvent {
|
||||
const [items] = parseMultipleKeypresses(INITIAL_STATE, sequence)
|
||||
|
||||
expect(items).toHaveLength(1)
|
||||
|
||||
const item = items[0]
|
||||
expect(item?.kind).toBe('key')
|
||||
|
||||
return new InputEvent(item as ParsedKey)
|
||||
}
|
||||
|
||||
test('treats CSI-u modifier 0 as unmodified printable input', () => {
|
||||
const event = parseInputEvent('\x1b[47;0u')
|
||||
|
||||
expect(event.input).toBe('/')
|
||||
expect(event.key.ctrl).toBe(false)
|
||||
expect(event.key.meta).toBe(false)
|
||||
expect(event.key.shift).toBe(false)
|
||||
expect(event.key.super).toBe(false)
|
||||
})
|
||||
|
||||
test('preserves printable Unicode CSI-u input', () => {
|
||||
const event = parseInputEvent('\x1b[231u')
|
||||
|
||||
expect(event.input).toBe('ç')
|
||||
expect(event.key.ctrl).toBe(false)
|
||||
expect(event.key.meta).toBe(false)
|
||||
expect(event.key.shift).toBe(false)
|
||||
expect(event.key.super).toBe(false)
|
||||
})
|
||||
|
||||
test('preserves printable Unicode CSI-u input with explicit modifier 0', () => {
|
||||
const event = parseInputEvent('\x1b[231;0u')
|
||||
|
||||
expect(event.input).toBe('ç')
|
||||
expect(event.key.ctrl).toBe(false)
|
||||
expect(event.key.meta).toBe(false)
|
||||
expect(event.key.shift).toBe(false)
|
||||
expect(event.key.super).toBe(false)
|
||||
})
|
||||
@@ -468,7 +468,10 @@ function decodeModifier(modifier: number): {
|
||||
ctrl: boolean
|
||||
super: boolean
|
||||
} {
|
||||
const m = modifier - 1
|
||||
// Some Windows VT stacks use 0 instead of 1 for an unmodified CSI-u key.
|
||||
// Clamp to the protocol default so plain printable keys don't look like
|
||||
// ctrl+meta+shift+super all at once.
|
||||
const m = Math.max(modifier, 1) - 1
|
||||
return {
|
||||
shift: !!(m & 1),
|
||||
meta: !!(m & 2),
|
||||
@@ -477,6 +480,14 @@ function decodeModifier(modifier: number): {
|
||||
}
|
||||
}
|
||||
|
||||
function isPrivateUseCodepoint(codepoint: number): boolean {
|
||||
return (
|
||||
(codepoint >= 0xe000 && codepoint <= 0xf8ff) ||
|
||||
(codepoint >= 0xf0000 && codepoint <= 0xffffd) ||
|
||||
(codepoint >= 0x100000 && codepoint <= 0x10fffd)
|
||||
)
|
||||
}
|
||||
|
||||
/**
|
||||
* Map keycode to key name for modifyOtherKeys/CSI u sequences.
|
||||
* Handles both ASCII keycodes and Kitty keyboard protocol functional keys.
|
||||
@@ -536,6 +547,21 @@ function keycodeToName(keycode: number): string | undefined {
|
||||
if (keycode >= 32 && keycode <= 126) {
|
||||
return String.fromCharCode(keycode).toLowerCase()
|
||||
}
|
||||
|
||||
// CSI-u can carry printable Unicode codepoints directly on some
|
||||
// Windows terminals and keyboard layouts. Keep kitty's private-use
|
||||
// functional key range excluded so special keys still stay non-text.
|
||||
if (
|
||||
keycode > 0x1f &&
|
||||
keycode !== 0x7f &&
|
||||
(keycode < 0x80 || keycode > 0x9f) &&
|
||||
keycode <= 0x10ffff &&
|
||||
(keycode < 0xd800 || keycode > 0xdfff) &&
|
||||
!isPrivateUseCodepoint(keycode)
|
||||
) {
|
||||
return String.fromCodePoint(keycode)
|
||||
}
|
||||
|
||||
return undefined
|
||||
}
|
||||
}
|
||||
|
||||
@@ -139,6 +139,7 @@ import { validateUuid } from './utils/uuid.js';
|
||||
// Plugin startup checks are now handled non-blockingly in REPL.tsx
|
||||
|
||||
import { registerMcpAddCommand } from 'src/commands/mcp/addCommand.js';
|
||||
import { registerMcpDoctorCommand } from 'src/commands/mcp/doctorCommand.js';
|
||||
import { registerMcpXaaIdpCommand } from 'src/commands/mcp/xaaIdpCommand.js';
|
||||
import { logPermissionContextForAnts } from 'src/services/internalLogging.js';
|
||||
import { fetchClaudeAIMcpConfigsIfEligible } from 'src/services/mcp/claudeai.js';
|
||||
@@ -2313,7 +2314,11 @@ async function run(): Promise<CommanderCommand> {
|
||||
errors
|
||||
} = getSettingsWithErrors();
|
||||
const nonMcpErrors = errors.filter(e => !e.mcpErrorMetadata);
|
||||
if (nonMcpErrors.length > 0 && !isEnvTruthy(process.env.CLAUDE_CODE_USE_OPENAI)) {
|
||||
if (
|
||||
nonMcpErrors.length > 0 &&
|
||||
!isEnvTruthy(process.env.CLAUDE_CODE_USE_OPENAI) &&
|
||||
!isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB)
|
||||
) {
|
||||
await launchInvalidSettingsDialog(root, {
|
||||
settingsErrors: nonMcpErrors,
|
||||
onExit: () => gracefulShutdownSync(1)
|
||||
@@ -3887,6 +3892,7 @@ async function run(): Promise<CommanderCommand> {
|
||||
|
||||
// Register the mcp add subcommand (extracted for testability)
|
||||
registerMcpAddCommand(mcp);
|
||||
registerMcpDoctorCommand(mcp);
|
||||
if (isXaaEnabled()) {
|
||||
registerMcpXaaIdpCommand(mcp);
|
||||
}
|
||||
|
||||
@@ -3,6 +3,7 @@ import {
|
||||
setMainLoopModelOverride,
|
||||
} from '../bootstrap/state.js'
|
||||
import { getGlobalConfig, saveGlobalConfig } from '../utils/config.js'
|
||||
import { getAPIProvider } from '../utils/model/providers.js'
|
||||
import {
|
||||
getSettingsForSource,
|
||||
updateSettingsForSource,
|
||||
@@ -23,6 +24,10 @@ import {
|
||||
* tracked by a completion flag in global config.
|
||||
*/
|
||||
export function migrateSonnet1mToSonnet45(): void {
|
||||
if (getAPIProvider() !== 'firstParty') {
|
||||
return
|
||||
}
|
||||
|
||||
const config = getGlobalConfig()
|
||||
if (config.sonnet1m45MigrationComplete) {
|
||||
return
|
||||
|
||||
@@ -97,8 +97,8 @@ import { logError } from '../utils/log.js';
|
||||
/* eslint-disable custom-rules/no-process-env-top-level, @typescript-eslint/no-require-imports */
|
||||
const useVoiceIntegration: typeof import('../hooks/useVoiceIntegration.js').useVoiceIntegration = feature('VOICE_MODE') ? require('../hooks/useVoiceIntegration.js').useVoiceIntegration : () => ({
|
||||
stripTrailing: () => 0,
|
||||
handleKeyEvent: () => {},
|
||||
resetAnchor: () => {}
|
||||
handleKeyEvent: () => { },
|
||||
resetAnchor: () => { }
|
||||
});
|
||||
const VoiceKeybindingHandler: typeof import('../hooks/useVoiceIntegration.js').VoiceKeybindingHandler = feature('VOICE_MODE') ? require('../hooks/useVoiceIntegration.js').VoiceKeybindingHandler : () => null;
|
||||
// Frustration detection is ant-only (dogfooding). Conditional require so external
|
||||
@@ -106,11 +106,11 @@ const VoiceKeybindingHandler: typeof import('../hooks/useVoiceIntegration.js').V
|
||||
// on every messages change, plus the GrowthBook fetch).
|
||||
const useFrustrationDetection: typeof import('../components/FeedbackSurvey/useFrustrationDetection.js').useFrustrationDetection = "external" === 'ant' ? require('../components/FeedbackSurvey/useFrustrationDetection.js').useFrustrationDetection : () => ({
|
||||
state: 'closed',
|
||||
handleTranscriptSelect: () => {}
|
||||
handleTranscriptSelect: () => { }
|
||||
});
|
||||
// Ant-only org warning. Conditional require so the org UUID list is
|
||||
// eliminated from external builds (one UUID is on excluded-strings).
|
||||
const useAntOrgWarningNotification: typeof import('../hooks/notifs/useAntOrgWarningNotification.js').useAntOrgWarningNotification = "external" === 'ant' ? require('../hooks/notifs/useAntOrgWarningNotification.js').useAntOrgWarningNotification : () => {};
|
||||
const useAntOrgWarningNotification: typeof import('../hooks/notifs/useAntOrgWarningNotification.js').useAntOrgWarningNotification = "external" === 'ant' ? require('../hooks/notifs/useAntOrgWarningNotification.js').useAntOrgWarningNotification : () => { };
|
||||
// Dead code elimination: conditional import for coordinator mode
|
||||
const getCoordinatorUserContext: (mcpClients: ReadonlyArray<{
|
||||
name: string;
|
||||
@@ -137,7 +137,7 @@ import { generateSessionTitle } from '../utils/sessionTitle.js';
|
||||
import { BASH_INPUT_TAG, COMMAND_MESSAGE_TAG, COMMAND_NAME_TAG, LOCAL_COMMAND_STDOUT_TAG } from '../constants/xml.js';
|
||||
import { escapeXml } from '../utils/xml.js';
|
||||
import type { ThinkingConfig } from '../utils/thinking.js';
|
||||
import { gracefulShutdownSync } from '../utils/gracefulShutdown.js';
|
||||
import { gracefulShutdownSync, isShuttingDown } from '../utils/gracefulShutdown.js';
|
||||
import { handlePromptSubmit, type PromptInputHelpers } from '../utils/handlePromptSubmit.js';
|
||||
import { useQueueProcessor } from '../hooks/useQueueProcessor.js';
|
||||
import { useMailboxBridge } from '../hooks/useMailboxBridge.js';
|
||||
@@ -192,7 +192,7 @@ import { useInboxPoller } from '../hooks/useInboxPoller.js';
|
||||
// Dead code elimination: conditional import for loop mode
|
||||
/* eslint-disable @typescript-eslint/no-require-imports */
|
||||
const proactiveModule = feature('PROACTIVE') || feature('KAIROS') ? require('../proactive/index.js') : null;
|
||||
const PROACTIVE_NO_OP_SUBSCRIBE = (_cb: () => void) => () => {};
|
||||
const PROACTIVE_NO_OP_SUBSCRIBE = (_cb: () => void) => () => { };
|
||||
const PROACTIVE_FALSE = () => false;
|
||||
const SUGGEST_BG_PR_NOOP = (_p: string, _n: string): boolean => false;
|
||||
const useProactive = feature('PROACTIVE') || feature('KAIROS') ? require('../proactive/useProactive.js').useProactive : null;
|
||||
@@ -297,7 +297,7 @@ const EMPTY_MCP_CLIENTS: MCPServerConnection[] = [];
|
||||
// Stable stub for useAssistantHistory's non-KAIROS branch — avoids a new
|
||||
// function identity each render, which would break composedOnScroll's memo.
|
||||
const HISTORY_STUB = {
|
||||
maybeLoadOlder: (_: ScrollBoxHandle) => {}
|
||||
maybeLoadOlder: (_: ScrollBoxHandle) => { }
|
||||
};
|
||||
// Window after a user-initiated scroll during which type-into-empty does NOT
|
||||
// repin to bottom. Josh Rosen's workflow: Claude emits long output → scroll
|
||||
@@ -448,28 +448,28 @@ function TranscriptSearchBar({
|
||||
const off = cursorOffset;
|
||||
const cursorChar = off < query.length ? query[off] : ' ';
|
||||
return <Box borderTopDimColor borderBottom={false} borderLeft={false} borderRight={false} borderStyle="single" marginTop={1} paddingLeft={2} width="100%"
|
||||
// applySearchHighlight scans the whole screen buffer. The query
|
||||
// text rendered here IS on screen — /foo matches its own 'foo' in
|
||||
// the bar. With no content matches that's the ONLY visible match →
|
||||
// gets CURRENT → underlined. noSelect makes searchHighlight.ts:76
|
||||
// skip these cells (same exclusion as gutters). You can't text-
|
||||
// select the bar either; it's transient chrome, fine.
|
||||
noSelect>
|
||||
<Text>/</Text>
|
||||
<Text>{query.slice(0, off)}</Text>
|
||||
<Text inverse>{cursorChar}</Text>
|
||||
{off < query.length && <Text>{query.slice(off + 1)}</Text>}
|
||||
<Box flexGrow={1} />
|
||||
{indexStatus === 'building' ? <Text dimColor>indexing… </Text> : indexStatus ? <Text dimColor>indexed in {indexStatus.ms}ms </Text> : count === 0 && query ? <Text color="error">no matches </Text> : count > 0 ?
|
||||
// Engine-counted (indexOf on extractSearchText). May drift from
|
||||
// render-count for ghost/phantom messages — badge is a rough
|
||||
// location hint. scanElement gives exact per-message positions
|
||||
// but counting ALL would cost ~1-3ms × matched-messages.
|
||||
<Text dimColor>
|
||||
{current}/{count}
|
||||
{' '}
|
||||
</Text> : null}
|
||||
</Box>;
|
||||
// applySearchHighlight scans the whole screen buffer. The query
|
||||
// text rendered here IS on screen — /foo matches its own 'foo' in
|
||||
// the bar. With no content matches that's the ONLY visible match →
|
||||
// gets CURRENT → underlined. noSelect makes searchHighlight.ts:76
|
||||
// skip these cells (same exclusion as gutters). You can't text-
|
||||
// select the bar either; it's transient chrome, fine.
|
||||
noSelect>
|
||||
<Text>/</Text>
|
||||
<Text>{query.slice(0, off)}</Text>
|
||||
<Text inverse>{cursorChar}</Text>
|
||||
{off < query.length && <Text>{query.slice(off + 1)}</Text>}
|
||||
<Box flexGrow={1} />
|
||||
{indexStatus === 'building' ? <Text dimColor>indexing… </Text> : indexStatus ? <Text dimColor>indexed in {indexStatus.ms}ms </Text> : count === 0 && query ? <Text color="error">no matches </Text> : count > 0 ?
|
||||
// Engine-counted (indexOf on extractSearchText). May drift from
|
||||
// render-count for ghost/phantom messages — badge is a rough
|
||||
// location hint. scanElement gives exact per-message positions
|
||||
// but counting ALL would cost ~1-3ms × matched-messages.
|
||||
<Text dimColor>
|
||||
{current}/{count}
|
||||
{' '}
|
||||
</Text> : null}
|
||||
</Box>;
|
||||
}
|
||||
const TITLE_ANIMATION_FRAMES = ['⠂', '⠐'];
|
||||
const TITLE_STATIC_PREFIX = '✳';
|
||||
@@ -605,8 +605,8 @@ export function REPL({
|
||||
const moreRightEnabled = useMemo(() => "external" === 'ant' && isEnvTruthy(process.env.CLAUDE_MORERIGHT), []);
|
||||
const disableVirtualScroll = useMemo(() => isEnvTruthy(process.env.CLAUDE_CODE_DISABLE_VIRTUAL_SCROLL), []);
|
||||
const disableMessageActions = feature('MESSAGE_ACTIONS') ?
|
||||
// biome-ignore lint/correctness/useHookAtTopLevel: feature() is a compile-time constant
|
||||
useMemo(() => isEnvTruthy(process.env.CLAUDE_CODE_DISABLE_MESSAGE_ACTIONS), []) : false;
|
||||
// biome-ignore lint/correctness/useHookAtTopLevel: feature() is a compile-time constant
|
||||
useMemo(() => isEnvTruthy(process.env.CLAUDE_CODE_DISABLE_MESSAGE_ACTIONS), []) : false;
|
||||
|
||||
// Agent definition is state so /resume can update it mid-session
|
||||
const [mainThreadAgentDefinition, setMainThreadAgentDefinition] = useState(initialMainThreadAgentDefinition);
|
||||
@@ -865,11 +865,11 @@ export function REPL({
|
||||
|
||||
// Ref for the bridge result callback — set after useReplBridge initializes,
|
||||
// read in the onQuery finally block to notify mobile clients that a turn ended.
|
||||
const sendBridgeResultRef = useRef<() => void>(() => {});
|
||||
const sendBridgeResultRef = useRef<() => void>(() => { });
|
||||
|
||||
// Ref for the synchronous restore callback — set after restoreMessageSync is
|
||||
// defined, read in the onQuery finally block for auto-restore on interrupt.
|
||||
const restoreMessageSyncRef = useRef<(m: UserMessage) => void>(() => {});
|
||||
const restoreMessageSyncRef = useRef<(m: UserMessage) => void>(() => { });
|
||||
|
||||
// Ref to the fullscreen layout's scroll box for keyboard scrolling.
|
||||
// Null when fullscreen mode is disabled (ref never attached).
|
||||
@@ -1127,7 +1127,7 @@ export function REPL({
|
||||
// session from mid-conversation context.
|
||||
const haikuTitleAttemptedRef = useRef((initialMessages?.length ?? 0) > 0);
|
||||
const agentTitle = mainThreadAgentDefinition?.agentType;
|
||||
const terminalTitle = sessionTitle ?? agentTitle ?? haikuTitle ?? 'Claude Code';
|
||||
const terminalTitle = sessionTitle ?? agentTitle ?? haikuTitle ?? 'Open Claude';
|
||||
const isWaitingForApproval = toolUseConfirmQueue.length > 0 || promptQueue.length > 0 || pendingWorkerRequest || pendingSandboxRequest;
|
||||
// Local-jsx commands (like /plugin, /config) show user-facing dialogs that
|
||||
// wait for input. Require jsx != null — if the flag is stuck true but jsx
|
||||
@@ -1246,8 +1246,8 @@ export function REPL({
|
||||
const cursorNavRef = useRef<MessageActionsNav | null>(null);
|
||||
// Memoized so Messages' React.memo holds.
|
||||
const unseenDivider = useMemo(() => computeUnseenDivider(messages, dividerIndex),
|
||||
// eslint-disable-next-line react-hooks/exhaustive-deps -- length change covers appends; useUnseenDivider's count-drop guard clears dividerIndex on replace/rewind
|
||||
[dividerIndex, messages.length]);
|
||||
// eslint-disable-next-line react-hooks/exhaustive-deps -- length change covers appends; useUnseenDivider's count-drop guard clears dividerIndex on replace/rewind
|
||||
[dividerIndex, messages.length]);
|
||||
// Re-pin scroll to bottom and clear the unseen-messages baseline. Called
|
||||
// on any user-driven return-to-live action (submit, type-into-empty,
|
||||
// overlay appear/dismiss).
|
||||
@@ -1276,13 +1276,13 @@ export function REPL({
|
||||
const {
|
||||
maybeLoadOlder
|
||||
} = feature('KAIROS') ?
|
||||
// biome-ignore lint/correctness/useHookAtTopLevel: feature() is a compile-time constant
|
||||
useAssistantHistory({
|
||||
config: remoteSessionConfig,
|
||||
setMessages,
|
||||
scrollRef,
|
||||
onPrepend: shiftDivider
|
||||
}) : HISTORY_STUB;
|
||||
// biome-ignore lint/correctness/useHookAtTopLevel: feature() is a compile-time constant
|
||||
useAssistantHistory({
|
||||
config: remoteSessionConfig,
|
||||
setMessages,
|
||||
scrollRef,
|
||||
onPrepend: shiftDivider
|
||||
}) : HISTORY_STUB;
|
||||
// Compose useUnseenDivider's callbacks with the lazy-load trigger.
|
||||
const composedOnScroll = useCallback((sticky: boolean, handle: ScrollBoxHandle) => {
|
||||
lastUserScrollTsRef.current = Date.now();
|
||||
@@ -1593,12 +1593,12 @@ export function REPL({
|
||||
swarmStartTimeRef.current = null;
|
||||
swarmBudgetInfoRef.current = undefined;
|
||||
setMessages(prev => [...prev, createTurnDurationMessage(totalMs, deferredBudget,
|
||||
// Count only what recordTranscript will persist — ephemeral
|
||||
// progress ticks and non-ant attachments are filtered by
|
||||
// isLoggableMessage and never reach disk. Using raw prev.length
|
||||
// would make checkResumeConsistency report false delta<0 for
|
||||
// every turn that ran a progress-emitting tool.
|
||||
count(prev, isLoggableMessage))]);
|
||||
// Count only what recordTranscript will persist — ephemeral
|
||||
// progress ticks and non-ant attachments are filtered by
|
||||
// isLoggableMessage and never reach disk. Using raw prev.length
|
||||
// would make checkResumeConsistency report false delta<0 for
|
||||
// every turn that ran a progress-emitting tool.
|
||||
count(prev, isLoggableMessage))]);
|
||||
}
|
||||
}, [hasRunningTeammates, setMessages]);
|
||||
|
||||
@@ -1665,19 +1665,19 @@ export function REPL({
|
||||
setToolJSX
|
||||
});
|
||||
const showSpinner = (!toolJSX || toolJSX.showSpinner === true) && toolUseConfirmQueue.length === 0 && promptQueue.length === 0 && (
|
||||
// Show spinner during input processing, API call, while teammates are running,
|
||||
// or while pending task notifications are queued (prevents spinner bounce between consecutive notifications)
|
||||
isLoading || userInputOnProcessing || hasRunningTeammates ||
|
||||
// Keep spinner visible while task notifications are queued for processing.
|
||||
// Without this, the spinner briefly disappears between consecutive notifications
|
||||
// (e.g., multiple background agents completing in rapid succession) because
|
||||
// isLoading goes false momentarily between processing each one.
|
||||
getCommandQueueLength() > 0) &&
|
||||
// Hide spinner when waiting for leader to approve permission request
|
||||
!pendingWorkerRequest && !onlySleepToolActive && (
|
||||
// Hide spinner when streaming text is visible (the text IS the feedback),
|
||||
// but keep it when isBriefOnly suppresses the streaming text display
|
||||
!visibleStreamingText || isBriefOnly);
|
||||
// Show spinner during input processing, API call, while teammates are running,
|
||||
// or while pending task notifications are queued (prevents spinner bounce between consecutive notifications)
|
||||
isLoading || userInputOnProcessing || hasRunningTeammates ||
|
||||
// Keep spinner visible while task notifications are queued for processing.
|
||||
// Without this, the spinner briefly disappears between consecutive notifications
|
||||
// (e.g., multiple background agents completing in rapid succession) because
|
||||
// isLoading goes false momentarily between processing each one.
|
||||
getCommandQueueLength() > 0) &&
|
||||
// Hide spinner when waiting for leader to approve permission request
|
||||
!pendingWorkerRequest && !onlySleepToolActive && (
|
||||
// Hide spinner when streaming text is visible (the text IS the feedback),
|
||||
// but keep it when isBriefOnly suppresses the streaming text display
|
||||
!visibleStreamingText || isBriefOnly);
|
||||
|
||||
// Check if any permission or ask question prompt is currently visible
|
||||
// This is used to prevent the survey from opening while prompts are active
|
||||
@@ -2323,9 +2323,9 @@ export function REPL({
|
||||
addNotification({
|
||||
key: 'sandbox-unavailable',
|
||||
jsx: <>
|
||||
<Text color="warning">sandbox disabled</Text>
|
||||
<Text dimColor> · /sandbox</Text>
|
||||
</>,
|
||||
<Text color="warning">sandbox disabled</Text>
|
||||
<Text dimColor> · /sandbox</Text>
|
||||
</>,
|
||||
priority: 'medium'
|
||||
});
|
||||
}, [addNotification]);
|
||||
@@ -2676,7 +2676,7 @@ export function REPL({
|
||||
// useDeferredHookMessages) and attachment messages (appended by
|
||||
// processTextPrompt) — both pushed length past 1 on turn one, so the
|
||||
// title silently fell through to the "Claude Code" default.
|
||||
if (getAPIProvider() === 'firstParty' && !titleDisabled && !sessionTitle && !agentTitle && !haikuTitleAttemptedRef.current) {
|
||||
if (!titleDisabled && !sessionTitle && !agentTitle && !haikuTitleAttemptedRef.current) {
|
||||
const firstUserMessage = newMessages.find(m => m.type === 'user' && !m.isMeta);
|
||||
const text = firstUserMessage?.type === 'user' ? getContentText(firstUserMessage.message.content) : null;
|
||||
// Skip synthetic breadcrumbs — slash-command output, prompt-skill
|
||||
@@ -2686,7 +2686,7 @@ export function REPL({
|
||||
if (text && !text.startsWith(`<${LOCAL_COMMAND_STDOUT_TAG}>`) && !text.startsWith(`<${COMMAND_MESSAGE_TAG}>`) && !text.startsWith(`<${COMMAND_NAME_TAG}>`) && !text.startsWith(`<${BASH_INPUT_TAG}>`)) {
|
||||
haikuTitleAttemptedRef.current = true;
|
||||
void generateSessionTitle(text, new AbortController().signal).then(title => {
|
||||
if (title) setHaikuTitle(title);else haikuTitleAttemptedRef.current = false;
|
||||
if (title) setHaikuTitle(title); else haikuTitleAttemptedRef.current = false;
|
||||
}, () => {
|
||||
haikuTitleAttemptedRef.current = false;
|
||||
});
|
||||
@@ -2760,11 +2760,11 @@ export function REPL({
|
||||
});
|
||||
}
|
||||
queryCheckpoint('query_context_loading_start');
|
||||
const [,, defaultSystemPrompt, baseUserContext, systemContext] = await Promise.all([
|
||||
// IMPORTANT: do this after setMessages() above, to avoid UI jank
|
||||
checkAndDisableBypassPermissionsIfNeeded(toolPermissionContext, setAppState),
|
||||
// Gated on TRANSCRIPT_CLASSIFIER so GrowthBook kill switch runs wherever auto mode is built in
|
||||
feature('TRANSCRIPT_CLASSIFIER') ? checkAndDisableAutoModeIfNeeded(toolPermissionContext, setAppState, store.getState().fastMode) : undefined, getSystemPrompt(freshTools, mainLoopModelParam, Array.from(toolPermissionContext.additionalWorkingDirectories.keys()), freshMcpClients), getUserContext(), getSystemContext()]);
|
||||
const [, , defaultSystemPrompt, baseUserContext, systemContext] = await Promise.all([
|
||||
// IMPORTANT: do this after setMessages() above, to avoid UI jank
|
||||
checkAndDisableBypassPermissionsIfNeeded(toolPermissionContext, setAppState),
|
||||
// Gated on TRANSCRIPT_CLASSIFIER so GrowthBook kill switch runs wherever auto mode is built in
|
||||
feature('TRANSCRIPT_CLASSIFIER') ? checkAndDisableAutoModeIfNeeded(toolPermissionContext, setAppState, store.getState().fastMode) : undefined, getSystemPrompt(freshTools, mainLoopModelParam, Array.from(toolPermissionContext.additionalWorkingDirectories.keys()), freshMcpClients), getUserContext(), getSystemContext()]);
|
||||
const userContext = {
|
||||
...baseUserContext,
|
||||
...getCoordinatorUserContext(freshMcpClients, isScratchpadEnabled() ? getScratchpadDir() : undefined),
|
||||
@@ -3110,9 +3110,9 @@ export function REPL({
|
||||
if (typeof content === 'string' && !initialMsg.message.planContent) {
|
||||
// Route through onSubmit for proper processing including UserPromptSubmit hooks
|
||||
void onSubmit(content, {
|
||||
setCursorOffset: () => {},
|
||||
clearBuffer: () => {},
|
||||
resetHistory: () => {}
|
||||
setCursorOffset: () => { },
|
||||
clearBuffer: () => { },
|
||||
resetHistory: () => { }
|
||||
});
|
||||
} else {
|
||||
// Plan messages or complex content (images, etc.) - send directly to model
|
||||
@@ -3121,10 +3121,10 @@ export function REPL({
|
||||
const newAbortController = createAbortController();
|
||||
setAbortController(newAbortController);
|
||||
void onQuery([initialMsg.message], newAbortController, true,
|
||||
// shouldQuery
|
||||
[],
|
||||
// additionalAllowedTools
|
||||
mainLoopModel);
|
||||
// shouldQuery
|
||||
[],
|
||||
// additionalAllowedTools
|
||||
mainLoopModel);
|
||||
}
|
||||
|
||||
// Reset ref after a delay to allow new initial messages
|
||||
@@ -3526,18 +3526,18 @@ export function REPL({
|
||||
setStashedPrompt(undefined);
|
||||
}
|
||||
}, [queryGuard,
|
||||
// isLoading is read at the !isLoading checks above for input-clearing
|
||||
// and submitCount gating. It's derived from isQueryActive || isExternalLoading,
|
||||
// so including it here ensures the closure captures the fresh value.
|
||||
isLoading, isExternalLoading, inputMode, commands, setInputValue, setInputMode, setPastedContents, setSubmitCount, setIDESelection, setToolJSX, getToolUseContext,
|
||||
// messages is read via messagesRef.current inside the callback to
|
||||
// keep onSubmit stable across message updates (see L2384/L2400/L2662).
|
||||
// Without this, each setMessages call (~30× per turn) recreates
|
||||
// onSubmit, pinning the REPL render scope (1776B) + that render's
|
||||
// messages array in downstream closures (PromptInput, handleAutoRunIssue).
|
||||
// Heap analysis showed ~9 REPL scopes and ~15 messages array versions
|
||||
// accumulating after #20174/#20175, all traced to this dep.
|
||||
mainLoopModel, pastedContents, ideSelection, setUserInputOnProcessing, setAbortController, addNotification, onQuery, stashedPrompt, setStashedPrompt, setAppState, onBeforeQuery, canUseTool, remoteSession, setMessages, awaitPendingHooks, repinScroll]);
|
||||
// isLoading is read at the !isLoading checks above for input-clearing
|
||||
// and submitCount gating. It's derived from isQueryActive || isExternalLoading,
|
||||
// so including it here ensures the closure captures the fresh value.
|
||||
isLoading, isExternalLoading, inputMode, commands, setInputValue, setInputMode, setPastedContents, setSubmitCount, setIDESelection, setToolJSX, getToolUseContext,
|
||||
// messages is read via messagesRef.current inside the callback to
|
||||
// keep onSubmit stable across message updates (see L2384/L2400/L2662).
|
||||
// Without this, each setMessages call (~30× per turn) recreates
|
||||
// onSubmit, pinning the REPL render scope (1776B) + that render's
|
||||
// messages array in downstream closures (PromptInput, handleAutoRunIssue).
|
||||
// Heap analysis showed ~9 REPL scopes and ~15 messages array versions
|
||||
// accumulating after #20174/#20175, all traced to this dep.
|
||||
mainLoopModel, pastedContents, ideSelection, setUserInputOnProcessing, setAbortController, addNotification, onQuery, stashedPrompt, setStashedPrompt, setAppState, onBeforeQuery, canUseTool, remoteSession, setMessages, awaitPendingHooks, repinScroll]);
|
||||
|
||||
// Callback for when user submits input while viewing a teammate's transcript
|
||||
const onAgentSubmit = useCallback(async (input: string, task: InProcessTeammateTaskState | LocalAgentTaskState, helpers: PromptInputHelpers) => {
|
||||
@@ -3558,8 +3558,8 @@ export function REPL({
|
||||
addNotification({
|
||||
key: `resume-agent-failed-${task.id}`,
|
||||
jsx: <Text color="error">
|
||||
Failed to resume agent: {errorMessage(err)}
|
||||
</Text>,
|
||||
Failed to resume agent: {errorMessage(err)}
|
||||
</Text>,
|
||||
priority: 'low'
|
||||
});
|
||||
});
|
||||
@@ -3577,9 +3577,9 @@ export function REPL({
|
||||
const command = autoRunIssueReason ? getAutoRunCommand(autoRunIssueReason) : '/issue';
|
||||
setAutoRunIssueReason(null); // Clear the state
|
||||
onSubmit(command, {
|
||||
setCursorOffset: () => {},
|
||||
clearBuffer: () => {},
|
||||
resetHistory: () => {}
|
||||
setCursorOffset: () => { },
|
||||
clearBuffer: () => { },
|
||||
resetHistory: () => { }
|
||||
}).catch(err => {
|
||||
logForDebugging(`Auto-run ${command} failed: ${errorMessage(err)}`);
|
||||
});
|
||||
@@ -3592,9 +3592,9 @@ export function REPL({
|
||||
const handleSurveyRequestFeedback = useCallback(() => {
|
||||
const command = "external" === 'ant' ? '/issue' : '/feedback';
|
||||
onSubmit(command, {
|
||||
setCursorOffset: () => {},
|
||||
clearBuffer: () => {},
|
||||
resetHistory: () => {}
|
||||
setCursorOffset: () => { },
|
||||
clearBuffer: () => { },
|
||||
resetHistory: () => { }
|
||||
}).catch(err => {
|
||||
logForDebugging(`Survey feedback request failed: ${err instanceof Error ? err.message : String(err)}`);
|
||||
});
|
||||
@@ -3609,9 +3609,9 @@ export function REPL({
|
||||
onSubmitRef.current = onSubmit;
|
||||
const handleOpenRateLimitOptions = useCallback(() => {
|
||||
void onSubmitRef.current('/rate-limit-options', {
|
||||
setCursorOffset: () => {},
|
||||
clearBuffer: () => {},
|
||||
resetHistory: () => {}
|
||||
setCursorOffset: () => { },
|
||||
clearBuffer: () => { },
|
||||
resetHistory: () => { }
|
||||
});
|
||||
}, []);
|
||||
const handleExit = useCallback(async () => {
|
||||
@@ -3628,14 +3628,14 @@ export function REPL({
|
||||
}
|
||||
const showWorktree = getCurrentWorktreeSession() !== null;
|
||||
if (showWorktree) {
|
||||
setExitFlow(<ExitFlow showWorktree onDone={() => {}} onCancel={() => {
|
||||
setExitFlow(<ExitFlow showWorktree onDone={() => { }} onCancel={() => {
|
||||
setExitFlow(null);
|
||||
setIsExiting(false);
|
||||
}} />);
|
||||
return;
|
||||
}
|
||||
const exitMod = await exit.load();
|
||||
const exitFlowResult = await exitMod.call(() => {});
|
||||
const exitFlowResult = await exitMod.call(() => { });
|
||||
setExitFlow(exitFlowResult);
|
||||
// If call() returned without killing the process (bg session detach),
|
||||
// clear isExiting so the UI is usable on reattach. No-op on the normal
|
||||
@@ -3749,18 +3749,18 @@ export function REPL({
|
||||
};
|
||||
const messageActionCaps: MessageActionCaps = {
|
||||
copy: text =>
|
||||
// setClipboard RETURNS OSC 52 — caller must stdout.write (tmux side-effects load-buffer, but that's tmux-only).
|
||||
void setClipboard(text).then(raw => {
|
||||
if (raw) process.stdout.write(raw);
|
||||
addNotification({
|
||||
// Same key as text-selection copy — repeated copies replace toast, don't queue.
|
||||
key: 'selection-copied',
|
||||
text: 'copied',
|
||||
color: 'success',
|
||||
priority: 'immediate',
|
||||
timeoutMs: 2000
|
||||
});
|
||||
}),
|
||||
// setClipboard RETURNS OSC 52 — caller must stdout.write (tmux side-effects load-buffer, but that's tmux-only).
|
||||
void setClipboard(text).then(raw => {
|
||||
if (raw) process.stdout.write(raw);
|
||||
addNotification({
|
||||
// Same key as text-selection copy — repeated copies replace toast, don't queue.
|
||||
key: 'selection-copied',
|
||||
text: 'copied',
|
||||
color: 'success',
|
||||
priority: 'immediate',
|
||||
timeoutMs: 2000
|
||||
});
|
||||
}),
|
||||
edit: async msg => {
|
||||
// Same skip-confirm check as /rewind: lossless → direct, else confirm dialog.
|
||||
const rawIdx = findRawIndex(msg.uuid);
|
||||
@@ -3856,14 +3856,14 @@ export function REPL({
|
||||
const executeQueuedInput = useCallback(async (queuedCommands: QueuedCommand[]) => {
|
||||
await handlePromptSubmit({
|
||||
helpers: {
|
||||
setCursorOffset: () => {},
|
||||
clearBuffer: () => {},
|
||||
resetHistory: () => {}
|
||||
setCursorOffset: () => { },
|
||||
clearBuffer: () => { },
|
||||
resetHistory: () => { }
|
||||
},
|
||||
queryGuard,
|
||||
commands,
|
||||
onInputChange: () => {},
|
||||
setPastedContents: () => {},
|
||||
onInputChange: () => { },
|
||||
setPastedContents: () => { },
|
||||
setToolJSX,
|
||||
getToolUseContext,
|
||||
messages,
|
||||
@@ -3924,8 +3924,8 @@ export function REPL({
|
||||
// User hasn't interacted since response ended, check other conditions
|
||||
const idleTimeSinceResponse = Date.now() - lastQueryCompletionTime;
|
||||
if (!isLoading && !toolJSX &&
|
||||
// Use ref to get current dialog state, avoiding stale closure
|
||||
focusedInputDialogRef.current === undefined && idleTimeSinceResponse >= getGlobalConfig().messageIdleNotifThresholdMs) {
|
||||
// Use ref to get current dialog state, avoiding stale closure
|
||||
focusedInputDialogRef.current === undefined && idleTimeSinceResponse >= getGlobalConfig().messageIdleNotifThresholdMs) {
|
||||
void sendNotification({
|
||||
message: 'Claude is waiting for your input',
|
||||
notificationType: 'idle_prompt'
|
||||
@@ -3957,13 +3957,13 @@ export function REPL({
|
||||
addNotif({
|
||||
key: 'idle-return-hint',
|
||||
jsx: mode === 'hint_v2' ? <>
|
||||
<Text dimColor>new task? </Text>
|
||||
<Text color="suggestion">/clear</Text>
|
||||
<Text dimColor> to save </Text>
|
||||
<Text color="suggestion">{formattedTokens} tokens</Text>
|
||||
</> : <Text color="warning">
|
||||
new task? /clear to save {formattedTokens} tokens
|
||||
</Text>,
|
||||
<Text dimColor>new task? </Text>
|
||||
<Text color="suggestion">/clear</Text>
|
||||
<Text dimColor> to save </Text>
|
||||
<Text color="suggestion">{formattedTokens} tokens</Text>
|
||||
</> : <Text color="warning">
|
||||
new task? /clear to save {formattedTokens} tokens
|
||||
</Text>,
|
||||
priority: 'medium',
|
||||
// Persist until submit — the hint fires at T+75min idle, user may
|
||||
// not return for hours. removeNotification in useEffect cleanup
|
||||
@@ -4015,17 +4015,17 @@ export function REPL({
|
||||
|
||||
// Voice input integration (VOICE_MODE builds only)
|
||||
const voice = feature('VOICE_MODE') ?
|
||||
// biome-ignore lint/correctness/useHookAtTopLevel: feature() is a compile-time constant
|
||||
useVoiceIntegration({
|
||||
setInputValueRaw,
|
||||
inputValueRef,
|
||||
insertTextRef
|
||||
}) : {
|
||||
stripTrailing: () => 0,
|
||||
handleKeyEvent: () => {},
|
||||
resetAnchor: () => {},
|
||||
interimRange: null
|
||||
};
|
||||
// biome-ignore lint/correctness/useHookAtTopLevel: feature() is a compile-time constant
|
||||
useVoiceIntegration({
|
||||
setInputValueRaw,
|
||||
inputValueRef,
|
||||
insertTextRef
|
||||
}) : {
|
||||
stripTrailing: () => 0,
|
||||
handleKeyEvent: () => { },
|
||||
resetAnchor: () => { },
|
||||
interimRange: null
|
||||
};
|
||||
useInboxPoller({
|
||||
enabled: isAgentSwarmsEnabled(),
|
||||
isLoading,
|
||||
@@ -4228,11 +4228,11 @@ export function REPL({
|
||||
event.stopImmediatePropagation();
|
||||
}
|
||||
},
|
||||
// Search needs virtual scroll (jumpRef drives VirtualMessageList). [
|
||||
// kills it, so !dumpMode — after [ there's nothing to jump in.
|
||||
{
|
||||
isActive: screen === 'transcript' && virtualScrollActive && !searchOpen && !dumpMode
|
||||
});
|
||||
// Search needs virtual scroll (jumpRef drives VirtualMessageList). [
|
||||
// kills it, so !dumpMode — after [ there's nothing to jump in.
|
||||
{
|
||||
isActive: screen === 'transcript' && virtualScrollActive && !searchOpen && !dumpMode
|
||||
});
|
||||
const {
|
||||
setQuery: setHighlight,
|
||||
scanElement,
|
||||
@@ -4323,12 +4323,12 @@ export function REPL({
|
||||
})();
|
||||
}
|
||||
},
|
||||
// !searchOpen: typing 'v' or '[' in the search bar is search input, not
|
||||
// a command. No !dumpMode here — v should work after [ (the [ handler
|
||||
// guards itself inline).
|
||||
{
|
||||
isActive: screen === 'transcript' && virtualScrollActive && !searchOpen
|
||||
});
|
||||
// !searchOpen: typing 'v' or '[' in the search bar is search input, not
|
||||
// a command. No !dumpMode here — v should work after [ (the [ handler
|
||||
// guards itself inline).
|
||||
{
|
||||
isActive: screen === 'transcript' && virtualScrollActive && !searchOpen
|
||||
});
|
||||
|
||||
// Fresh `less` per transcript entry. Prevents stale highlights matching
|
||||
// unrelated normal-mode text (overlay is alt-screen-global) and avoids
|
||||
@@ -4396,78 +4396,78 @@ export function REPL({
|
||||
const transcriptScrollRef = isFullscreenEnvEnabled() && !disableVirtualScroll && !dumpMode ? scrollRef : undefined;
|
||||
const transcriptMessagesElement = <Messages messages={transcriptMessages} tools={tools} commands={commands} verbose={true} toolJSX={null} toolUseConfirmQueue={[]} inProgressToolUseIDs={inProgressToolUseIDs} isMessageSelectorVisible={false} conversationId={conversationId} screen={screen} agentDefinitions={agentDefinitions} streamingToolUses={transcriptStreamingToolUses} showAllInTranscript={showAllInTranscript} onOpenRateLimitOptions={handleOpenRateLimitOptions} isLoading={isLoading} hidePastThinking={true} streamingThinking={streamingThinking} scrollRef={transcriptScrollRef} jumpRef={jumpRef} onSearchMatchesChange={onSearchMatchesChange} scanElement={scanElement} setPositions={setPositions} disableRenderCap={dumpMode} />;
|
||||
const transcriptToolJSX = toolJSX && <Box flexDirection="column" width="100%">
|
||||
{toolJSX.jsx}
|
||||
</Box>;
|
||||
{toolJSX.jsx}
|
||||
</Box>;
|
||||
const transcriptReturn = <KeybindingSetup>
|
||||
<AnimatedTerminalTitle isAnimating={titleIsAnimating} title={terminalTitle} disabled={titleDisabled} noPrefix={showStatusInTerminalTab} />
|
||||
<GlobalKeybindingHandlers {...globalKeybindingProps} />
|
||||
{feature('VOICE_MODE') ? <VoiceKeybindingHandler voiceHandleKeyEvent={voice.handleKeyEvent} stripTrailing={voice.stripTrailing} resetAnchor={voice.resetAnchor} isActive={!toolJSX?.isLocalJSXCommand} /> : null}
|
||||
<CommandKeybindingHandlers onSubmit={onSubmit} isActive={!toolJSX?.isLocalJSXCommand} />
|
||||
{transcriptScrollRef ?
|
||||
// ScrollKeybindingHandler must mount before CancelRequestHandler so
|
||||
// ctrl+c-with-selection copies instead of cancelling the active task.
|
||||
// Its raw useInput handler only stops propagation when a selection
|
||||
// exists — without one, ctrl+c falls through to CancelRequestHandler.
|
||||
<ScrollKeybindingHandler scrollRef={scrollRef}
|
||||
// Yield wheel/ctrl+u/d to UltraplanChoiceDialog's own scroll
|
||||
// handler while the modal is showing.
|
||||
isActive={focusedInputDialog !== 'ultraplan-choice'}
|
||||
// g/G/j/k/ctrl+u/ctrl+d would eat keystrokes the search bar
|
||||
// wants. Off while searching.
|
||||
isModal={!searchOpen}
|
||||
// Manual scroll exits the search context — clear the yellow
|
||||
// current-match marker. Positions are (msg, rowOffset)-keyed;
|
||||
// j/k changes scrollTop so rowOffset is stale → wrong row
|
||||
// gets yellow. Next n/N re-establishes via step()→jump().
|
||||
onScroll={() => jumpRef.current?.disarmSearch()} /> : null}
|
||||
<CancelRequestHandler {...cancelRequestProps} />
|
||||
{transcriptScrollRef ? <FullscreenLayout scrollRef={scrollRef} scrollable={<>
|
||||
{transcriptMessagesElement}
|
||||
{transcriptToolJSX}
|
||||
<SandboxViolationExpandedView />
|
||||
</>} bottom={searchOpen ? <TranscriptSearchBar jumpRef={jumpRef}
|
||||
// Seed was tried (c01578c8) — broke /hello muscle
|
||||
// memory (cursor lands after 'foo', /hello → foohello).
|
||||
// Cancel-restore handles the 'don't lose prior search'
|
||||
// concern differently (onCancel re-applies searchQuery).
|
||||
initialQuery="" count={searchCount} current={searchCurrent} onClose={q => {
|
||||
// Enter — commit. 0-match guard: junk query shouldn't
|
||||
// persist (badge hidden, n/N dead anyway).
|
||||
setSearchQuery(searchCount > 0 ? q : '');
|
||||
setSearchOpen(false);
|
||||
// onCancel path: bar unmounts before its useEffect([query])
|
||||
// can fire with ''. Without this, searchCount stays stale
|
||||
// (n guard at :4956 passes) and VML's matches[] too
|
||||
// (nextMatch walks the old array). Phantom nav, no
|
||||
// highlight. onExit (Enter, q non-empty) still commits.
|
||||
if (!q) {
|
||||
setSearchCount(0);
|
||||
setSearchCurrent(0);
|
||||
<AnimatedTerminalTitle isAnimating={titleIsAnimating} title={terminalTitle} disabled={titleDisabled} noPrefix={showStatusInTerminalTab} />
|
||||
<GlobalKeybindingHandlers {...globalKeybindingProps} />
|
||||
{feature('VOICE_MODE') ? <VoiceKeybindingHandler voiceHandleKeyEvent={voice.handleKeyEvent} stripTrailing={voice.stripTrailing} resetAnchor={voice.resetAnchor} isActive={!toolJSX?.isLocalJSXCommand} /> : null}
|
||||
<CommandKeybindingHandlers onSubmit={onSubmit} isActive={!toolJSX?.isLocalJSXCommand} />
|
||||
{transcriptScrollRef ?
|
||||
// ScrollKeybindingHandler must mount before CancelRequestHandler so
|
||||
// ctrl+c-with-selection copies instead of cancelling the active task.
|
||||
// Its raw useInput handler only stops propagation when a selection
|
||||
// exists — without one, ctrl+c falls through to CancelRequestHandler.
|
||||
<ScrollKeybindingHandler scrollRef={scrollRef}
|
||||
// Yield wheel/ctrl+u/d to UltraplanChoiceDialog's own scroll
|
||||
// handler while the modal is showing.
|
||||
isActive={focusedInputDialog !== 'ultraplan-choice'}
|
||||
// g/G/j/k/ctrl+u/ctrl+d would eat keystrokes the search bar
|
||||
// wants. Off while searching.
|
||||
isModal={!searchOpen}
|
||||
// Manual scroll exits the search context — clear the yellow
|
||||
// current-match marker. Positions are (msg, rowOffset)-keyed;
|
||||
// j/k changes scrollTop so rowOffset is stale → wrong row
|
||||
// gets yellow. Next n/N re-establishes via step()→jump().
|
||||
onScroll={() => jumpRef.current?.disarmSearch()} /> : null}
|
||||
<CancelRequestHandler {...cancelRequestProps} />
|
||||
{transcriptScrollRef ? <FullscreenLayout scrollRef={scrollRef} scrollable={<>
|
||||
{transcriptMessagesElement}
|
||||
{transcriptToolJSX}
|
||||
<SandboxViolationExpandedView />
|
||||
</>} bottom={searchOpen ? <TranscriptSearchBar jumpRef={jumpRef}
|
||||
// Seed was tried (c01578c8) — broke /hello muscle
|
||||
// memory (cursor lands after 'foo', /hello → foohello).
|
||||
// Cancel-restore handles the 'don't lose prior search'
|
||||
// concern differently (onCancel re-applies searchQuery).
|
||||
initialQuery="" count={searchCount} current={searchCurrent} onClose={q => {
|
||||
// Enter — commit. 0-match guard: junk query shouldn't
|
||||
// persist (badge hidden, n/N dead anyway).
|
||||
setSearchQuery(searchCount > 0 ? q : '');
|
||||
setSearchOpen(false);
|
||||
// onCancel path: bar unmounts before its useEffect([query])
|
||||
// can fire with ''. Without this, searchCount stays stale
|
||||
// (n guard at :4956 passes) and VML's matches[] too
|
||||
// (nextMatch walks the old array). Phantom nav, no
|
||||
// highlight. onExit (Enter, q non-empty) still commits.
|
||||
if (!q) {
|
||||
setSearchCount(0);
|
||||
setSearchCurrent(0);
|
||||
jumpRef.current?.setSearchQuery('');
|
||||
}
|
||||
}} onCancel={() => {
|
||||
// Esc/ctrl+c/ctrl+g — undo. Bar's effect last fired
|
||||
// with whatever was typed. searchQuery (REPL state)
|
||||
// is unchanged since / (onClose = commit, didn't run).
|
||||
// Two VML calls: '' restores anchor (0-match else-
|
||||
// branch), then searchQuery re-scans from anchor's
|
||||
// nearest. Both synchronous — one React batch.
|
||||
// setHighlight explicit: REPL's sync-effect dep is
|
||||
// searchQuery (unchanged), wouldn't re-fire.
|
||||
setSearchOpen(false);
|
||||
jumpRef.current?.setSearchQuery('');
|
||||
}
|
||||
}} onCancel={() => {
|
||||
// Esc/ctrl+c/ctrl+g — undo. Bar's effect last fired
|
||||
// with whatever was typed. searchQuery (REPL state)
|
||||
// is unchanged since / (onClose = commit, didn't run).
|
||||
// Two VML calls: '' restores anchor (0-match else-
|
||||
// branch), then searchQuery re-scans from anchor's
|
||||
// nearest. Both synchronous — one React batch.
|
||||
// setHighlight explicit: REPL's sync-effect dep is
|
||||
// searchQuery (unchanged), wouldn't re-fire.
|
||||
setSearchOpen(false);
|
||||
jumpRef.current?.setSearchQuery('');
|
||||
jumpRef.current?.setSearchQuery(searchQuery);
|
||||
setHighlight(searchQuery);
|
||||
}} setHighlight={setHighlight} /> : <TranscriptModeFooter showAllInTranscript={showAllInTranscript} virtualScroll={true} status={editorStatus || undefined} searchBadge={searchQuery && searchCount > 0 ? {
|
||||
current: searchCurrent,
|
||||
count: searchCount
|
||||
} : undefined} />} /> : <>
|
||||
{transcriptMessagesElement}
|
||||
{transcriptToolJSX}
|
||||
<SandboxViolationExpandedView />
|
||||
<TranscriptModeFooter showAllInTranscript={showAllInTranscript} virtualScroll={false} suppressShowAll={dumpMode} status={editorStatus || undefined} />
|
||||
</>}
|
||||
</KeybindingSetup>;
|
||||
jumpRef.current?.setSearchQuery(searchQuery);
|
||||
setHighlight(searchQuery);
|
||||
}} setHighlight={setHighlight} /> : <TranscriptModeFooter showAllInTranscript={showAllInTranscript} virtualScroll={true} status={editorStatus || undefined} searchBadge={searchQuery && searchCount > 0 ? {
|
||||
current: searchCurrent,
|
||||
count: searchCount
|
||||
} : undefined} />} /> : <>
|
||||
{transcriptMessagesElement}
|
||||
{transcriptToolJSX}
|
||||
<SandboxViolationExpandedView />
|
||||
<TranscriptModeFooter showAllInTranscript={showAllInTranscript} virtualScroll={false} suppressShowAll={dumpMode} status={editorStatus || undefined} />
|
||||
</>}
|
||||
</KeybindingSetup>;
|
||||
// The virtual-scroll branch (FullscreenLayout above) needs
|
||||
// <AlternateScreen>'s <Box height={rows}> constraint — without it,
|
||||
// ScrollBox's flexGrow has no ceiling, viewport = content height,
|
||||
@@ -4478,8 +4478,8 @@ export function REPL({
|
||||
// unwrapped — it wants native terminal scrollback.
|
||||
if (transcriptScrollRef) {
|
||||
return <AlternateScreen mouseTracking={isMouseTrackingEnabled()}>
|
||||
{transcriptReturn}
|
||||
</AlternateScreen>;
|
||||
{transcriptReturn}
|
||||
</AlternateScreen>;
|
||||
}
|
||||
return transcriptReturn;
|
||||
}
|
||||
@@ -4541,11 +4541,11 @@ export function REPL({
|
||||
// early return above wraps its virtual-scroll branch the same way; only
|
||||
// the 30-cap dump branch stays unwrapped for native terminal scrollback.
|
||||
const mainReturn = <KeybindingSetup>
|
||||
<AnimatedTerminalTitle isAnimating={titleIsAnimating} title={terminalTitle} disabled={titleDisabled} noPrefix={showStatusInTerminalTab} />
|
||||
<GlobalKeybindingHandlers {...globalKeybindingProps} />
|
||||
{feature('VOICE_MODE') ? <VoiceKeybindingHandler voiceHandleKeyEvent={voice.handleKeyEvent} stripTrailing={voice.stripTrailing} resetAnchor={voice.resetAnchor} isActive={!toolJSX?.isLocalJSXCommand} /> : null}
|
||||
<CommandKeybindingHandlers onSubmit={onSubmit} isActive={!toolJSX?.isLocalJSXCommand} />
|
||||
{/* ScrollKeybindingHandler must mount before CancelRequestHandler so
|
||||
<AnimatedTerminalTitle isAnimating={titleIsAnimating} title={terminalTitle} disabled={titleDisabled} noPrefix={showStatusInTerminalTab} />
|
||||
<GlobalKeybindingHandlers {...globalKeybindingProps} />
|
||||
{feature('VOICE_MODE') ? <VoiceKeybindingHandler voiceHandleKeyEvent={voice.handleKeyEvent} stripTrailing={voice.stripTrailing} resetAnchor={voice.resetAnchor} isActive={!toolJSX?.isLocalJSXCommand} /> : null}
|
||||
<CommandKeybindingHandlers onSubmit={onSubmit} isActive={!toolJSX?.isLocalJSXCommand} />
|
||||
{/* ScrollKeybindingHandler must mount before CancelRequestHandler so
|
||||
ctrl+c-with-selection copies instead of cancelling the active task.
|
||||
Its raw useInput handler only stops propagation when a selection
|
||||
exists — without one, ctrl+c falls through to CancelRequestHandler.
|
||||
@@ -4553,40 +4553,40 @@ export function REPL({
|
||||
the modal's inner ScrollBox is not keyboard-driven. onScroll
|
||||
stays suppressed while a modal is showing so scroll doesn't
|
||||
stamp divider/pill state. */}
|
||||
<ScrollKeybindingHandler scrollRef={scrollRef} isActive={isFullscreenEnvEnabled() && (centeredModal != null || !focusedInputDialog || focusedInputDialog === 'tool-permission')} onScroll={centeredModal || toolPermissionOverlay || viewedAgentTask ? undefined : composedOnScroll} />
|
||||
{feature('MESSAGE_ACTIONS') && isFullscreenEnvEnabled() && !disableMessageActions ? <MessageActionsKeybindings handlers={messageActionHandlers} isActive={cursor !== null} /> : null}
|
||||
<CancelRequestHandler {...cancelRequestProps} />
|
||||
<MCPConnectionManager key={remountKey} dynamicMcpConfig={dynamicMcpConfig} isStrictMcpConfig={strictMcpConfig}>
|
||||
<FullscreenLayout scrollRef={scrollRef} overlay={toolPermissionOverlay} bottomFloat={feature('BUDDY') && companionVisible && !companionNarrow ? <CompanionFloatingBubble /> : undefined} modal={centeredModal} modalScrollRef={modalScrollRef} dividerYRef={dividerYRef} hidePill={!!viewedAgentTask} hideSticky={!!viewedTeammateTask} newMessageCount={unseenDivider?.count ?? 0} onPillClick={() => {
|
||||
<ScrollKeybindingHandler scrollRef={scrollRef} isActive={isFullscreenEnvEnabled() && (centeredModal != null || !focusedInputDialog || focusedInputDialog === 'tool-permission')} onScroll={centeredModal || toolPermissionOverlay || viewedAgentTask ? undefined : composedOnScroll} />
|
||||
{feature('MESSAGE_ACTIONS') && isFullscreenEnvEnabled() && !disableMessageActions ? <MessageActionsKeybindings handlers={messageActionHandlers} isActive={cursor !== null} /> : null}
|
||||
<CancelRequestHandler {...cancelRequestProps} />
|
||||
<MCPConnectionManager key={remountKey} dynamicMcpConfig={dynamicMcpConfig} isStrictMcpConfig={strictMcpConfig}>
|
||||
<FullscreenLayout scrollRef={scrollRef} overlay={toolPermissionOverlay} bottomFloat={feature('BUDDY') && companionVisible && !companionNarrow ? <CompanionFloatingBubble /> : undefined} modal={centeredModal} modalScrollRef={modalScrollRef} dividerYRef={dividerYRef} hidePill={!!viewedAgentTask} hideSticky={!!viewedTeammateTask} newMessageCount={unseenDivider?.count ?? 0} onPillClick={() => {
|
||||
setCursor(null);
|
||||
jumpToNew(scrollRef.current);
|
||||
}} scrollable={<>
|
||||
<TeammateViewHeader />
|
||||
<Messages messages={displayedMessages} tools={tools} commands={commands} verbose={verbose} toolJSX={toolJSX} toolUseConfirmQueue={toolUseConfirmQueue} inProgressToolUseIDs={viewedTeammateTask ? viewedTeammateTask.inProgressToolUseIDs ?? new Set() : inProgressToolUseIDs} isMessageSelectorVisible={isMessageSelectorVisible} conversationId={conversationId} screen={screen} streamingToolUses={streamingToolUses} showAllInTranscript={showAllInTranscript} agentDefinitions={agentDefinitions} onOpenRateLimitOptions={handleOpenRateLimitOptions} isLoading={isLoading} streamingText={isLoading && !viewedAgentTask ? visibleStreamingText : null} isBriefOnly={viewedAgentTask ? false : isBriefOnly} unseenDivider={viewedAgentTask ? undefined : unseenDivider} scrollRef={isFullscreenEnvEnabled() ? scrollRef : undefined} trackStickyPrompt={isFullscreenEnvEnabled() ? true : undefined} cursor={cursor} setCursor={setCursor} cursorNavRef={cursorNavRef} />
|
||||
<AwsAuthStatusBox />
|
||||
{/* Hide the processing placeholder while a modal is showing —
|
||||
<TeammateViewHeader />
|
||||
<Messages messages={displayedMessages} tools={tools} commands={commands} verbose={verbose} toolJSX={toolJSX} toolUseConfirmQueue={toolUseConfirmQueue} inProgressToolUseIDs={viewedTeammateTask ? viewedTeammateTask.inProgressToolUseIDs ?? new Set() : inProgressToolUseIDs} isMessageSelectorVisible={isMessageSelectorVisible} conversationId={conversationId} screen={screen} streamingToolUses={streamingToolUses} showAllInTranscript={showAllInTranscript} agentDefinitions={agentDefinitions} onOpenRateLimitOptions={handleOpenRateLimitOptions} isLoading={isLoading} streamingText={isLoading && !viewedAgentTask ? visibleStreamingText : null} isBriefOnly={viewedAgentTask ? false : isBriefOnly} unseenDivider={viewedAgentTask ? undefined : unseenDivider} scrollRef={isFullscreenEnvEnabled() ? scrollRef : undefined} trackStickyPrompt={isFullscreenEnvEnabled() ? true : undefined} cursor={cursor} setCursor={setCursor} cursorNavRef={cursorNavRef} />
|
||||
<AwsAuthStatusBox />
|
||||
{/* Hide the processing placeholder while a modal is showing —
|
||||
it would sit at the last visible transcript row right above
|
||||
the ▔ divider, showing "❯ /config" as redundant clutter
|
||||
(the modal IS the /config UI). Outside modals it stays so
|
||||
the user sees their input echoed while Claude processes. */}
|
||||
{!disabled && placeholderText && !centeredModal && <UserTextMessage param={{
|
||||
{!disabled && placeholderText && !centeredModal && <UserTextMessage param={{
|
||||
text: placeholderText,
|
||||
type: 'text'
|
||||
}} addMargin={true} verbose={verbose} />}
|
||||
{toolJSX && !(toolJSX.isLocalJSXCommand && toolJSX.isImmediate) && !toolJsxCentered && <Box flexDirection="column" width="100%">
|
||||
{toolJSX.jsx}
|
||||
</Box>}
|
||||
{"external" === 'ant' && <TungstenLiveMonitor />}
|
||||
{feature('WEB_BROWSER_TOOL') ? WebBrowserPanelModule && <WebBrowserPanelModule.WebBrowserPanel /> : null}
|
||||
<Box flexGrow={1} />
|
||||
{showSpinner && <SpinnerWithVerb mode={streamMode} spinnerTip={spinnerTip} responseLengthRef={responseLengthRef} apiMetricsRef={apiMetricsRef} overrideMessage={spinnerMessage} spinnerSuffix={stopHookSpinnerSuffix} verbose={verbose} loadingStartTimeRef={loadingStartTimeRef} totalPausedMsRef={totalPausedMsRef} pauseStartTimeRef={pauseStartTimeRef} overrideColor={spinnerColor} overrideShimmerColor={spinnerShimmerColor} hasActiveTools={inProgressToolUseIDs.size > 0} leaderIsIdle={!isLoading} />}
|
||||
{!showSpinner && !isLoading && !userInputOnProcessing && !hasRunningTeammates && isBriefOnly && !viewedAgentTask && <BriefIdleStatus />}
|
||||
{isFullscreenEnvEnabled() && <PromptInputQueuedCommands />}
|
||||
</>} bottom={<Box flexDirection={feature('BUDDY') && companionNarrow ? 'column' : 'row'} width="100%" alignItems={feature('BUDDY') && companionNarrow ? undefined : 'flex-end'}>
|
||||
{feature('BUDDY') && companionNarrow && isFullscreenEnvEnabled() && companionVisible ? <CompanionSprite /> : null}
|
||||
<Box flexDirection="column" flexGrow={1}>
|
||||
{permissionStickyFooter}
|
||||
{/* Immediate local-jsx commands (/btw, /sandbox, /assistant,
|
||||
{toolJSX && !(toolJSX.isLocalJSXCommand && toolJSX.isImmediate) && !toolJsxCentered && <Box flexDirection="column" width="100%">
|
||||
{toolJSX.jsx}
|
||||
</Box>}
|
||||
{"external" === 'ant' && <TungstenLiveMonitor />}
|
||||
{feature('WEB_BROWSER_TOOL') ? WebBrowserPanelModule && <WebBrowserPanelModule.WebBrowserPanel /> : null}
|
||||
<Box flexGrow={1} />
|
||||
{showSpinner && <SpinnerWithVerb mode={streamMode} spinnerTip={spinnerTip} responseLengthRef={responseLengthRef} apiMetricsRef={apiMetricsRef} overrideMessage={spinnerMessage} spinnerSuffix={stopHookSpinnerSuffix} verbose={verbose} loadingStartTimeRef={loadingStartTimeRef} totalPausedMsRef={totalPausedMsRef} pauseStartTimeRef={pauseStartTimeRef} overrideColor={spinnerColor} overrideShimmerColor={spinnerShimmerColor} hasActiveTools={inProgressToolUseIDs.size > 0} leaderIsIdle={!isLoading} />}
|
||||
{!showSpinner && !isLoading && !userInputOnProcessing && !hasRunningTeammates && isBriefOnly && !viewedAgentTask && <BriefIdleStatus />}
|
||||
{isFullscreenEnvEnabled() && <PromptInputQueuedCommands />}
|
||||
</>} bottom={<Box flexDirection={feature('BUDDY') && companionNarrow ? 'column' : 'row'} width="100%" alignItems={feature('BUDDY') && companionNarrow ? undefined : 'flex-end'}>
|
||||
{feature('BUDDY') && companionNarrow && isFullscreenEnvEnabled() && companionVisible ? <CompanionSprite /> : null}
|
||||
<Box flexDirection="column" flexGrow={1}>
|
||||
{permissionStickyFooter}
|
||||
{/* Immediate local-jsx commands (/btw, /sandbox, /assistant,
|
||||
/issue) render here, NOT inside scrollable. They stay mounted
|
||||
while the main conversation streams behind them, so ScrollBox
|
||||
relayouts on each new message would drag them around. bottom
|
||||
@@ -4595,13 +4595,13 @@ export function REPL({
|
||||
stays in scrollable: the main loop is paused so no jiggle,
|
||||
and their tall content (DiffDetailView renders up to 400
|
||||
lines with no internal scroll) needs the outer ScrollBox. */}
|
||||
{toolJSX?.isLocalJSXCommand && toolJSX.isImmediate && !toolJsxCentered && <Box flexDirection="column" width="100%">
|
||||
{toolJSX.jsx}
|
||||
</Box>}
|
||||
{!showSpinner && !toolJSX?.isLocalJSXCommand && showExpandedTodos && tasksV2 && tasksV2.length > 0 && <Box width="100%" flexDirection="column">
|
||||
<TaskListV2 tasks={tasksV2} isStandalone={true} />
|
||||
</Box>}
|
||||
{focusedInputDialog === 'sandbox-permission' && <SandboxPermissionRequest key={sandboxPermissionRequestQueue[0]!.hostPattern.host} hostPattern={sandboxPermissionRequestQueue[0]!.hostPattern} onUserResponse={(response: {
|
||||
{toolJSX?.isLocalJSXCommand && toolJSX.isImmediate && !toolJsxCentered && <Box flexDirection="column" width="100%">
|
||||
{toolJSX.jsx}
|
||||
</Box>}
|
||||
{!showSpinner && !toolJSX?.isLocalJSXCommand && showExpandedTodos && tasksV2 && tasksV2.length > 0 && <Box width="100%" flexDirection="column">
|
||||
<TaskListV2 tasks={tasksV2} isStandalone={true} />
|
||||
</Box>}
|
||||
{focusedInputDialog === 'sandbox-permission' && <SandboxPermissionRequest key={sandboxPermissionRequestQueue[0]!.hostPattern.host} hostPattern={sandboxPermissionRequestQueue[0]!.hostPattern} onUserResponse={(response: {
|
||||
allow: boolean;
|
||||
persistToSettings: boolean;
|
||||
}) => {
|
||||
@@ -4650,7 +4650,7 @@ export function REPL({
|
||||
sandboxBridgeCleanupRef.current.delete(approvedHost);
|
||||
}
|
||||
}} />}
|
||||
{focusedInputDialog === 'prompt' && <PromptDialog key={promptQueue[0]!.request.prompt} title={promptQueue[0]!.title} toolInputSummary={promptQueue[0]!.toolInputSummary} request={promptQueue[0]!.request} onRespond={selectedKey => {
|
||||
{focusedInputDialog === 'prompt' && <PromptDialog key={promptQueue[0]!.request.prompt} title={promptQueue[0]!.title} toolInputSummary={promptQueue[0]!.toolInputSummary} request={promptQueue[0]!.request} onRespond={selectedKey => {
|
||||
const item = promptQueue[0];
|
||||
if (!item) return;
|
||||
item.resolve({
|
||||
@@ -4664,12 +4664,12 @@ export function REPL({
|
||||
item.reject(new Error('Prompt cancelled by user'));
|
||||
setPromptQueue(([, ...tail]) => tail);
|
||||
}} />}
|
||||
{/* Show pending indicator on worker while waiting for leader approval */}
|
||||
{pendingWorkerRequest && <WorkerPendingPermission toolName={pendingWorkerRequest.toolName} description={pendingWorkerRequest.description} />}
|
||||
{/* Show pending indicator for sandbox permission on worker side */}
|
||||
{pendingSandboxRequest && <WorkerPendingPermission toolName="Network Access" description={`Waiting for leader to approve network access to ${pendingSandboxRequest.host}`} />}
|
||||
{/* Worker sandbox permission requests from swarm workers */}
|
||||
{focusedInputDialog === 'worker-sandbox-permission' && <SandboxPermissionRequest key={workerSandboxPermissions.queue[0]!.requestId} hostPattern={{
|
||||
{/* Show pending indicator on worker while waiting for leader approval */}
|
||||
{pendingWorkerRequest && <WorkerPendingPermission toolName={pendingWorkerRequest.toolName} description={pendingWorkerRequest.description} />}
|
||||
{/* Show pending indicator for sandbox permission on worker side */}
|
||||
{pendingSandboxRequest && <WorkerPendingPermission toolName="Network Access" description={`Waiting for leader to approve network access to ${pendingSandboxRequest.host}`} />}
|
||||
{/* Worker sandbox permission requests from swarm workers */}
|
||||
{focusedInputDialog === 'worker-sandbox-permission' && <SandboxPermissionRequest key={workerSandboxPermissions.queue[0]!.requestId} hostPattern={{
|
||||
host: workerSandboxPermissions.queue[0]!.host,
|
||||
port: undefined
|
||||
} as NetworkHostPattern} onUserResponse={(response: {
|
||||
@@ -4713,7 +4713,7 @@ export function REPL({
|
||||
}
|
||||
}));
|
||||
}} />}
|
||||
{focusedInputDialog === 'elicitation' && <ElicitationDialog key={elicitation.queue[0]!.serverName + ':' + String(elicitation.queue[0]!.requestId)} event={elicitation.queue[0]!} onResponse={(action, content) => {
|
||||
{focusedInputDialog === 'elicitation' && <ElicitationDialog key={elicitation.queue[0]!.serverName + ':' + String(elicitation.queue[0]!.requestId)} event={elicitation.queue[0]!} onResponse={(action, content) => {
|
||||
const currentRequest = elicitation.queue[0];
|
||||
if (!currentRequest) return;
|
||||
// Call respond callback to resolve Promise
|
||||
@@ -4742,7 +4742,7 @@ export function REPL({
|
||||
}));
|
||||
currentRequest?.onWaitingDismiss?.(action);
|
||||
}} />}
|
||||
{focusedInputDialog === 'cost' && <CostThresholdDialog onDone={() => {
|
||||
{focusedInputDialog === 'cost' && <CostThresholdDialog onDone={() => {
|
||||
setShowCostDialog(false);
|
||||
setHaveShownCostDialog(true);
|
||||
saveGlobalConfig(current => ({
|
||||
@@ -4751,7 +4751,7 @@ export function REPL({
|
||||
}));
|
||||
logEvent('tengu_cost_threshold_acknowledged', {});
|
||||
}} />}
|
||||
{focusedInputDialog === 'idle-return' && idleReturnPending && <IdleReturnDialog idleMinutes={idleReturnPending.idleMinutes} totalInputTokens={getTotalInputTokens()} onDone={async action => {
|
||||
{focusedInputDialog === 'idle-return' && idleReturnPending && <IdleReturnDialog idleMinutes={idleReturnPending.idleMinutes} totalInputTokens={getTotalInputTokens()} onDone={async action => {
|
||||
const pending = idleReturnPending;
|
||||
setIdleReturnPending(null);
|
||||
logEvent('tengu_idle_return_action', {
|
||||
@@ -4793,13 +4793,13 @@ export function REPL({
|
||||
}
|
||||
skipIdleCheckRef.current = true;
|
||||
void onSubmitRef.current(pending.input, {
|
||||
setCursorOffset: () => {},
|
||||
clearBuffer: () => {},
|
||||
resetHistory: () => {}
|
||||
setCursorOffset: () => { },
|
||||
clearBuffer: () => { },
|
||||
resetHistory: () => { }
|
||||
});
|
||||
}} />}
|
||||
{focusedInputDialog === 'ide-onboarding' && <IdeOnboardingDialog onDone={() => setShowIdeOnboarding(false)} installationStatus={ideInstallationStatus} />}
|
||||
{"external" === 'ant' && focusedInputDialog === 'model-switch' && AntModelSwitchCallout && <AntModelSwitchCallout onDone={(selection: string, modelAlias?: string) => {
|
||||
{focusedInputDialog === 'ide-onboarding' && <IdeOnboardingDialog onDone={() => setShowIdeOnboarding(false)} installationStatus={ideInstallationStatus} />}
|
||||
{"external" === 'ant' && focusedInputDialog === 'model-switch' && AntModelSwitchCallout && <AntModelSwitchCallout onDone={(selection: string, modelAlias?: string) => {
|
||||
setShowModelSwitchCallout(false);
|
||||
if (selection === 'switch' && modelAlias) {
|
||||
setAppState(prev => ({
|
||||
@@ -4809,8 +4809,8 @@ export function REPL({
|
||||
}));
|
||||
}
|
||||
}} />}
|
||||
{"external" === 'ant' && focusedInputDialog === 'undercover-callout' && UndercoverAutoCallout && <UndercoverAutoCallout onDone={() => setShowUndercoverCallout(false)} />}
|
||||
{focusedInputDialog === 'effort-callout' && <EffortCallout model={mainLoopModel} onDone={selection => {
|
||||
{"external" === 'ant' && focusedInputDialog === 'undercover-callout' && UndercoverAutoCallout && <UndercoverAutoCallout onDone={() => setShowUndercoverCallout(false)} />}
|
||||
{focusedInputDialog === 'effort-callout' && <EffortCallout model={mainLoopModel} onDone={selection => {
|
||||
setShowEffortCallout(false);
|
||||
if (selection !== 'dismiss') {
|
||||
setAppState(prev => ({
|
||||
@@ -4819,7 +4819,7 @@ export function REPL({
|
||||
}));
|
||||
}
|
||||
}} />}
|
||||
{focusedInputDialog === 'remote-callout' && <RemoteCallout onDone={selection => {
|
||||
{focusedInputDialog === 'remote-callout' && <RemoteCallout onDone={selection => {
|
||||
setAppState(prev => {
|
||||
if (!prev.showRemoteCallout) return prev;
|
||||
return {
|
||||
@@ -4834,17 +4834,17 @@ export function REPL({
|
||||
});
|
||||
}} />}
|
||||
|
||||
{exitFlow}
|
||||
{exitFlow}
|
||||
|
||||
{focusedInputDialog === 'plugin-hint' && hintRecommendation && <PluginHintMenu pluginName={hintRecommendation.pluginName} pluginDescription={hintRecommendation.pluginDescription} marketplaceName={hintRecommendation.marketplaceName} sourceCommand={hintRecommendation.sourceCommand} onResponse={handleHintResponse} />}
|
||||
{focusedInputDialog === 'plugin-hint' && hintRecommendation && <PluginHintMenu pluginName={hintRecommendation.pluginName} pluginDescription={hintRecommendation.pluginDescription} marketplaceName={hintRecommendation.marketplaceName} sourceCommand={hintRecommendation.sourceCommand} onResponse={handleHintResponse} />}
|
||||
|
||||
{focusedInputDialog === 'lsp-recommendation' && lspRecommendation && <LspRecommendationMenu pluginName={lspRecommendation.pluginName} pluginDescription={lspRecommendation.pluginDescription} fileExtension={lspRecommendation.fileExtension} onResponse={handleLspResponse} />}
|
||||
{focusedInputDialog === 'lsp-recommendation' && lspRecommendation && <LspRecommendationMenu pluginName={lspRecommendation.pluginName} pluginDescription={lspRecommendation.pluginDescription} fileExtension={lspRecommendation.fileExtension} onResponse={handleLspResponse} />}
|
||||
|
||||
{focusedInputDialog === 'desktop-upsell' && <DesktopUpsellStartup onDone={() => setShowDesktopUpsellStartup(false)} />}
|
||||
{focusedInputDialog === 'desktop-upsell' && <DesktopUpsellStartup onDone={() => setShowDesktopUpsellStartup(false)} />}
|
||||
|
||||
{feature('ULTRAPLAN') ? focusedInputDialog === 'ultraplan-choice' && ultraplanPendingChoice && <UltraplanChoiceDialog plan={ultraplanPendingChoice.plan} sessionId={ultraplanPendingChoice.sessionId} taskId={ultraplanPendingChoice.taskId} setMessages={setMessages} readFileState={readFileState.current} getAppState={() => store.getState()} setConversationId={setConversationId} /> : null}
|
||||
{feature('ULTRAPLAN') ? focusedInputDialog === 'ultraplan-choice' && ultraplanPendingChoice && <UltraplanChoiceDialog plan={ultraplanPendingChoice.plan} sessionId={ultraplanPendingChoice.sessionId} taskId={ultraplanPendingChoice.taskId} setMessages={setMessages} readFileState={readFileState.current} getAppState={() => store.getState()} setConversationId={setConversationId} /> : null}
|
||||
|
||||
{feature('ULTRAPLAN') ? focusedInputDialog === 'ultraplan-launch' && ultraplanLaunchPending && <UltraplanLaunchDialog onChoice={(choice, opts) => {
|
||||
{feature('ULTRAPLAN') ? focusedInputDialog === 'ultraplan-launch' && ultraplanLaunchPending && <UltraplanLaunchDialog onChoice={(choice, opts) => {
|
||||
const blurb = ultraplanLaunchPending.blurb;
|
||||
setAppState(prev => prev.ultraplanLaunchPending ? {
|
||||
...prev,
|
||||
@@ -4884,26 +4884,26 @@ export function REPL({
|
||||
}).then(appendStdout).catch(logError);
|
||||
}} /> : null}
|
||||
|
||||
{mrRender()}
|
||||
{mrRender()}
|
||||
|
||||
{!toolJSX?.shouldHidePromptInput && !focusedInputDialog && !isExiting && !disabled && !cursor && <>
|
||||
{autoRunIssueReason && <AutoRunIssueNotification onRun={handleAutoRunIssue} onCancel={handleCancelAutoRunIssue} reason={getAutoRunIssueReasonText(autoRunIssueReason)} />}
|
||||
{postCompactSurvey.state !== 'closed' ? <FeedbackSurvey state={postCompactSurvey.state} lastResponse={postCompactSurvey.lastResponse} handleSelect={postCompactSurvey.handleSelect} inputValue={inputValue} setInputValue={setInputValue} onRequestFeedback={handleSurveyRequestFeedback} /> : memorySurvey.state !== 'closed' ? <FeedbackSurvey state={memorySurvey.state} lastResponse={memorySurvey.lastResponse} handleSelect={memorySurvey.handleSelect} handleTranscriptSelect={memorySurvey.handleTranscriptSelect} inputValue={inputValue} setInputValue={setInputValue} onRequestFeedback={handleSurveyRequestFeedback} message="How well did Claude use its memory? (optional)" /> : <FeedbackSurvey state={feedbackSurvey.state} lastResponse={feedbackSurvey.lastResponse} handleSelect={feedbackSurvey.handleSelect} handleTranscriptSelect={feedbackSurvey.handleTranscriptSelect} inputValue={inputValue} setInputValue={setInputValue} onRequestFeedback={didAutoRunIssueRef.current ? undefined : handleSurveyRequestFeedback} />}
|
||||
{/* Frustration-triggered transcript sharing prompt */}
|
||||
{frustrationDetection.state !== 'closed' && <FeedbackSurvey state={frustrationDetection.state} lastResponse={null} handleSelect={() => {}} handleTranscriptSelect={frustrationDetection.handleTranscriptSelect} inputValue={inputValue} setInputValue={setInputValue} />}
|
||||
{/* Skill improvement survey - appears when improvements detected (ant-only) */}
|
||||
{"external" === 'ant' && skillImprovementSurvey.suggestion && <SkillImprovementSurvey isOpen={skillImprovementSurvey.isOpen} skillName={skillImprovementSurvey.suggestion.skillName} updates={skillImprovementSurvey.suggestion.updates} handleSelect={skillImprovementSurvey.handleSelect} inputValue={inputValue} setInputValue={setInputValue} />}
|
||||
{showIssueFlagBanner && <IssueFlagBanner />}
|
||||
{}
|
||||
<PromptInput debug={debug} ideSelection={ideSelection} hasSuppressedDialogs={!!hasSuppressedDialogs} isLocalJSXCommandActive={isShowingLocalJSXCommand} getToolUseContext={getToolUseContext} toolPermissionContext={toolPermissionContext} setToolPermissionContext={setToolPermissionContext} apiKeyStatus={apiKeyStatus} commands={commands} agents={agentDefinitions.activeAgents} isLoading={isLoading} onExit={handleExit} verbose={verbose} messages={messages} onAutoUpdaterResult={setAutoUpdaterResult} autoUpdaterResult={autoUpdaterResult} input={inputValue} onInputChange={setInputValue} mode={inputMode} onModeChange={setInputMode} stashedPrompt={stashedPrompt} setStashedPrompt={setStashedPrompt} submitCount={submitCount} onShowMessageSelector={handleShowMessageSelector} onMessageActionsEnter={
|
||||
// Works during isLoading — edit cancels first; uuid selection survives appends.
|
||||
feature('MESSAGE_ACTIONS') && isFullscreenEnvEnabled() && !disableMessageActions ? enterMessageActions : undefined} mcpClients={mcpClients} pastedContents={pastedContents} setPastedContents={setPastedContents} vimMode={vimMode} setVimMode={setVimMode} showBashesDialog={showBashesDialog} setShowBashesDialog={setShowBashesDialog} onSubmit={onSubmit} onAgentSubmit={onAgentSubmit} isSearchingHistory={isSearchingHistory} setIsSearchingHistory={setIsSearchingHistory} helpOpen={isHelpOpen} setHelpOpen={setIsHelpOpen} insertTextRef={feature('VOICE_MODE') ? insertTextRef : undefined} voiceInterimRange={voice.interimRange} />
|
||||
<SessionBackgroundHint onBackgroundSession={handleBackgroundSession} isLoading={isLoading} />
|
||||
</>}
|
||||
{cursor &&
|
||||
// inputValue is REPL state; typed text survives the round-trip.
|
||||
<MessageActionsBar cursor={cursor} />}
|
||||
{focusedInputDialog === 'message-selector' && <MessageSelector messages={messages} preselectedMessage={messageSelectorPreselect} onPreRestore={onCancel} onRestoreCode={async (message: UserMessage) => {
|
||||
{!toolJSX?.shouldHidePromptInput && !focusedInputDialog && !isExiting && !disabled && !cursor && !isShuttingDown() && <>
|
||||
{autoRunIssueReason && <AutoRunIssueNotification onRun={handleAutoRunIssue} onCancel={handleCancelAutoRunIssue} reason={getAutoRunIssueReasonText(autoRunIssueReason)} />}
|
||||
{postCompactSurvey.state !== 'closed' ? <FeedbackSurvey state={postCompactSurvey.state} lastResponse={postCompactSurvey.lastResponse} handleSelect={postCompactSurvey.handleSelect} inputValue={inputValue} setInputValue={setInputValue} onRequestFeedback={handleSurveyRequestFeedback} /> : memorySurvey.state !== 'closed' ? <FeedbackSurvey state={memorySurvey.state} lastResponse={memorySurvey.lastResponse} handleSelect={memorySurvey.handleSelect} handleTranscriptSelect={memorySurvey.handleTranscriptSelect} inputValue={inputValue} setInputValue={setInputValue} onRequestFeedback={handleSurveyRequestFeedback} message="How well did Claude use its memory? (optional)" /> : <FeedbackSurvey state={feedbackSurvey.state} lastResponse={feedbackSurvey.lastResponse} handleSelect={feedbackSurvey.handleSelect} handleTranscriptSelect={feedbackSurvey.handleTranscriptSelect} inputValue={inputValue} setInputValue={setInputValue} onRequestFeedback={didAutoRunIssueRef.current ? undefined : handleSurveyRequestFeedback} />}
|
||||
{/* Frustration-triggered transcript sharing prompt */}
|
||||
{frustrationDetection.state !== 'closed' && <FeedbackSurvey state={frustrationDetection.state} lastResponse={null} handleSelect={() => { }} handleTranscriptSelect={frustrationDetection.handleTranscriptSelect} inputValue={inputValue} setInputValue={setInputValue} />}
|
||||
{/* Skill improvement survey - appears when improvements detected (ant-only) */}
|
||||
{"external" === 'ant' && skillImprovementSurvey.suggestion && <SkillImprovementSurvey isOpen={skillImprovementSurvey.isOpen} skillName={skillImprovementSurvey.suggestion.skillName} updates={skillImprovementSurvey.suggestion.updates} handleSelect={skillImprovementSurvey.handleSelect} inputValue={inputValue} setInputValue={setInputValue} />}
|
||||
{showIssueFlagBanner && <IssueFlagBanner />}
|
||||
{ }
|
||||
<PromptInput debug={debug} ideSelection={ideSelection} hasSuppressedDialogs={!!hasSuppressedDialogs} isLocalJSXCommandActive={isShowingLocalJSXCommand} getToolUseContext={getToolUseContext} toolPermissionContext={toolPermissionContext} setToolPermissionContext={setToolPermissionContext} apiKeyStatus={apiKeyStatus} commands={commands} agents={agentDefinitions.activeAgents} isLoading={isLoading} onExit={handleExit} verbose={verbose} messages={messages} onAutoUpdaterResult={setAutoUpdaterResult} autoUpdaterResult={autoUpdaterResult} input={inputValue} onInputChange={setInputValue} mode={inputMode} onModeChange={setInputMode} stashedPrompt={stashedPrompt} setStashedPrompt={setStashedPrompt} submitCount={submitCount} onShowMessageSelector={handleShowMessageSelector} onMessageActionsEnter={
|
||||
// Works during isLoading — edit cancels first; uuid selection survives appends.
|
||||
feature('MESSAGE_ACTIONS') && isFullscreenEnvEnabled() && !disableMessageActions ? enterMessageActions : undefined} mcpClients={mcpClients} pastedContents={pastedContents} setPastedContents={setPastedContents} vimMode={vimMode} setVimMode={setVimMode} showBashesDialog={showBashesDialog} setShowBashesDialog={setShowBashesDialog} onSubmit={onSubmit} onAgentSubmit={onAgentSubmit} isSearchingHistory={isSearchingHistory} setIsSearchingHistory={setIsSearchingHistory} helpOpen={isHelpOpen} setHelpOpen={setIsHelpOpen} insertTextRef={feature('VOICE_MODE') ? insertTextRef : undefined} voiceInterimRange={voice.interimRange} />
|
||||
<SessionBackgroundHint onBackgroundSession={handleBackgroundSession} isLoading={isLoading} />
|
||||
</>}
|
||||
{cursor &&
|
||||
// inputValue is REPL state; typed text survives the round-trip.
|
||||
<MessageActionsBar cursor={cursor} />}
|
||||
{focusedInputDialog === 'message-selector' && <MessageSelector messages={messages} preselectedMessage={messageSelectorPreselect} onPreRestore={onCancel} onRestoreCode={async (message: UserMessage) => {
|
||||
await fileHistoryRewind((updater: (prev: FileHistoryState) => FileHistoryState) => {
|
||||
setAppState(prev => ({
|
||||
...prev,
|
||||
@@ -4985,16 +4985,16 @@ export function REPL({
|
||||
setIsMessageSelectorVisible(false);
|
||||
setMessageSelectorPreselect(undefined);
|
||||
}} />}
|
||||
{"external" === 'ant' && <DevBar />}
|
||||
</Box>
|
||||
{feature('BUDDY') && !(companionNarrow && isFullscreenEnvEnabled()) && companionVisible ? <CompanionSprite /> : null}
|
||||
</Box>} />
|
||||
</MCPConnectionManager>
|
||||
</KeybindingSetup>;
|
||||
{"external" === 'ant' && <DevBar />}
|
||||
</Box>
|
||||
{feature('BUDDY') && !(companionNarrow && isFullscreenEnvEnabled()) && companionVisible ? <CompanionSprite /> : null}
|
||||
</Box>} />
|
||||
</MCPConnectionManager>
|
||||
</KeybindingSetup>;
|
||||
if (isFullscreenEnvEnabled()) {
|
||||
return <AlternateScreen mouseTracking={isMouseTrackingEnabled()}>
|
||||
{mainReturn}
|
||||
</AlternateScreen>;
|
||||
{mainReturn}
|
||||
</AlternateScreen>;
|
||||
}
|
||||
return mainReturn;
|
||||
}
|
||||
|
||||
@@ -154,7 +154,10 @@ export async function getAnthropicClient({
|
||||
fetch: resolvedFetch,
|
||||
}),
|
||||
}
|
||||
if (isEnvTruthy(process.env.CLAUDE_CODE_USE_OPENAI)) {
|
||||
if (
|
||||
isEnvTruthy(process.env.CLAUDE_CODE_USE_OPENAI) ||
|
||||
isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB)
|
||||
) {
|
||||
const { createOpenAIShimClient } = await import('./openaiShim.js')
|
||||
return createOpenAIShimClient({
|
||||
defaultHeaders,
|
||||
|
||||
@@ -14,8 +14,15 @@
|
||||
* OPENAI_BASE_URL=http://... — base URL (default: https://api.openai.com/v1)
|
||||
* OPENAI_MODEL=gpt-4o — default model override
|
||||
* CODEX_API_KEY / ~/.codex/auth.json — Codex auth for codexplan/codexspark
|
||||
*
|
||||
* GitHub Models (models.github.ai), OpenAI-compatible:
|
||||
* CLAUDE_CODE_USE_GITHUB=1 — enable GitHub inference (no need for USE_OPENAI)
|
||||
* GITHUB_TOKEN or GH_TOKEN — PAT with models access (mapped to Bearer auth)
|
||||
* OPENAI_MODEL — optional; use github:copilot or openai/gpt-4.1 style IDs
|
||||
*/
|
||||
|
||||
import { isEnvTruthy } from '../../utils/envUtils.js'
|
||||
import { hydrateGithubModelsTokenFromSecureStorage } from '../../utils/githubModelsCredentials.js'
|
||||
import {
|
||||
codexStreamToAnthropic,
|
||||
collectCodexCompletedResponse,
|
||||
@@ -30,6 +37,25 @@ import {
|
||||
resolveProviderRequest,
|
||||
} from './providerConfig.js'
|
||||
|
||||
const GITHUB_MODELS_DEFAULT_BASE = 'https://models.github.ai/inference'
|
||||
const GITHUB_API_VERSION = '2022-11-28'
|
||||
const GITHUB_429_MAX_RETRIES = 3
|
||||
const GITHUB_429_BASE_DELAY_SEC = 1
|
||||
const GITHUB_429_MAX_DELAY_SEC = 32
|
||||
|
||||
function isGithubModelsMode(): boolean {
|
||||
return isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB)
|
||||
}
|
||||
|
||||
function formatRetryAfterHint(response: Response): string {
|
||||
const ra = response.headers.get('retry-after')
|
||||
return ra ? ` (Retry-After: ${ra})` : ''
|
||||
}
|
||||
|
||||
function sleepMs(ms: number): Promise<void> {
|
||||
return new Promise(resolve => setTimeout(resolve, ms))
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Types — minimal subset of Anthropic SDK types we need to produce
|
||||
// ---------------------------------------------------------------------------
|
||||
@@ -235,28 +261,66 @@ function normalizeSchemaForOpenAI(
|
||||
schema: Record<string, unknown>,
|
||||
strict = true,
|
||||
): Record<string, unknown> {
|
||||
if (schema.type !== 'object' || !schema.properties) return schema
|
||||
const properties = schema.properties as Record<string, unknown>
|
||||
const existingRequired = Array.isArray(schema.required) ? schema.required as string[] : []
|
||||
// OpenAI strict mode requires every property to be listed in required[].
|
||||
// Gemini rejects schemas where required[] contains keys absent from properties,
|
||||
// so only promote keys that actually exist in properties.
|
||||
if (strict) {
|
||||
const allKeys = Object.keys(properties)
|
||||
const required = Array.from(new Set([...existingRequired, ...allKeys]))
|
||||
return { ...schema, required }
|
||||
if (!schema || typeof schema !== 'object' || Array.isArray(schema)) {
|
||||
return (schema ?? {}) as Record<string, unknown>
|
||||
}
|
||||
// For Gemini: keep only existing required keys that are present in properties
|
||||
const required = existingRequired.filter(k => k in properties)
|
||||
return { ...schema, required }
|
||||
|
||||
const record = { ...schema }
|
||||
|
||||
if (record.type === 'object' && record.properties) {
|
||||
const properties = record.properties as Record<string, Record<string, unknown>>
|
||||
const existingRequired = Array.isArray(record.required) ? record.required as string[] : []
|
||||
|
||||
// Recurse into each property
|
||||
const normalizedProps: Record<string, unknown> = {}
|
||||
for (const [key, value] of Object.entries(properties)) {
|
||||
normalizedProps[key] = normalizeSchemaForOpenAI(
|
||||
value as Record<string, unknown>,
|
||||
strict,
|
||||
)
|
||||
}
|
||||
record.properties = normalizedProps
|
||||
|
||||
if (strict) {
|
||||
// OpenAI strict mode requires every property to be listed in required[]
|
||||
const allKeys = Object.keys(normalizedProps)
|
||||
record.required = Array.from(new Set([...existingRequired, ...allKeys]))
|
||||
// OpenAI strict mode requires additionalProperties: false on all object
|
||||
// schemas — override unconditionally to ensure nested objects comply.
|
||||
record.additionalProperties = false
|
||||
} else {
|
||||
// For Gemini: keep only existing required keys that are present in properties
|
||||
record.required = existingRequired.filter(k => k in normalizedProps)
|
||||
}
|
||||
}
|
||||
|
||||
// Recurse into array items
|
||||
if ('items' in record) {
|
||||
if (Array.isArray(record.items)) {
|
||||
record.items = (record.items as unknown[]).map(
|
||||
item => normalizeSchemaForOpenAI(item as Record<string, unknown>, strict),
|
||||
)
|
||||
} else {
|
||||
record.items = normalizeSchemaForOpenAI(record.items as Record<string, unknown>, strict)
|
||||
}
|
||||
}
|
||||
|
||||
// Recurse into combinators
|
||||
for (const key of ['anyOf', 'oneOf', 'allOf'] as const) {
|
||||
if (key in record && Array.isArray(record[key])) {
|
||||
record[key] = (record[key] as unknown[]).map(
|
||||
item => normalizeSchemaForOpenAI(item as Record<string, unknown>, strict),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
return record
|
||||
}
|
||||
|
||||
function convertTools(
|
||||
tools: Array<{ name: string; description?: string; input_schema?: Record<string, unknown> }>,
|
||||
): OpenAITool[] {
|
||||
const isGemini =
|
||||
process.env.CLAUDE_CODE_USE_GEMINI === '1' ||
|
||||
process.env.CLAUDE_CODE_USE_GEMINI === 'true'
|
||||
const isGemini = isEnvTruthy(process.env.CLAUDE_CODE_USE_GEMINI)
|
||||
|
||||
return tools
|
||||
.filter(t => t.name !== 'ToolSearchTool') // Not relevant for OpenAI
|
||||
@@ -342,7 +406,7 @@ async function* openaiStreamToAnthropic(
|
||||
): AsyncGenerator<AnthropicStreamEvent> {
|
||||
const messageId = makeMessageId()
|
||||
let contentBlockIndex = 0
|
||||
const activeToolCalls = new Map<number, { id: string; name: string; index: number }>()
|
||||
const activeToolCalls = new Map<number, { id: string; name: string; index: number; jsonBuffer: string }>()
|
||||
let hasEmittedContentStart = false
|
||||
let lastStopReason: 'tool_use' | 'max_tokens' | 'end_turn' | null = null
|
||||
let hasEmittedFinalUsage = false
|
||||
@@ -374,15 +438,16 @@ async function* openaiStreamToAnthropic(
|
||||
const decoder = new TextDecoder()
|
||||
let buffer = ''
|
||||
|
||||
while (true) {
|
||||
const { done, value } = await reader.read()
|
||||
if (done) break
|
||||
try {
|
||||
while (true) {
|
||||
const { done, value } = await reader.read()
|
||||
if (done) break
|
||||
|
||||
buffer += decoder.decode(value, { stream: true })
|
||||
const lines = buffer.split('\n')
|
||||
buffer = lines.pop() ?? ''
|
||||
buffer += decoder.decode(value, { stream: true })
|
||||
const lines = buffer.split('\n')
|
||||
buffer = lines.pop() ?? ''
|
||||
|
||||
for (const line of lines) {
|
||||
for (const line of lines) {
|
||||
const trimmed = line.trim()
|
||||
if (!trimmed || trimmed === 'data: [DONE]') continue
|
||||
if (!trimmed.startsWith('data: ')) continue
|
||||
@@ -436,6 +501,7 @@ async function* openaiStreamToAnthropic(
|
||||
id: tc.id,
|
||||
name: tc.function.name,
|
||||
index: toolBlockIndex,
|
||||
jsonBuffer: tc.function.arguments ?? '',
|
||||
})
|
||||
|
||||
yield {
|
||||
@@ -466,6 +532,9 @@ async function* openaiStreamToAnthropic(
|
||||
// Continuation of existing tool call
|
||||
const active = activeToolCalls.get(tc.index)
|
||||
if (active) {
|
||||
if (tc.function.arguments) {
|
||||
active.jsonBuffer += tc.function.arguments
|
||||
}
|
||||
yield {
|
||||
type: 'content_block_delta',
|
||||
index: active.index,
|
||||
@@ -493,6 +562,36 @@ async function* openaiStreamToAnthropic(
|
||||
}
|
||||
// Close active tool calls
|
||||
for (const [, tc] of activeToolCalls) {
|
||||
let suffixToAdd = ''
|
||||
if (tc.jsonBuffer) {
|
||||
try {
|
||||
JSON.parse(tc.jsonBuffer)
|
||||
} catch {
|
||||
const str = tc.jsonBuffer.trimEnd()
|
||||
const combinations = [
|
||||
'}', '"}', ']}', '"]}', '}}', '"}}', ']}}', '"]}}', '"]}]}', '}]}'
|
||||
]
|
||||
for (const combo of combinations) {
|
||||
try {
|
||||
JSON.parse(str + combo)
|
||||
suffixToAdd = combo
|
||||
break
|
||||
} catch {}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (suffixToAdd) {
|
||||
yield {
|
||||
type: 'content_block_delta',
|
||||
index: tc.index,
|
||||
delta: {
|
||||
type: 'input_json_delta',
|
||||
partial_json: suffixToAdd,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
yield { type: 'content_block_stop', index: tc.index }
|
||||
}
|
||||
|
||||
@@ -528,6 +627,9 @@ async function* openaiStreamToAnthropic(
|
||||
hasEmittedFinalUsage = true
|
||||
}
|
||||
}
|
||||
}
|
||||
} finally {
|
||||
reader.releaseLock()
|
||||
}
|
||||
|
||||
yield { type: 'message_stop' }
|
||||
@@ -656,16 +758,32 @@ class OpenAIShimMessages {
|
||||
messages: openaiMessages,
|
||||
stream: params.stream ?? false,
|
||||
}
|
||||
if (params.max_tokens !== undefined) {
|
||||
body.max_completion_tokens = params.max_tokens
|
||||
} else if ((params as Record<string, unknown>).max_completion_tokens !== undefined) {
|
||||
body.max_completion_tokens = (params as Record<string, unknown>).max_completion_tokens
|
||||
// Convert max_tokens to max_completion_tokens for OpenAI API compatibility.
|
||||
// Azure OpenAI requires max_completion_tokens and does not accept max_tokens.
|
||||
// Ensure max_tokens is a valid positive number before using it.
|
||||
const maxTokensValue = typeof params.max_tokens === 'number' && params.max_tokens > 0
|
||||
? params.max_tokens
|
||||
: undefined
|
||||
const maxCompletionTokensValue = typeof (params as Record<string, unknown>).max_completion_tokens === 'number'
|
||||
? (params as Record<string, unknown>).max_completion_tokens as number
|
||||
: undefined
|
||||
|
||||
if (maxTokensValue !== undefined) {
|
||||
body.max_completion_tokens = maxTokensValue
|
||||
} else if (maxCompletionTokensValue !== undefined) {
|
||||
body.max_completion_tokens = maxCompletionTokensValue
|
||||
}
|
||||
|
||||
if (params.stream) {
|
||||
body.stream_options = { include_usage: true }
|
||||
}
|
||||
|
||||
const isGithub = isGithubModelsMode()
|
||||
if (isGithub && body.max_completion_tokens !== undefined) {
|
||||
body.max_tokens = body.max_completion_tokens
|
||||
delete body.max_completion_tokens
|
||||
}
|
||||
|
||||
if (params.temperature !== undefined) body.temperature = params.temperature
|
||||
if (params.top_p !== undefined) body.top_p = params.top_p
|
||||
|
||||
@@ -715,6 +833,11 @@ class OpenAIShimMessages {
|
||||
}
|
||||
}
|
||||
|
||||
if (isGithub) {
|
||||
headers.Accept = 'application/vnd.github.v3+json'
|
||||
headers['X-GitHub-Api-Version'] = GITHUB_API_VERSION
|
||||
}
|
||||
|
||||
// Build the chat completions URL
|
||||
// Azure Cognitive Services / Azure OpenAI require a deployment-specific path
|
||||
// and an api-version query parameter.
|
||||
@@ -737,19 +860,42 @@ class OpenAIShimMessages {
|
||||
chatCompletionsUrl = `${request.baseUrl}/chat/completions`
|
||||
}
|
||||
|
||||
const response = await fetch(chatCompletionsUrl, {
|
||||
method: 'POST',
|
||||
const fetchInit = {
|
||||
method: 'POST' as const,
|
||||
headers,
|
||||
body: JSON.stringify(body),
|
||||
signal: options?.signal,
|
||||
})
|
||||
|
||||
if (!response.ok) {
|
||||
const errorBody = await response.text().catch(() => 'unknown error')
|
||||
throw new Error(`OpenAI API error ${response.status}: ${errorBody}`)
|
||||
}
|
||||
|
||||
return response
|
||||
const maxAttempts = isGithub ? GITHUB_429_MAX_RETRIES : 1
|
||||
let response: Response | undefined
|
||||
for (let attempt = 0; attempt < maxAttempts; attempt++) {
|
||||
response = await fetch(chatCompletionsUrl, fetchInit)
|
||||
if (response.ok) {
|
||||
return response
|
||||
}
|
||||
if (
|
||||
isGithub &&
|
||||
response.status === 429 &&
|
||||
attempt < maxAttempts - 1
|
||||
) {
|
||||
await response.text().catch(() => {})
|
||||
const delaySec = Math.min(
|
||||
GITHUB_429_BASE_DELAY_SEC * 2 ** attempt,
|
||||
GITHUB_429_MAX_DELAY_SEC,
|
||||
)
|
||||
await sleepMs(delaySec * 1000)
|
||||
continue
|
||||
}
|
||||
const errorBody = await response.text().catch(() => 'unknown error')
|
||||
const rateHint =
|
||||
isGithub && response.status === 429 ? formatRetryAfterHint(response) : ''
|
||||
throw new Error(
|
||||
`OpenAI API error ${response.status}: ${errorBody}${rateHint}`,
|
||||
)
|
||||
}
|
||||
|
||||
throw new Error('OpenAI shim: request loop exited unexpectedly')
|
||||
}
|
||||
|
||||
private _convertNonStreamingResponse(
|
||||
@@ -759,7 +905,10 @@ class OpenAIShimMessages {
|
||||
choices?: Array<{
|
||||
message?: {
|
||||
role?: string
|
||||
content?: string | null
|
||||
content?:
|
||||
| string
|
||||
| null
|
||||
| Array<{ type?: string; text?: string }>
|
||||
tool_calls?: Array<{
|
||||
id: string
|
||||
function: { name: string; arguments: string }
|
||||
@@ -778,8 +927,25 @@ class OpenAIShimMessages {
|
||||
const choice = data.choices?.[0]
|
||||
const content: Array<Record<string, unknown>> = []
|
||||
|
||||
if (choice?.message?.content) {
|
||||
content.push({ type: 'text', text: choice.message.content })
|
||||
const rawContent = choice?.message?.content
|
||||
if (typeof rawContent === 'string' && rawContent) {
|
||||
content.push({ type: 'text', text: rawContent })
|
||||
} else if (Array.isArray(rawContent) && rawContent.length > 0) {
|
||||
const parts: string[] = []
|
||||
for (const part of rawContent) {
|
||||
if (
|
||||
part &&
|
||||
typeof part === 'object' &&
|
||||
part.type === 'text' &&
|
||||
typeof part.text === 'string'
|
||||
) {
|
||||
parts.push(part.text)
|
||||
}
|
||||
}
|
||||
const joined = parts.join('\n')
|
||||
if (joined) {
|
||||
content.push({ type: 'text', text: joined })
|
||||
}
|
||||
}
|
||||
|
||||
if (choice?.message?.tool_calls) {
|
||||
@@ -838,12 +1004,11 @@ export function createOpenAIShimClient(options: {
|
||||
maxRetries?: number
|
||||
timeout?: number
|
||||
}): unknown {
|
||||
hydrateGithubModelsTokenFromSecureStorage()
|
||||
|
||||
// When Gemini provider is active, map Gemini env vars to OpenAI-compatible ones
|
||||
// so the existing providerConfig.ts infrastructure picks them up correctly.
|
||||
if (
|
||||
process.env.CLAUDE_CODE_USE_GEMINI === '1' ||
|
||||
process.env.CLAUDE_CODE_USE_GEMINI === 'true'
|
||||
) {
|
||||
if (isEnvTruthy(process.env.CLAUDE_CODE_USE_GEMINI)) {
|
||||
process.env.OPENAI_BASE_URL ??=
|
||||
process.env.GEMINI_BASE_URL ??
|
||||
'https://generativelanguage.googleapis.com/v1beta/openai'
|
||||
@@ -852,6 +1017,10 @@ export function createOpenAIShimClient(options: {
|
||||
if (process.env.GEMINI_MODEL && !process.env.OPENAI_MODEL) {
|
||||
process.env.OPENAI_MODEL = process.env.GEMINI_MODEL
|
||||
}
|
||||
} else if (isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB)) {
|
||||
process.env.OPENAI_BASE_URL ??= GITHUB_MODELS_DEFAULT_BASE
|
||||
process.env.OPENAI_API_KEY ??=
|
||||
process.env.GITHUB_TOKEN ?? process.env.GH_TOKEN ?? ''
|
||||
}
|
||||
|
||||
const beta = new OpenAIShimBeta({
|
||||
|
||||
41
src/services/api/providerConfig.github.test.ts
Normal file
41
src/services/api/providerConfig.github.test.ts
Normal file
@@ -0,0 +1,41 @@
|
||||
import { afterEach, expect, test } from 'bun:test'
|
||||
|
||||
import {
|
||||
DEFAULT_GITHUB_MODELS_API_MODEL,
|
||||
normalizeGithubModelsApiModel,
|
||||
resolveProviderRequest,
|
||||
} from './providerConfig.js'
|
||||
|
||||
const originalUseGithub = process.env.CLAUDE_CODE_USE_GITHUB
|
||||
|
||||
afterEach(() => {
|
||||
if (originalUseGithub === undefined) {
|
||||
delete process.env.CLAUDE_CODE_USE_GITHUB
|
||||
} else {
|
||||
process.env.CLAUDE_CODE_USE_GITHUB = originalUseGithub
|
||||
}
|
||||
})
|
||||
|
||||
test.each([
|
||||
['copilot', DEFAULT_GITHUB_MODELS_API_MODEL],
|
||||
['github:copilot', DEFAULT_GITHUB_MODELS_API_MODEL],
|
||||
['', DEFAULT_GITHUB_MODELS_API_MODEL],
|
||||
['github:gpt-4o', 'gpt-4o'],
|
||||
['gpt-4o', 'gpt-4o'],
|
||||
['github:copilot?reasoning=high', DEFAULT_GITHUB_MODELS_API_MODEL],
|
||||
] as const)('normalizeGithubModelsApiModel(%s) -> %s', (input, expected) => {
|
||||
expect(normalizeGithubModelsApiModel(input)).toBe(expected)
|
||||
})
|
||||
|
||||
test('resolveProviderRequest applies GitHub normalization when CLAUDE_CODE_USE_GITHUB=1', () => {
|
||||
process.env.CLAUDE_CODE_USE_GITHUB = '1'
|
||||
const r = resolveProviderRequest({ model: 'github:gpt-4o' })
|
||||
expect(r.resolvedModel).toBe('gpt-4o')
|
||||
expect(r.transport).toBe('chat_completions')
|
||||
})
|
||||
|
||||
test('resolveProviderRequest leaves model unchanged without GitHub flag', () => {
|
||||
delete process.env.CLAUDE_CODE_USE_GITHUB
|
||||
const r = resolveProviderRequest({ model: 'github:gpt-4o' })
|
||||
expect(r.resolvedModel).toBe('github:gpt-4o')
|
||||
})
|
||||
@@ -2,8 +2,12 @@ import { existsSync, readFileSync } from 'node:fs'
|
||||
import { homedir } from 'node:os'
|
||||
import { join } from 'node:path'
|
||||
|
||||
import { isEnvTruthy } from '../../utils/envUtils.js'
|
||||
|
||||
export const DEFAULT_OPENAI_BASE_URL = 'https://api.openai.com/v1'
|
||||
export const DEFAULT_CODEX_BASE_URL = 'https://chatgpt.com/backend-api/codex'
|
||||
/** Default GitHub Models API model when user selects copilot / github:copilot */
|
||||
export const DEFAULT_GITHUB_MODELS_API_MODEL = 'openai/gpt-4.1'
|
||||
|
||||
const CODEX_ALIAS_MODELS: Record<
|
||||
string,
|
||||
@@ -171,16 +175,31 @@ export function isCodexBaseUrl(baseUrl: string | undefined): boolean {
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Normalize user model string for GitHub Models inference (models.github.ai).
|
||||
* Mirrors runtime devsper `github._normalize_model_id`.
|
||||
*/
|
||||
export function normalizeGithubModelsApiModel(requestedModel: string): string {
|
||||
const noQuery = requestedModel.split('?', 1)[0] ?? requestedModel
|
||||
const segment =
|
||||
noQuery.includes(':') ? noQuery.split(':', 2)[1]!.trim() : noQuery.trim()
|
||||
if (!segment || segment.toLowerCase() === 'copilot') {
|
||||
return DEFAULT_GITHUB_MODELS_API_MODEL
|
||||
}
|
||||
return segment
|
||||
}
|
||||
|
||||
export function resolveProviderRequest(options?: {
|
||||
model?: string
|
||||
baseUrl?: string
|
||||
fallbackModel?: string
|
||||
}): ResolvedProviderRequest {
|
||||
const isGithubMode = isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB)
|
||||
const requestedModel =
|
||||
options?.model?.trim() ||
|
||||
process.env.OPENAI_MODEL?.trim() ||
|
||||
options?.fallbackModel?.trim() ||
|
||||
'gpt-4o'
|
||||
(isGithubMode ? 'github:copilot' : 'gpt-4o')
|
||||
const descriptor = parseModelDescriptor(requestedModel)
|
||||
const rawBaseUrl =
|
||||
options?.baseUrl ??
|
||||
@@ -192,10 +211,16 @@ export function resolveProviderRequest(options?: {
|
||||
? 'codex_responses'
|
||||
: 'chat_completions'
|
||||
|
||||
const resolvedModel =
|
||||
transport === 'chat_completions' &&
|
||||
isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB)
|
||||
? normalizeGithubModelsApiModel(requestedModel)
|
||||
: descriptor.baseModel
|
||||
|
||||
return {
|
||||
transport,
|
||||
requestedModel,
|
||||
resolvedModel: descriptor.baseModel,
|
||||
resolvedModel,
|
||||
baseUrl:
|
||||
(rawBaseUrl ??
|
||||
(transport === 'codex_responses'
|
||||
|
||||
94
src/services/github/deviceFlow.test.ts
Normal file
94
src/services/github/deviceFlow.test.ts
Normal file
@@ -0,0 +1,94 @@
|
||||
import { afterEach, describe, expect, mock, test } from 'bun:test'
|
||||
|
||||
import {
|
||||
GitHubDeviceFlowError,
|
||||
pollAccessToken,
|
||||
requestDeviceCode,
|
||||
} from './deviceFlow.js'
|
||||
|
||||
describe('requestDeviceCode', () => {
|
||||
const originalFetch = globalThis.fetch
|
||||
|
||||
afterEach(() => {
|
||||
globalThis.fetch = originalFetch
|
||||
})
|
||||
|
||||
test('parses successful device code response', async () => {
|
||||
globalThis.fetch = mock(() =>
|
||||
Promise.resolve(
|
||||
new Response(
|
||||
JSON.stringify({
|
||||
device_code: 'abc',
|
||||
user_code: 'ABCD-1234',
|
||||
verification_uri: 'https://github.com/login/device',
|
||||
expires_in: 600,
|
||||
interval: 5,
|
||||
}),
|
||||
{ status: 200 },
|
||||
),
|
||||
),
|
||||
)
|
||||
|
||||
const r = await requestDeviceCode({
|
||||
clientId: 'test-client',
|
||||
fetchImpl: globalThis.fetch,
|
||||
})
|
||||
expect(r.device_code).toBe('abc')
|
||||
expect(r.user_code).toBe('ABCD-1234')
|
||||
expect(r.verification_uri).toBe('https://github.com/login/device')
|
||||
expect(r.expires_in).toBe(600)
|
||||
expect(r.interval).toBe(5)
|
||||
})
|
||||
|
||||
test('throws on HTTP error', async () => {
|
||||
globalThis.fetch = mock(() =>
|
||||
Promise.resolve(new Response('bad', { status: 500 })),
|
||||
)
|
||||
await expect(
|
||||
requestDeviceCode({ clientId: 'x', fetchImpl: globalThis.fetch }),
|
||||
).rejects.toThrow(GitHubDeviceFlowError)
|
||||
})
|
||||
})
|
||||
|
||||
describe('pollAccessToken', () => {
|
||||
const originalFetch = globalThis.fetch
|
||||
|
||||
afterEach(() => {
|
||||
globalThis.fetch = originalFetch
|
||||
})
|
||||
|
||||
test('returns token when GitHub responds with access_token immediately', async () => {
|
||||
let calls = 0
|
||||
globalThis.fetch = mock(() => {
|
||||
calls++
|
||||
return Promise.resolve(
|
||||
new Response(JSON.stringify({ access_token: 'tok-xyz' }), {
|
||||
status: 200,
|
||||
}),
|
||||
)
|
||||
})
|
||||
|
||||
const token = await pollAccessToken('dev-code', {
|
||||
clientId: 'cid',
|
||||
fetchImpl: globalThis.fetch,
|
||||
})
|
||||
expect(token).toBe('tok-xyz')
|
||||
expect(calls).toBe(1)
|
||||
})
|
||||
|
||||
test('throws on access_denied', async () => {
|
||||
globalThis.fetch = mock(() =>
|
||||
Promise.resolve(
|
||||
new Response(JSON.stringify({ error: 'access_denied' }), {
|
||||
status: 200,
|
||||
}),
|
||||
),
|
||||
)
|
||||
await expect(
|
||||
pollAccessToken('dc', {
|
||||
clientId: 'c',
|
||||
fetchImpl: globalThis.fetch,
|
||||
}),
|
||||
).rejects.toThrow(/denied/)
|
||||
})
|
||||
})
|
||||
174
src/services/github/deviceFlow.ts
Normal file
174
src/services/github/deviceFlow.ts
Normal file
@@ -0,0 +1,174 @@
|
||||
/**
|
||||
* GitHub OAuth device flow for CLI login (https://docs.github.com/en/apps/oauth-apps/building-oauth-apps/authorizing-oauth-apps#device-flow).
|
||||
*/
|
||||
|
||||
import { execFileNoThrow } from '../../utils/execFileNoThrow.js'
|
||||
|
||||
export const DEFAULT_GITHUB_DEVICE_FLOW_CLIENT_ID = 'Ov23liXjWSSui6QIahPl'
|
||||
|
||||
export const GITHUB_DEVICE_CODE_URL = 'https://github.com/login/device/code'
|
||||
export const GITHUB_DEVICE_ACCESS_TOKEN_URL =
|
||||
'https://github.com/login/oauth/access_token'
|
||||
|
||||
/** Match runtime devsper github_oauth DEFAULT_SCOPE */
|
||||
export const DEFAULT_GITHUB_DEVICE_SCOPE = 'read:user,models:read'
|
||||
|
||||
export class GitHubDeviceFlowError extends Error {
|
||||
constructor(message: string) {
|
||||
super(message)
|
||||
this.name = 'GitHubDeviceFlowError'
|
||||
}
|
||||
}
|
||||
|
||||
export type DeviceCodeResult = {
|
||||
device_code: string
|
||||
user_code: string
|
||||
verification_uri: string
|
||||
expires_in: number
|
||||
interval: number
|
||||
}
|
||||
|
||||
export function getGithubDeviceFlowClientId(): string {
|
||||
return (
|
||||
process.env.GITHUB_DEVICE_FLOW_CLIENT_ID?.trim() ||
|
||||
DEFAULT_GITHUB_DEVICE_FLOW_CLIENT_ID
|
||||
)
|
||||
}
|
||||
|
||||
function sleep(ms: number): Promise<void> {
|
||||
return new Promise(resolve => setTimeout(resolve, ms))
|
||||
}
|
||||
|
||||
export async function requestDeviceCode(options?: {
|
||||
clientId?: string
|
||||
scope?: string
|
||||
fetchImpl?: typeof fetch
|
||||
}): Promise<DeviceCodeResult> {
|
||||
const clientId = options?.clientId ?? getGithubDeviceFlowClientId()
|
||||
if (!clientId) {
|
||||
throw new GitHubDeviceFlowError(
|
||||
'No OAuth client ID: set GITHUB_DEVICE_FLOW_CLIENT_ID or paste a PAT instead.',
|
||||
)
|
||||
}
|
||||
const fetchFn = options?.fetchImpl ?? fetch
|
||||
const res = await fetchFn(GITHUB_DEVICE_CODE_URL, {
|
||||
method: 'POST',
|
||||
headers: { Accept: 'application/json' },
|
||||
body: new URLSearchParams({
|
||||
client_id: clientId,
|
||||
scope: options?.scope ?? DEFAULT_GITHUB_DEVICE_SCOPE,
|
||||
}),
|
||||
})
|
||||
if (!res.ok) {
|
||||
const text = await res.text().catch(() => '')
|
||||
throw new GitHubDeviceFlowError(
|
||||
`Device code request failed: ${res.status} ${text}`,
|
||||
)
|
||||
}
|
||||
const data = (await res.json()) as Record<string, unknown>
|
||||
const device_code = data.device_code
|
||||
const user_code = data.user_code
|
||||
const verification_uri = data.verification_uri
|
||||
if (
|
||||
typeof device_code !== 'string' ||
|
||||
typeof user_code !== 'string' ||
|
||||
typeof verification_uri !== 'string'
|
||||
) {
|
||||
throw new GitHubDeviceFlowError('Malformed device code response from GitHub')
|
||||
}
|
||||
return {
|
||||
device_code,
|
||||
user_code,
|
||||
verification_uri,
|
||||
expires_in: typeof data.expires_in === 'number' ? data.expires_in : 900,
|
||||
interval: typeof data.interval === 'number' ? data.interval : 5,
|
||||
}
|
||||
}
|
||||
|
||||
export type PollOptions = {
|
||||
clientId?: string
|
||||
initialInterval?: number
|
||||
timeoutSeconds?: number
|
||||
fetchImpl?: typeof fetch
|
||||
}
|
||||
|
||||
export async function pollAccessToken(
|
||||
deviceCode: string,
|
||||
options?: PollOptions,
|
||||
): Promise<string> {
|
||||
const clientId = options?.clientId ?? getGithubDeviceFlowClientId()
|
||||
if (!clientId) {
|
||||
throw new GitHubDeviceFlowError('client_id required for polling')
|
||||
}
|
||||
let interval = Math.max(1, options?.initialInterval ?? 5)
|
||||
const timeoutSeconds = options?.timeoutSeconds ?? 900
|
||||
const fetchFn = options?.fetchImpl ?? fetch
|
||||
const start = Date.now()
|
||||
|
||||
while ((Date.now() - start) / 1000 < timeoutSeconds) {
|
||||
const res = await fetchFn(GITHUB_DEVICE_ACCESS_TOKEN_URL, {
|
||||
method: 'POST',
|
||||
headers: { Accept: 'application/json' },
|
||||
body: new URLSearchParams({
|
||||
client_id: clientId,
|
||||
device_code: deviceCode,
|
||||
grant_type: 'urn:ietf:params:oauth:grant-type:device_code',
|
||||
}),
|
||||
})
|
||||
if (!res.ok) {
|
||||
const text = await res.text().catch(() => '')
|
||||
throw new GitHubDeviceFlowError(
|
||||
`Token request failed: ${res.status} ${text}`,
|
||||
)
|
||||
}
|
||||
const data = (await res.json()) as Record<string, unknown>
|
||||
const err = data.error as string | undefined
|
||||
if (err == null) {
|
||||
const token = data.access_token
|
||||
if (typeof token === 'string' && token) {
|
||||
return token
|
||||
}
|
||||
throw new GitHubDeviceFlowError('No access_token in response')
|
||||
}
|
||||
if (err === 'authorization_pending') {
|
||||
await sleep(interval * 1000)
|
||||
continue
|
||||
}
|
||||
if (err === 'slow_down') {
|
||||
interval =
|
||||
typeof data.interval === 'number' ? data.interval : interval + 5
|
||||
await sleep(interval * 1000)
|
||||
continue
|
||||
}
|
||||
if (err === 'expired_token') {
|
||||
throw new GitHubDeviceFlowError(
|
||||
'Device code expired. Start the login flow again.',
|
||||
)
|
||||
}
|
||||
if (err === 'access_denied') {
|
||||
throw new GitHubDeviceFlowError('Authorization was denied or cancelled.')
|
||||
}
|
||||
throw new GitHubDeviceFlowError(`GitHub OAuth error: ${err}`)
|
||||
}
|
||||
throw new GitHubDeviceFlowError('Timed out waiting for authorization.')
|
||||
}
|
||||
|
||||
/**
|
||||
* Best-effort open browser / OS handler for the verification URL.
|
||||
*/
|
||||
export async function openVerificationUri(uri: string): Promise<void> {
|
||||
try {
|
||||
if (process.platform === 'darwin') {
|
||||
await execFileNoThrow('open', [uri], { useCwd: false, timeout: 5000 })
|
||||
} else if (process.platform === 'win32') {
|
||||
await execFileNoThrow('cmd', ['/c', 'start', '', uri], {
|
||||
useCwd: false,
|
||||
timeout: 5000,
|
||||
})
|
||||
} else {
|
||||
await execFileNoThrow('xdg-open', [uri], { useCwd: false, timeout: 5000 })
|
||||
}
|
||||
} catch {
|
||||
// User can open the URL manually
|
||||
}
|
||||
}
|
||||
48
src/services/mcp/client.test.ts
Normal file
48
src/services/mcp/client.test.ts
Normal file
@@ -0,0 +1,48 @@
|
||||
import assert from 'node:assert/strict'
|
||||
import test from 'node:test'
|
||||
|
||||
import { cleanupFailedConnection } from './client.js'
|
||||
|
||||
test('cleanupFailedConnection awaits transport close before resolving', async () => {
|
||||
let closed = false
|
||||
let resolveClose: (() => void) | undefined
|
||||
|
||||
const transport = {
|
||||
close: async () =>
|
||||
await new Promise<void>(resolve => {
|
||||
resolveClose = () => {
|
||||
closed = true
|
||||
resolve()
|
||||
}
|
||||
}),
|
||||
}
|
||||
|
||||
const cleanupPromise = cleanupFailedConnection(transport)
|
||||
|
||||
assert.equal(closed, false)
|
||||
resolveClose?.()
|
||||
await cleanupPromise
|
||||
assert.equal(closed, true)
|
||||
})
|
||||
|
||||
test('cleanupFailedConnection closes in-process server and transport', async () => {
|
||||
let inProcessClosed = false
|
||||
let transportClosed = false
|
||||
|
||||
const inProcessServer = {
|
||||
close: async () => {
|
||||
inProcessClosed = true
|
||||
},
|
||||
}
|
||||
|
||||
const transport = {
|
||||
close: async () => {
|
||||
transportClosed = true
|
||||
},
|
||||
}
|
||||
|
||||
await cleanupFailedConnection(transport, inProcessServer)
|
||||
|
||||
assert.equal(inProcessClosed, true)
|
||||
assert.equal(transportClosed, true)
|
||||
})
|
||||
@@ -116,8 +116,8 @@ import { getLoggingSafeMcpBaseUrl } from './utils.js'
|
||||
/* eslint-disable @typescript-eslint/no-require-imports */
|
||||
const fetchMcpSkillsForClient = feature('MCP_SKILLS')
|
||||
? (
|
||||
require('../../skills/mcpSkills.js') as typeof import('../../skills/mcpSkills.js')
|
||||
).fetchMcpSkillsForClient
|
||||
require('../../skills/mcpSkills.js') as typeof import('../../skills/mcpSkills.js')
|
||||
).fetchMcpSkillsForClient
|
||||
: null
|
||||
|
||||
import { UnauthorizedError } from '@modelcontextprotocol/sdk/client/auth.js'
|
||||
@@ -240,12 +240,12 @@ const claudeInChromeToolRendering =
|
||||
// GrowthBook tengu_malort_pedway (see gates.ts).
|
||||
const computerUseWrapper = feature('CHICAGO_MCP')
|
||||
? (): typeof import('../../utils/computerUse/wrapper.js') =>
|
||||
require('../../utils/computerUse/wrapper.js')
|
||||
require('../../utils/computerUse/wrapper.js')
|
||||
: undefined
|
||||
const isComputerUseMCPServer = feature('CHICAGO_MCP')
|
||||
? (
|
||||
require('../../utils/computerUse/common.js') as typeof import('../../utils/computerUse/common.js')
|
||||
).isComputerUseMCPServer
|
||||
require('../../utils/computerUse/common.js') as typeof import('../../utils/computerUse/common.js')
|
||||
).isComputerUseMCPServer
|
||||
: undefined
|
||||
|
||||
import { mkdir, readFile, unlink, writeFile } from 'fs/promises'
|
||||
@@ -326,9 +326,9 @@ function mcpBaseUrlAnalytics(serverRef: ScopedMcpServerConfig): {
|
||||
const url = getLoggingSafeMcpBaseUrl(serverRef)
|
||||
return url
|
||||
? {
|
||||
mcpServerBaseUrl:
|
||||
url as AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS,
|
||||
}
|
||||
mcpServerBaseUrl:
|
||||
url as AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS,
|
||||
}
|
||||
: {}
|
||||
}
|
||||
|
||||
@@ -560,6 +560,22 @@ function getRemoteMcpServerConnectionBatchSize(): number {
|
||||
)
|
||||
}
|
||||
|
||||
type InProcessMcpServer = {
|
||||
connect(t: Transport): Promise<void>
|
||||
close(): Promise<void>
|
||||
}
|
||||
|
||||
export async function cleanupFailedConnection(
|
||||
transport: Pick<Transport, 'close'>,
|
||||
inProcessServer?: Pick<InProcessMcpServer, 'close'>,
|
||||
): Promise<void> {
|
||||
if (inProcessServer) {
|
||||
await inProcessServer.close().catch(() => {})
|
||||
}
|
||||
|
||||
await transport.close().catch(() => {})
|
||||
}
|
||||
|
||||
function isLocalMcpServer(config: ScopedMcpServerConfig): boolean {
|
||||
return !config.type || config.type === 'stdio' || config.type === 'sdk'
|
||||
}
|
||||
@@ -606,9 +622,7 @@ export const connectToServer = memoize(
|
||||
},
|
||||
): Promise<MCPServerConnection> => {
|
||||
const connectStartTime = Date.now()
|
||||
let inProcessServer:
|
||||
| { connect(t: Transport): Promise<void>; close(): Promise<void> }
|
||||
| undefined
|
||||
let inProcessServer: InProcessMcpServer | undefined
|
||||
try {
|
||||
let transport
|
||||
|
||||
@@ -683,20 +697,20 @@ export const connectToServer = memoize(
|
||||
const transportOptions: SSEClientTransportOptions =
|
||||
proxyOptions.dispatcher
|
||||
? {
|
||||
eventSourceInit: {
|
||||
fetch: async (url: string | URL, init?: RequestInit) => {
|
||||
// eslint-disable-next-line eslint-plugin-n/no-unsupported-features/node-builtins
|
||||
return fetch(url, {
|
||||
...init,
|
||||
...proxyOptions,
|
||||
headers: {
|
||||
'User-Agent': getMCPUserAgent(),
|
||||
...init?.headers,
|
||||
},
|
||||
})
|
||||
},
|
||||
eventSourceInit: {
|
||||
fetch: async (url: string | URL, init?: RequestInit) => {
|
||||
// eslint-disable-next-line eslint-plugin-n/no-unsupported-features/node-builtins
|
||||
return fetch(url, {
|
||||
...init,
|
||||
...proxyOptions,
|
||||
headers: {
|
||||
'User-Agent': getMCPUserAgent(),
|
||||
...init?.headers,
|
||||
},
|
||||
})
|
||||
},
|
||||
}
|
||||
},
|
||||
}
|
||||
: {}
|
||||
|
||||
transport = new SSEClientTransport(
|
||||
@@ -832,8 +846,8 @@ export const connectToServer = memoize(
|
||||
'User-Agent': getMCPUserAgent(),
|
||||
...(sessionIngressToken &&
|
||||
!hasOAuthTokens && {
|
||||
Authorization: `Bearer ${sessionIngressToken}`,
|
||||
}),
|
||||
Authorization: `Bearer ${sessionIngressToken}`,
|
||||
}),
|
||||
...combinedHeaders,
|
||||
},
|
||||
},
|
||||
@@ -842,10 +856,10 @@ export const connectToServer = memoize(
|
||||
// Redact sensitive headers before logging
|
||||
const headersForLogging = transportOptions.requestInit?.headers
|
||||
? mapValues(
|
||||
transportOptions.requestInit.headers as Record<string, string>,
|
||||
(value, key) =>
|
||||
key.toLowerCase() === 'authorization' ? '[REDACTED]' : value,
|
||||
)
|
||||
transportOptions.requestInit.headers as Record<string, string>,
|
||||
(value, key) =>
|
||||
key.toLowerCase() === 'authorization' ? '[REDACTED]' : value,
|
||||
)
|
||||
: undefined
|
||||
|
||||
logMCPDebug(
|
||||
@@ -985,7 +999,7 @@ export const connectToServer = memoize(
|
||||
const client = new Client(
|
||||
{
|
||||
name: 'claude-code',
|
||||
title: 'Claude Code',
|
||||
title: 'Open Claude',
|
||||
version: MACRO.VERSION ?? 'unknown',
|
||||
description: "Anthropic's agentic coding tool",
|
||||
websiteUrl: PRODUCT_URL,
|
||||
@@ -1054,9 +1068,9 @@ export const connectToServer = memoize(
|
||||
`Connection timeout triggered after ${elapsed}ms (limit: ${getConnectionTimeoutMs()}ms)`,
|
||||
)
|
||||
if (inProcessServer) {
|
||||
inProcessServer.close().catch(() => {})
|
||||
inProcessServer.close().catch(() => { })
|
||||
}
|
||||
transport.close().catch(() => {})
|
||||
transport.close().catch(() => { })
|
||||
reject(
|
||||
new TelemetrySafeError_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS(
|
||||
`MCP server "${name}" connection timed out after ${getConnectionTimeoutMs()}ms`,
|
||||
@@ -1145,9 +1159,10 @@ export const connectToServer = memoize(
|
||||
})
|
||||
}
|
||||
if (inProcessServer) {
|
||||
inProcessServer.close().catch(() => {})
|
||||
await cleanupFailedConnection(transport, inProcessServer)
|
||||
} else {
|
||||
await cleanupFailedConnection(transport)
|
||||
}
|
||||
transport.close().catch(() => {})
|
||||
if (stderrOutput) {
|
||||
logMCPError(name, `Server stderr: ${stderrOutput}`)
|
||||
}
|
||||
@@ -1627,7 +1642,7 @@ export const connectToServer = memoize(
|
||||
logMCPError(name, `Connection failed: ${errorMessage(error)}`)
|
||||
|
||||
if (inProcessServer) {
|
||||
inProcessServer.close().catch(() => {})
|
||||
inProcessServer.close().catch(() => { })
|
||||
}
|
||||
return {
|
||||
name,
|
||||
@@ -1779,8 +1794,8 @@ export const fetchToolsForClient = memoizeWithLRU(
|
||||
searchHint:
|
||||
typeof tool._meta?.['anthropic/searchHint'] === 'string'
|
||||
? tool._meta['anthropic/searchHint']
|
||||
.replace(/\s+/g, ' ')
|
||||
.trim() || undefined
|
||||
.replace(/\s+/g, ' ')
|
||||
.trim() || undefined
|
||||
: undefined,
|
||||
alwaysLoad: tool._meta?.['anthropic/alwaysLoad'] === true,
|
||||
async description() {
|
||||
@@ -1871,11 +1886,11 @@ export const fetchToolsForClient = memoizeWithLRU(
|
||||
onProgress:
|
||||
onProgress && toolUseId
|
||||
? progressData => {
|
||||
onProgress({
|
||||
toolUseID: toolUseId,
|
||||
data: progressData,
|
||||
})
|
||||
}
|
||||
onProgress({
|
||||
toolUseID: toolUseId,
|
||||
data: progressData,
|
||||
})
|
||||
}
|
||||
: undefined,
|
||||
handleElicitation: context.handleElicitation,
|
||||
})
|
||||
@@ -1975,14 +1990,14 @@ export const fetchToolsForClient = memoizeWithLRU(
|
||||
return `${client.name} - ${displayName} (MCP)`
|
||||
},
|
||||
...(isClaudeInChromeMCPServer(client.name) &&
|
||||
(client.config.type === 'stdio' || !client.config.type)
|
||||
(client.config.type === 'stdio' || !client.config.type)
|
||||
? claudeInChromeToolRendering().getClaudeInChromeMCPToolOverrides(
|
||||
tool.name,
|
||||
)
|
||||
tool.name,
|
||||
)
|
||||
: {}),
|
||||
...(feature('CHICAGO_MCP') &&
|
||||
(client.config.type === 'stdio' || !client.config.type) &&
|
||||
isComputerUseMCPServer!(client.name)
|
||||
(client.config.type === 'stdio' || !client.config.type) &&
|
||||
isComputerUseMCPServer!(client.name)
|
||||
? computerUseWrapper!().getComputerUseMCPToolOverrides(tool.name)
|
||||
: {}),
|
||||
}
|
||||
@@ -2876,9 +2891,9 @@ export async function callMCPToolWithUrlElicitationRetry({
|
||||
const errorData = error.data
|
||||
const rawElicitations =
|
||||
errorData != null &&
|
||||
typeof errorData === 'object' &&
|
||||
'elicitations' in errorData &&
|
||||
Array.isArray(errorData.elicitations)
|
||||
typeof errorData === 'object' &&
|
||||
'elicitations' in errorData &&
|
||||
Array.isArray(errorData.elicitations)
|
||||
? (errorData.elicitations as unknown[])
|
||||
: []
|
||||
|
||||
@@ -3101,16 +3116,16 @@ async function callMCPTool({
|
||||
timeout: timeoutMs,
|
||||
onprogress: onProgress
|
||||
? sdkProgress => {
|
||||
onProgress({
|
||||
type: 'mcp_progress',
|
||||
status: 'progress',
|
||||
serverName: name,
|
||||
toolName: tool,
|
||||
progress: sdkProgress.progress,
|
||||
total: sdkProgress.total,
|
||||
progressMessage: sdkProgress.message,
|
||||
})
|
||||
}
|
||||
onProgress({
|
||||
type: 'mcp_progress',
|
||||
status: 'progress',
|
||||
serverName: name,
|
||||
toolName: tool,
|
||||
progress: sdkProgress.progress,
|
||||
total: sdkProgress.total,
|
||||
progressMessage: sdkProgress.message,
|
||||
})
|
||||
}
|
||||
: undefined,
|
||||
},
|
||||
),
|
||||
@@ -3280,7 +3295,7 @@ export async function setupSdkMcpClients(
|
||||
const client = new Client(
|
||||
{
|
||||
name: 'claude-code',
|
||||
title: 'Claude Code',
|
||||
title: 'Open Claude',
|
||||
version: MACRO.VERSION ?? 'unknown',
|
||||
description: "Anthropic's agentic coding tool",
|
||||
websiteUrl: PRODUCT_URL,
|
||||
|
||||
540
src/services/mcp/doctor.test.ts
Normal file
540
src/services/mcp/doctor.test.ts
Normal file
@@ -0,0 +1,540 @@
|
||||
import assert from 'node:assert/strict'
|
||||
import test from 'node:test'
|
||||
|
||||
import type { ValidationError } from '../../utils/settings/validation.js'
|
||||
|
||||
import {
|
||||
buildEmptyDoctorReport,
|
||||
doctorAllServers,
|
||||
doctorServer,
|
||||
findingsFromValidationErrors,
|
||||
type McpDoctorDependencies,
|
||||
} from './doctor.js'
|
||||
|
||||
function stdioConfig(scope: 'local' | 'project' | 'user' | 'enterprise', command: string) {
|
||||
return {
|
||||
type: 'stdio' as const,
|
||||
command,
|
||||
args: [],
|
||||
scope,
|
||||
}
|
||||
}
|
||||
|
||||
function makeDependencies(overrides: Partial<McpDoctorDependencies> = {}): McpDoctorDependencies {
|
||||
return {
|
||||
getAllMcpConfigs: async () => ({ servers: {}, errors: [] }),
|
||||
getMcpConfigsByScope: () => ({ servers: {}, errors: [] }),
|
||||
getProjectMcpServerStatus: () => 'approved',
|
||||
isMcpServerDisabled: () => false,
|
||||
describeMcpConfigFilePath: scope => `scope://${scope}`,
|
||||
clearServerCache: async () => {},
|
||||
connectToServer: async (name, config) => ({
|
||||
name,
|
||||
type: 'connected',
|
||||
capabilities: {},
|
||||
config,
|
||||
cleanup: async () => {},
|
||||
}),
|
||||
...overrides,
|
||||
}
|
||||
}
|
||||
|
||||
test('buildEmptyDoctorReport returns zeroed summary', () => {
|
||||
const report = buildEmptyDoctorReport({
|
||||
configOnly: true,
|
||||
scopeFilter: 'project',
|
||||
targetName: 'filesystem',
|
||||
})
|
||||
|
||||
assert.equal(report.targetName, 'filesystem')
|
||||
assert.equal(report.scopeFilter, 'project')
|
||||
assert.equal(report.configOnly, true)
|
||||
assert.deepEqual(report.summary, {
|
||||
totalReports: 0,
|
||||
healthy: 0,
|
||||
warnings: 0,
|
||||
blocking: 0,
|
||||
})
|
||||
assert.deepEqual(report.findings, [])
|
||||
assert.deepEqual(report.servers, [])
|
||||
})
|
||||
|
||||
test('findingsFromValidationErrors maps missing env warnings into doctor findings', () => {
|
||||
const validationErrors: ValidationError[] = [
|
||||
{
|
||||
file: '.mcp.json',
|
||||
path: 'mcpServers.filesystem',
|
||||
message: 'Missing environment variables: API_KEY, API_URL',
|
||||
suggestion: 'Set the following environment variables: API_KEY, API_URL',
|
||||
mcpErrorMetadata: {
|
||||
scope: 'project',
|
||||
serverName: 'filesystem',
|
||||
severity: 'warning',
|
||||
},
|
||||
},
|
||||
]
|
||||
|
||||
const findings = findingsFromValidationErrors(validationErrors)
|
||||
|
||||
assert.equal(findings.length, 1)
|
||||
assert.deepEqual(findings[0], {
|
||||
blocking: false,
|
||||
code: 'config.missing_env_vars',
|
||||
message: 'Missing environment variables: API_KEY, API_URL',
|
||||
remediation: 'Set the following environment variables: API_KEY, API_URL',
|
||||
scope: 'project',
|
||||
serverName: 'filesystem',
|
||||
severity: 'warn',
|
||||
sourcePath: '.mcp.json',
|
||||
})
|
||||
})
|
||||
|
||||
test('findingsFromValidationErrors maps Windows npx warnings into doctor findings', () => {
|
||||
const validationErrors: ValidationError[] = [
|
||||
{
|
||||
file: '.mcp.json',
|
||||
path: 'mcpServers.node-tools',
|
||||
message: "Windows requires 'cmd /c' wrapper to execute npx",
|
||||
suggestion:
|
||||
'Change command to "cmd" with args ["/c", "npx", ...]. See: https://code.claude.com/docs/en/mcp#configure-mcp-servers',
|
||||
mcpErrorMetadata: {
|
||||
scope: 'project',
|
||||
serverName: 'node-tools',
|
||||
severity: 'warning',
|
||||
},
|
||||
},
|
||||
]
|
||||
|
||||
const findings = findingsFromValidationErrors(validationErrors)
|
||||
|
||||
assert.equal(findings.length, 1)
|
||||
assert.equal(findings[0]?.code, 'config.windows_npx_wrapper_required')
|
||||
assert.equal(findings[0]?.serverName, 'node-tools')
|
||||
assert.equal(findings[0]?.severity, 'warn')
|
||||
assert.equal(findings[0]?.blocking, false)
|
||||
})
|
||||
|
||||
test('findingsFromValidationErrors maps fatal parse errors into blocking findings', () => {
|
||||
const validationErrors: ValidationError[] = [
|
||||
{
|
||||
file: 'C:/repo/.mcp.json',
|
||||
path: '',
|
||||
message: 'MCP config is not a valid JSON',
|
||||
suggestion: 'Fix the JSON syntax errors in the file',
|
||||
mcpErrorMetadata: {
|
||||
scope: 'project',
|
||||
severity: 'fatal',
|
||||
},
|
||||
},
|
||||
]
|
||||
|
||||
const findings = findingsFromValidationErrors(validationErrors)
|
||||
|
||||
assert.equal(findings.length, 1)
|
||||
assert.equal(findings[0]?.code, 'config.invalid_json')
|
||||
assert.equal(findings[0]?.severity, 'error')
|
||||
assert.equal(findings[0]?.blocking, true)
|
||||
})
|
||||
|
||||
test('doctorAllServers reports global validation findings once without duplicating them into every server', async () => {
|
||||
const localConfig = stdioConfig('local', 'node-local')
|
||||
const deps = makeDependencies({
|
||||
getAllMcpConfigs: async () => ({
|
||||
servers: { filesystem: localConfig },
|
||||
errors: [],
|
||||
}),
|
||||
getMcpConfigsByScope: scope =>
|
||||
scope === 'project'
|
||||
? {
|
||||
servers: {},
|
||||
errors: [
|
||||
{
|
||||
file: '.mcp.json',
|
||||
path: '',
|
||||
message: 'MCP config is not a valid JSON',
|
||||
suggestion: 'Fix the JSON syntax errors in the file',
|
||||
mcpErrorMetadata: {
|
||||
scope: 'project',
|
||||
severity: 'fatal',
|
||||
},
|
||||
},
|
||||
],
|
||||
}
|
||||
: scope === 'local'
|
||||
? { servers: { filesystem: localConfig }, errors: [] }
|
||||
: { servers: {}, errors: [] },
|
||||
})
|
||||
|
||||
const report = await doctorAllServers({ configOnly: true }, deps)
|
||||
|
||||
assert.equal(report.summary.totalReports, 1)
|
||||
assert.equal(report.summary.blocking, 1)
|
||||
assert.equal(report.findings.length, 1)
|
||||
assert.equal(report.findings[0]?.code, 'config.invalid_json')
|
||||
assert.deepEqual(report.servers[0]?.findings, [])
|
||||
})
|
||||
|
||||
test('doctorServer explains same-name shadowing across scopes', async () => {
|
||||
const localConfig = stdioConfig('local', 'node-local')
|
||||
const userConfig = stdioConfig('user', 'node-user')
|
||||
const deps = makeDependencies({
|
||||
getAllMcpConfigs: async () => ({
|
||||
servers: {
|
||||
filesystem: localConfig,
|
||||
},
|
||||
errors: [],
|
||||
}),
|
||||
getMcpConfigsByScope: scope => {
|
||||
switch (scope) {
|
||||
case 'local':
|
||||
return { servers: { filesystem: localConfig }, errors: [] }
|
||||
case 'user':
|
||||
return { servers: { filesystem: userConfig }, errors: [] }
|
||||
default:
|
||||
return { servers: {}, errors: [] }
|
||||
}
|
||||
},
|
||||
})
|
||||
|
||||
const report = await doctorServer('filesystem', { configOnly: true }, deps)
|
||||
assert.equal(report.servers.length, 1)
|
||||
assert.equal(report.servers[0]?.definitions.length, 2)
|
||||
assert.equal(report.servers[0]?.definitions.find(def => def.sourceType === 'local')?.runtimeActive, true)
|
||||
assert.equal(report.servers[0]?.definitions.find(def => def.sourceType === 'user')?.runtimeActive, false)
|
||||
assert.deepEqual(
|
||||
report.servers[0]?.findings.map(finding => finding.code).sort(),
|
||||
['duplicate.same_name_multiple_scopes', 'scope.shadowed'],
|
||||
)
|
||||
})
|
||||
|
||||
test('doctorServer reports project servers pending approval', async () => {
|
||||
const projectConfig = stdioConfig('project', 'node-project')
|
||||
const deps = makeDependencies({
|
||||
getMcpConfigsByScope: scope =>
|
||||
scope === 'project'
|
||||
? { servers: { sentry: projectConfig }, errors: [] }
|
||||
: { servers: {}, errors: [] },
|
||||
getProjectMcpServerStatus: name => (name === 'sentry' ? 'pending' : 'approved'),
|
||||
})
|
||||
|
||||
const report = await doctorServer('sentry', { configOnly: true }, deps)
|
||||
assert.equal(report.servers.length, 1)
|
||||
assert.equal(report.servers[0]?.definitions[0]?.pendingApproval, true)
|
||||
assert.equal(report.servers[0]?.definitions[0]?.runtimeActive, false)
|
||||
assert.equal(report.servers[0]?.definitions[0]?.runtimeVisible, false)
|
||||
assert.equal(
|
||||
report.servers[0]?.findings.some(finding => finding.code === 'state.pending_project_approval'),
|
||||
true,
|
||||
)
|
||||
})
|
||||
|
||||
test('doctorServer does not treat disabled servers as runtime-active or live-check targets', async () => {
|
||||
let connectCalls = 0
|
||||
const localConfig = stdioConfig('local', 'node-local')
|
||||
const deps = makeDependencies({
|
||||
getAllMcpConfigs: async () => ({
|
||||
servers: { github: localConfig },
|
||||
errors: [],
|
||||
}),
|
||||
getMcpConfigsByScope: scope =>
|
||||
scope === 'local'
|
||||
? { servers: { github: localConfig }, errors: [] }
|
||||
: { servers: {}, errors: [] },
|
||||
isMcpServerDisabled: name => name === 'github',
|
||||
connectToServer: async (name, config) => {
|
||||
connectCalls += 1
|
||||
return {
|
||||
name,
|
||||
type: 'failed',
|
||||
config,
|
||||
error: 'should not connect',
|
||||
}
|
||||
},
|
||||
})
|
||||
|
||||
const report = await doctorServer('github', { configOnly: false }, deps)
|
||||
|
||||
assert.equal(connectCalls, 0)
|
||||
assert.equal(report.summary.blocking, 0)
|
||||
assert.equal(report.summary.warnings, 1)
|
||||
assert.equal(report.servers[0]?.definitions[0]?.disabled, true)
|
||||
assert.equal(report.servers[0]?.definitions[0]?.runtimeActive, false)
|
||||
assert.equal(report.servers[0]?.definitions[0]?.runtimeVisible, false)
|
||||
assert.equal(report.servers[0]?.liveCheck.result, 'disabled')
|
||||
assert.equal(
|
||||
report.servers[0]?.findings.some(finding => finding.code === 'state.disabled' && finding.severity === 'warn'),
|
||||
true,
|
||||
)
|
||||
})
|
||||
|
||||
test('doctorAllServers skips live checks in config-only mode', async () => {
|
||||
let connectCalls = 0
|
||||
const localConfig = stdioConfig('local', 'node-local')
|
||||
const deps = makeDependencies({
|
||||
getAllMcpConfigs: async () => ({
|
||||
servers: { linear: localConfig },
|
||||
errors: [],
|
||||
}),
|
||||
getMcpConfigsByScope: scope =>
|
||||
scope === 'local'
|
||||
? { servers: { linear: localConfig }, errors: [] }
|
||||
: { servers: {}, errors: [] },
|
||||
connectToServer: async (name, config) => {
|
||||
connectCalls += 1
|
||||
return {
|
||||
name,
|
||||
type: 'connected',
|
||||
capabilities: {},
|
||||
config,
|
||||
cleanup: async () => {},
|
||||
}
|
||||
},
|
||||
})
|
||||
|
||||
const report = await doctorAllServers({ configOnly: true }, deps)
|
||||
assert.equal(connectCalls, 0)
|
||||
assert.equal(report.servers[0]?.liveCheck.attempted, false)
|
||||
assert.equal(report.servers[0]?.liveCheck.result, 'skipped')
|
||||
})
|
||||
|
||||
test('doctorAllServers honors scopeFilter when collecting names', async () => {
|
||||
const pluginConfig = {
|
||||
type: 'http' as const,
|
||||
url: 'https://example.test/mcp',
|
||||
scope: 'dynamic' as const,
|
||||
pluginSource: 'plugin:github@official',
|
||||
}
|
||||
const deps = makeDependencies({
|
||||
getAllMcpConfigs: async () => ({
|
||||
servers: { 'plugin:github:github': pluginConfig },
|
||||
errors: [],
|
||||
}),
|
||||
})
|
||||
|
||||
const report = await doctorAllServers({ configOnly: true, scopeFilter: 'user' }, deps)
|
||||
|
||||
assert.equal(report.summary.totalReports, 0)
|
||||
assert.deepEqual(report.servers, [])
|
||||
})
|
||||
|
||||
test('doctorAllServers honors scopeFilter when collecting validation errors', async () => {
|
||||
const userConfig = stdioConfig('user', 'node-user')
|
||||
const deps = makeDependencies({
|
||||
getAllMcpConfigs: async () => ({
|
||||
servers: { filesystem: userConfig },
|
||||
errors: [],
|
||||
}),
|
||||
getMcpConfigsByScope: scope => {
|
||||
switch (scope) {
|
||||
case 'project':
|
||||
return {
|
||||
servers: {},
|
||||
errors: [
|
||||
{
|
||||
file: '.mcp.json',
|
||||
path: '',
|
||||
message: 'MCP config is not a valid JSON',
|
||||
suggestion: 'Fix the JSON syntax errors in the file',
|
||||
mcpErrorMetadata: {
|
||||
scope: 'project',
|
||||
severity: 'fatal',
|
||||
},
|
||||
},
|
||||
],
|
||||
}
|
||||
case 'user':
|
||||
return { servers: { filesystem: userConfig }, errors: [] }
|
||||
default:
|
||||
return { servers: {}, errors: [] }
|
||||
}
|
||||
},
|
||||
})
|
||||
|
||||
const report = await doctorAllServers({ configOnly: true, scopeFilter: 'user' }, deps)
|
||||
|
||||
assert.equal(report.summary.totalReports, 1)
|
||||
assert.equal(report.summary.blocking, 0)
|
||||
assert.deepEqual(report.findings, [])
|
||||
assert.deepEqual(report.servers[0]?.findings, [])
|
||||
})
|
||||
|
||||
test('doctorAllServers includes observed runtime definitions for plugin-only servers', async () => {
|
||||
const pluginConfig = {
|
||||
type: 'http' as const,
|
||||
url: 'https://example.test/mcp',
|
||||
scope: 'dynamic' as const,
|
||||
pluginSource: 'plugin:github@official',
|
||||
}
|
||||
const deps = makeDependencies({
|
||||
getAllMcpConfigs: async () => ({
|
||||
servers: { 'plugin:github:github': pluginConfig },
|
||||
errors: [],
|
||||
}),
|
||||
})
|
||||
|
||||
const report = await doctorAllServers({ configOnly: true }, deps)
|
||||
|
||||
assert.equal(report.summary.totalReports, 1)
|
||||
assert.equal(report.servers[0]?.definitions.length, 1)
|
||||
assert.equal(report.servers[0]?.definitions[0]?.sourceType, 'plugin')
|
||||
assert.equal(report.servers[0]?.definitions[0]?.runtimeActive, true)
|
||||
})
|
||||
|
||||
test('doctorAllServers reports disabled plugin servers as disabled, not not-found', async () => {
|
||||
const pluginConfig = {
|
||||
type: 'http' as const,
|
||||
url: 'https://example.test/mcp',
|
||||
scope: 'dynamic' as const,
|
||||
pluginSource: 'plugin:github@official',
|
||||
}
|
||||
const deps = makeDependencies({
|
||||
getAllMcpConfigs: async () => ({
|
||||
servers: { 'plugin:github:github': pluginConfig },
|
||||
errors: [],
|
||||
}),
|
||||
isMcpServerDisabled: name => name === 'plugin:github:github',
|
||||
})
|
||||
|
||||
const report = await doctorAllServers({ configOnly: true }, deps)
|
||||
|
||||
assert.equal(report.summary.totalReports, 1)
|
||||
assert.equal(report.summary.warnings, 1)
|
||||
assert.equal(report.summary.blocking, 0)
|
||||
assert.equal(report.servers[0]?.definitions.length, 1)
|
||||
assert.equal(report.servers[0]?.definitions[0]?.sourceType, 'plugin')
|
||||
assert.equal(report.servers[0]?.definitions[0]?.disabled, true)
|
||||
assert.equal(report.servers[0]?.definitions[0]?.runtimeActive, false)
|
||||
assert.equal(
|
||||
report.servers[0]?.findings.some(finding => finding.code === 'state.disabled' && !finding.blocking),
|
||||
true,
|
||||
)
|
||||
assert.equal(
|
||||
report.servers[0]?.findings.some(finding => finding.code === 'state.not_found'),
|
||||
false,
|
||||
)
|
||||
})
|
||||
|
||||
test('doctorServer converts failed live checks into blocking findings', async () => {
|
||||
const localConfig = stdioConfig('local', 'node-local')
|
||||
const deps = makeDependencies({
|
||||
getAllMcpConfigs: async () => ({
|
||||
servers: { github: localConfig },
|
||||
errors: [],
|
||||
}),
|
||||
getMcpConfigsByScope: scope =>
|
||||
scope === 'local'
|
||||
? { servers: { github: localConfig }, errors: [] }
|
||||
: { servers: {}, errors: [] },
|
||||
connectToServer: async (name, config) => ({
|
||||
name,
|
||||
type: 'failed',
|
||||
config,
|
||||
error: 'command not found: node-local',
|
||||
}),
|
||||
})
|
||||
|
||||
const report = await doctorServer('github', { configOnly: false }, deps)
|
||||
|
||||
assert.equal(report.summary.blocking, 1)
|
||||
assert.equal(report.servers[0]?.liveCheck.result, 'failed')
|
||||
assert.equal(
|
||||
report.servers[0]?.findings.some(
|
||||
finding => finding.code === 'stdio.command_not_found' && finding.blocking,
|
||||
),
|
||||
true,
|
||||
)
|
||||
})
|
||||
|
||||
test('doctorServer converts needs-auth live checks into warning findings', async () => {
|
||||
const localConfig = stdioConfig('local', 'node-local')
|
||||
const deps = makeDependencies({
|
||||
getAllMcpConfigs: async () => ({
|
||||
servers: { sentry: localConfig },
|
||||
errors: [],
|
||||
}),
|
||||
getMcpConfigsByScope: scope =>
|
||||
scope === 'local'
|
||||
? { servers: { sentry: localConfig }, errors: [] }
|
||||
: { servers: {}, errors: [] },
|
||||
connectToServer: async (name, config) => ({
|
||||
name,
|
||||
type: 'needs-auth',
|
||||
config,
|
||||
}),
|
||||
})
|
||||
|
||||
const report = await doctorServer('sentry', { configOnly: false }, deps)
|
||||
|
||||
assert.equal(report.summary.warnings, 1)
|
||||
assert.equal(report.summary.blocking, 0)
|
||||
assert.equal(
|
||||
report.servers[0]?.findings.some(finding => finding.code === 'auth.needs_auth' && finding.severity === 'warn'),
|
||||
true,
|
||||
)
|
||||
})
|
||||
|
||||
test('doctorServer includes observed runtime definition for plugin-only targets', async () => {
|
||||
const pluginConfig = {
|
||||
type: 'http' as const,
|
||||
url: 'https://example.test/mcp',
|
||||
scope: 'dynamic' as const,
|
||||
pluginSource: 'plugin:github@official',
|
||||
}
|
||||
const deps = makeDependencies({
|
||||
getAllMcpConfigs: async () => ({
|
||||
servers: { 'plugin:github:github': pluginConfig },
|
||||
errors: [],
|
||||
}),
|
||||
})
|
||||
|
||||
const report = await doctorServer('plugin:github:github', { configOnly: true }, deps)
|
||||
|
||||
assert.equal(report.summary.totalReports, 1)
|
||||
assert.equal(report.servers[0]?.definitions.length, 1)
|
||||
assert.equal(report.servers[0]?.definitions[0]?.sourceType, 'plugin')
|
||||
assert.equal(report.servers[0]?.definitions[0]?.runtimeActive, true)
|
||||
})
|
||||
|
||||
test('doctorServer with scopeFilter does not leak runtime definition from another scope when target is absent', async () => {
|
||||
let connectCalls = 0
|
||||
const localConfig = stdioConfig('local', 'node-local')
|
||||
const deps = makeDependencies({
|
||||
getAllMcpConfigs: async () => ({
|
||||
servers: { github: localConfig },
|
||||
errors: [],
|
||||
}),
|
||||
getMcpConfigsByScope: scope =>
|
||||
scope === 'local'
|
||||
? { servers: { github: localConfig }, errors: [] }
|
||||
: { servers: {}, errors: [] },
|
||||
connectToServer: async (name, config) => {
|
||||
connectCalls += 1
|
||||
return {
|
||||
name,
|
||||
type: 'connected',
|
||||
capabilities: {},
|
||||
config,
|
||||
cleanup: async () => {},
|
||||
}
|
||||
},
|
||||
})
|
||||
|
||||
const report = await doctorServer('github', { configOnly: false, scopeFilter: 'user' }, deps)
|
||||
|
||||
assert.equal(connectCalls, 0)
|
||||
assert.equal(report.summary.totalReports, 1)
|
||||
assert.equal(report.summary.blocking, 1)
|
||||
assert.deepEqual(report.servers[0]?.definitions, [])
|
||||
assert.equal(report.servers[0]?.liveCheck.result, 'skipped')
|
||||
assert.equal(
|
||||
report.servers[0]?.findings.some(finding => finding.code === 'state.not_found' && finding.blocking),
|
||||
true,
|
||||
)
|
||||
})
|
||||
|
||||
test('doctorServer reports blocking not-found state when no definition exists', async () => {
|
||||
const report = await doctorServer('missing-server', { configOnly: true }, makeDependencies())
|
||||
|
||||
assert.equal(report.summary.blocking, 1)
|
||||
assert.equal(report.servers[0]?.findings.some(finding => finding.code === 'state.not_found' && finding.blocking), true)
|
||||
})
|
||||
695
src/services/mcp/doctor.ts
Normal file
695
src/services/mcp/doctor.ts
Normal file
@@ -0,0 +1,695 @@
|
||||
import type { ValidationError } from '../../utils/settings/validation.js'
|
||||
import { clearServerCache, connectToServer } from './client.js'
|
||||
import {
|
||||
getAllMcpConfigs,
|
||||
getMcpConfigsByScope,
|
||||
isMcpServerDisabled,
|
||||
} from './config.js'
|
||||
import type {
|
||||
ConfigScope,
|
||||
ScopedMcpServerConfig,
|
||||
} from './types.js'
|
||||
import { describeMcpConfigFilePath, getProjectMcpServerStatus } from './utils.js'
|
||||
|
||||
/** Severity ladder for a doctor finding; 'error' findings are the blocking ones. */
export type McpDoctorSeverity = 'info' | 'warn' | 'error'
/** Config scopes a doctor run can be restricted to via options.scopeFilter. */
export type McpDoctorScopeFilter = 'local' | 'project' | 'user' | 'enterprise'

/** One diagnostic produced by the doctor, either global or tied to a server. */
export type McpDoctorFinding = {
  blocking: boolean // true when the problem prevents the server from working
  code: string // stable machine-readable identifier, e.g. 'config.invalid_json'
  message: string // human-readable description
  remediation?: string // suggested fix, when one is known
  scope?: string // config scope the finding originated from, if any
  serverName?: string // absent for file-level (global) findings
  severity: McpDoctorSeverity
  sourcePath?: string // config file (or pseudo-path) the finding points at
}

/** Outcome of the optional live connection probe for one server. */
export type McpDoctorLiveCheck = {
  attempted: boolean // false when skipped (config-only mode, disabled, pending, absent)
  durationMs?: number // wall-clock time of the connection attempt
  error?: string // populated only for result === 'failed'
  result?: 'connected' | 'needs-auth' | 'failed' | 'pending' | 'disabled' | 'skipped'
}

/** One concrete definition of a server as seen in a specific config source. */
export type McpDoctorDefinition = {
  name: string
  // Where the definition came from; 'plugin'/'claudeai'/'dynamic'/'internal'
  // are runtime-observed sources rather than user-editable config files.
  sourceType:
    | 'local'
    | 'project'
    | 'user'
    | 'enterprise'
    | 'managed'
    | 'plugin'
    | 'claudeai'
    | 'dynamic'
    | 'internal'
  sourcePath?: string
  transport?: string // e.g. 'stdio', 'http', 'sse'
  runtimeVisible: boolean
  runtimeActive: boolean // true when this definition is the one actually in effect
  pendingApproval?: boolean // project-scope servers awaiting user approval
  disabled?: boolean
}

/** Per-server section of a doctor report. */
export type McpDoctorServerReport = {
  serverName: string
  requestedByUser: boolean // true when this server was the explicit doctor target
  definitions: McpDoctorDefinition[]
  liveCheck: McpDoctorLiveCheck
  findings: McpDoctorFinding[]
}

/**
 * Injectable collaborators for the doctor. Production code uses
 * DEFAULT_DEPENDENCIES; tests pass stubs.
 */
export type McpDoctorDependencies = {
  getAllMcpConfigs: typeof getAllMcpConfigs
  getMcpConfigsByScope: typeof getMcpConfigsByScope
  getProjectMcpServerStatus: typeof getProjectMcpServerStatus
  isMcpServerDisabled: typeof isMcpServerDisabled
  describeMcpConfigFilePath: typeof describeMcpConfigFilePath
  connectToServer: typeof connectToServer
  clearServerCache: typeof clearServerCache
}

/** Top-level result of a doctor run (single server or all servers). */
export type McpDoctorReport = {
  generatedAt: string // ISO-8601 timestamp of report creation
  targetName?: string // set when a specific server was requested
  scopeFilter?: McpDoctorScopeFilter
  configOnly: boolean // true when live connection checks were skipped
  summary: {
    totalReports: number
    healthy: number
    warnings: number
    blocking: number
  }
  findings: McpDoctorFinding[] // global (file-level) findings only
  servers: McpDoctorServerReport[]
}
|
||||
|
||||
// Production wiring for the doctor: the real config readers, approval/disable
// state lookups, config-path describer, and live MCP client. Callers (tests)
// may supply an McpDoctorDependencies value with any of these replaced.
const DEFAULT_DEPENDENCIES: McpDoctorDependencies = {
  getAllMcpConfigs,
  getMcpConfigsByScope,
  getProjectMcpServerStatus,
  isMcpServerDisabled,
  describeMcpConfigFilePath,
  connectToServer,
  clearServerCache,
}
|
||||
|
||||
export function buildEmptyDoctorReport(options: {
|
||||
configOnly: boolean
|
||||
scopeFilter?: McpDoctorScopeFilter
|
||||
targetName?: string
|
||||
}): McpDoctorReport {
|
||||
return {
|
||||
generatedAt: new Date().toISOString(),
|
||||
targetName: options.targetName,
|
||||
scopeFilter: options.scopeFilter,
|
||||
configOnly: options.configOnly,
|
||||
summary: {
|
||||
totalReports: 0,
|
||||
healthy: 0,
|
||||
warnings: 0,
|
||||
blocking: 0,
|
||||
},
|
||||
findings: [],
|
||||
servers: [],
|
||||
}
|
||||
}
|
||||
|
||||
function getFindingCode(error: ValidationError): string {
|
||||
if (error.message === 'MCP config is not a valid JSON') {
|
||||
return 'config.invalid_json'
|
||||
}
|
||||
if (error.message.startsWith('Missing environment variables:')) {
|
||||
return 'config.missing_env_vars'
|
||||
}
|
||||
if (error.message.includes("Windows requires 'cmd /c' wrapper to execute npx")) {
|
||||
return 'config.windows_npx_wrapper_required'
|
||||
}
|
||||
if (error.message === 'Does not adhere to MCP server configuration schema') {
|
||||
return 'config.invalid_schema'
|
||||
}
|
||||
return 'config.validation_error'
|
||||
}
|
||||
|
||||
function getSeverity(error: ValidationError): McpDoctorSeverity {
|
||||
const severity = error.mcpErrorMetadata?.severity
|
||||
if (severity === 'fatal') {
|
||||
return 'error'
|
||||
}
|
||||
if (severity === 'warning') {
|
||||
return 'warn'
|
||||
}
|
||||
return 'warn'
|
||||
}
|
||||
|
||||
export function findingsFromValidationErrors(
|
||||
validationErrors: ValidationError[],
|
||||
): McpDoctorFinding[] {
|
||||
return validationErrors.map(error => {
|
||||
const severity = getSeverity(error)
|
||||
return {
|
||||
blocking: severity === 'error',
|
||||
code: getFindingCode(error),
|
||||
message: error.message,
|
||||
remediation: error.suggestion,
|
||||
scope: error.mcpErrorMetadata?.scope,
|
||||
serverName: error.mcpErrorMetadata?.serverName,
|
||||
severity,
|
||||
sourcePath: error.file,
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
function splitValidationFindings(validationFindings: McpDoctorFinding[]): {
|
||||
globalFindings: McpDoctorFinding[]
|
||||
serverFindingsByName: Map<string, McpDoctorFinding[]>
|
||||
} {
|
||||
const globalFindings: McpDoctorFinding[] = []
|
||||
const serverFindingsByName = new Map<string, McpDoctorFinding[]>()
|
||||
|
||||
for (const finding of validationFindings) {
|
||||
if (!finding.serverName) {
|
||||
globalFindings.push(finding)
|
||||
continue
|
||||
}
|
||||
|
||||
const findings = serverFindingsByName.get(finding.serverName) ?? []
|
||||
findings.push(finding)
|
||||
serverFindingsByName.set(finding.serverName, findings)
|
||||
}
|
||||
|
||||
return {
|
||||
globalFindings,
|
||||
serverFindingsByName,
|
||||
}
|
||||
}
|
||||
|
||||
function getSourceType(config: ScopedMcpServerConfig): McpDoctorDefinition['sourceType'] {
|
||||
if (config.scope === 'claudeai') {
|
||||
return 'claudeai'
|
||||
}
|
||||
if (config.scope === 'dynamic') {
|
||||
return config.pluginSource ? 'plugin' : 'dynamic'
|
||||
}
|
||||
if (config.scope === 'managed') {
|
||||
return 'managed'
|
||||
}
|
||||
return config.scope
|
||||
}
|
||||
|
||||
function getTransport(config: ScopedMcpServerConfig): string {
|
||||
return config.type ?? 'stdio'
|
||||
}
|
||||
|
||||
function getConfigSignature(config: ScopedMcpServerConfig): string {
|
||||
switch (config.type) {
|
||||
case 'sse':
|
||||
case 'http':
|
||||
case 'ws':
|
||||
case 'claudeai-proxy':
|
||||
return `${config.scope}:${config.type}:${config.url}`
|
||||
case 'sdk':
|
||||
return `${config.scope}:${config.type}:${config.name}`
|
||||
default:
|
||||
return `${config.scope}:${config.type ?? 'stdio'}:${config.command}:${JSON.stringify(config.args ?? [])}`
|
||||
}
|
||||
}
|
||||
|
||||
function isSameDefinition(
|
||||
config: ScopedMcpServerConfig,
|
||||
activeConfig: ScopedMcpServerConfig | undefined,
|
||||
): boolean {
|
||||
if (!activeConfig) {
|
||||
return false
|
||||
}
|
||||
return getSourceType(config) === getSourceType(activeConfig) && getConfigSignature(config) === getConfigSignature(activeConfig)
|
||||
}
|
||||
|
||||
function buildScopeDefinitions(
|
||||
name: string,
|
||||
scope: ConfigScope,
|
||||
servers: Record<string, ScopedMcpServerConfig>,
|
||||
activeConfig: ScopedMcpServerConfig | undefined,
|
||||
deps: McpDoctorDependencies,
|
||||
): McpDoctorDefinition[] {
|
||||
const config = servers[name]
|
||||
if (!config) {
|
||||
return []
|
||||
}
|
||||
|
||||
const pendingApproval =
|
||||
scope === 'project' ? deps.getProjectMcpServerStatus(name) === 'pending' : false
|
||||
const disabled = deps.isMcpServerDisabled(name)
|
||||
const runtimeActive = !disabled && isSameDefinition(config, activeConfig)
|
||||
|
||||
return [
|
||||
{
|
||||
name,
|
||||
sourceType: getSourceType(config),
|
||||
sourcePath: deps.describeMcpConfigFilePath(scope),
|
||||
transport: getTransport(config),
|
||||
runtimeVisible: runtimeActive,
|
||||
runtimeActive,
|
||||
pendingApproval,
|
||||
disabled,
|
||||
},
|
||||
]
|
||||
}
|
||||
|
||||
function shouldIncludeScope(
|
||||
scope: ConfigScope,
|
||||
scopeFilter: McpDoctorScopeFilter | undefined,
|
||||
): boolean {
|
||||
if (!scopeFilter) {
|
||||
return scope === 'enterprise' || scope === 'local' || scope === 'project' || scope === 'user'
|
||||
}
|
||||
return scope === scopeFilter
|
||||
}
|
||||
|
||||
function getValidationErrorsForSelectedScopes(
|
||||
scopeResults: {
|
||||
enterprise: ReturnType<McpDoctorDependencies['getMcpConfigsByScope']>
|
||||
local: ReturnType<McpDoctorDependencies['getMcpConfigsByScope']>
|
||||
project: ReturnType<McpDoctorDependencies['getMcpConfigsByScope']>
|
||||
user: ReturnType<McpDoctorDependencies['getMcpConfigsByScope']>
|
||||
},
|
||||
scopeFilter: McpDoctorScopeFilter | undefined,
|
||||
): ValidationError[] {
|
||||
return [
|
||||
...(shouldIncludeScope('enterprise', scopeFilter) ? scopeResults.enterprise.errors : []),
|
||||
...(shouldIncludeScope('local', scopeFilter) ? scopeResults.local.errors : []),
|
||||
...(shouldIncludeScope('project', scopeFilter) ? scopeResults.project.errors : []),
|
||||
...(shouldIncludeScope('user', scopeFilter) ? scopeResults.user.errors : []),
|
||||
]
|
||||
}
|
||||
|
||||
function buildObservedDefinition(
|
||||
name: string,
|
||||
activeConfig: ScopedMcpServerConfig,
|
||||
options?: {
|
||||
disabled?: boolean
|
||||
runtimeActive?: boolean
|
||||
runtimeVisible?: boolean
|
||||
},
|
||||
): McpDoctorDefinition {
|
||||
return {
|
||||
name,
|
||||
sourceType: getSourceType(activeConfig),
|
||||
sourcePath:
|
||||
getSourceType(activeConfig) === 'plugin'
|
||||
? `plugin:${activeConfig.pluginSource ?? 'unknown'}`
|
||||
: getSourceType(activeConfig) === 'claudeai'
|
||||
? 'claude.ai'
|
||||
: activeConfig.scope,
|
||||
transport: getTransport(activeConfig),
|
||||
runtimeVisible: options?.runtimeVisible ?? true,
|
||||
runtimeActive: options?.runtimeActive ?? true,
|
||||
disabled: options?.disabled ?? false,
|
||||
}
|
||||
}
|
||||
|
||||
function hasDefinitionForRuntimeSource(
|
||||
definitions: McpDoctorDefinition[],
|
||||
runtimeConfig: ScopedMcpServerConfig,
|
||||
deps: McpDoctorDependencies,
|
||||
): boolean {
|
||||
const runtimeSourceType = getSourceType(runtimeConfig)
|
||||
const runtimeSourcePath =
|
||||
runtimeSourceType === 'plugin'
|
||||
? `plugin:${runtimeConfig.pluginSource ?? 'unknown'}`
|
||||
: runtimeSourceType === 'claudeai'
|
||||
? 'claude.ai'
|
||||
: deps.describeMcpConfigFilePath(runtimeConfig.scope)
|
||||
|
||||
return definitions.some(
|
||||
definition =>
|
||||
definition.sourceType === runtimeSourceType &&
|
||||
definition.sourcePath === runtimeSourcePath &&
|
||||
definition.transport === getTransport(runtimeConfig),
|
||||
)
|
||||
}
|
||||
|
||||
function buildShadowingFindings(definitions: McpDoctorDefinition[]): McpDoctorFinding[] {
|
||||
const userEditable = definitions.filter(definition =>
|
||||
definition.sourceType === 'local' ||
|
||||
definition.sourceType === 'project' ||
|
||||
definition.sourceType === 'user' ||
|
||||
definition.sourceType === 'enterprise',
|
||||
)
|
||||
|
||||
if (userEditable.length <= 1) {
|
||||
return []
|
||||
}
|
||||
|
||||
const active = userEditable.find(definition => definition.runtimeActive) ?? userEditable[0]
|
||||
return [
|
||||
{
|
||||
blocking: false,
|
||||
code: 'duplicate.same_name_multiple_scopes',
|
||||
message: `Server is defined in multiple config scopes; active source is ${active.sourceType}`,
|
||||
remediation: 'Remove or rename one of the duplicate definitions to avoid confusion.',
|
||||
serverName: active.name,
|
||||
severity: 'warn',
|
||||
},
|
||||
{
|
||||
blocking: false,
|
||||
code: 'scope.shadowed',
|
||||
message: `${active.name} has shadowed definitions in lower-precedence config scopes.`,
|
||||
remediation: 'Inspect the other definitions and remove the ones you no longer want to keep.',
|
||||
serverName: active.name,
|
||||
severity: 'warn',
|
||||
},
|
||||
]
|
||||
}
|
||||
|
||||
function buildStateFindings(definitions: McpDoctorDefinition[]): McpDoctorFinding[] {
|
||||
const findings: McpDoctorFinding[] = []
|
||||
|
||||
for (const definition of definitions) {
|
||||
if (definition.pendingApproval) {
|
||||
findings.push({
|
||||
blocking: false,
|
||||
code: 'state.pending_project_approval',
|
||||
message: `${definition.name} is declared in project config but pending project approval.`,
|
||||
remediation: 'Approve the server in the project MCP approval flow before expecting it to become active.',
|
||||
scope: 'project',
|
||||
serverName: definition.name,
|
||||
severity: 'warn',
|
||||
sourcePath: definition.sourcePath,
|
||||
})
|
||||
}
|
||||
|
||||
if (definition.disabled) {
|
||||
findings.push({
|
||||
blocking: false,
|
||||
code: 'state.disabled',
|
||||
message: `${definition.name} is currently disabled.`,
|
||||
remediation: 'Re-enable the server before expecting it to be available at runtime.',
|
||||
serverName: definition.name,
|
||||
severity: 'warn',
|
||||
sourcePath: definition.sourcePath,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
return findings
|
||||
}
|
||||
|
||||
function summarizeReport(report: McpDoctorReport): McpDoctorReport {
|
||||
const allFindings = [...report.findings, ...report.servers.flatMap(server => server.findings)]
|
||||
const blocking = allFindings.filter(finding => finding.blocking).length
|
||||
const warnings = allFindings.filter(finding => finding.severity === 'warn').length
|
||||
const healthy = report.servers.filter(
|
||||
server =>
|
||||
server.liveCheck.result === 'connected' &&
|
||||
server.findings.every(finding => !finding.blocking && finding.severity !== 'warn'),
|
||||
).length
|
||||
|
||||
return {
|
||||
...report,
|
||||
summary: {
|
||||
totalReports: report.servers.length,
|
||||
healthy,
|
||||
warnings,
|
||||
blocking,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * Run (or deliberately skip) the live connection probe for one server.
 *
 * Skips without connecting when:
 *  - configOnly mode is on,
 *  - there is no active config (then 'pending'/'disabled' if any definition
 *    is in that state, else 'skipped').
 *
 * Otherwise connects via deps.connectToServer, maps the connection outcome
 * onto a McpDoctorLiveCheck, and always clears the diagnostic connection's
 * cache afterwards (best-effort).
 */
async function getLiveCheck(
  name: string,
  activeConfig: ScopedMcpServerConfig | undefined,
  configOnly: boolean,
  definitions: McpDoctorDefinition[],
  deps: McpDoctorDependencies,
): Promise<McpDoctorLiveCheck> {
  // Config-only mode never touches the network/process.
  if (configOnly) {
    return { attempted: false, result: 'skipped' }
  }

  // No runtime config: report the most specific non-attempt reason we can.
  if (!activeConfig) {
    if (definitions.some(definition => definition.pendingApproval)) {
      return { attempted: false, result: 'pending' }
    }
    if (definitions.some(definition => definition.disabled)) {
      return { attempted: false, result: 'disabled' }
    }
    return { attempted: false, result: 'skipped' }
  }

  // Time the whole connection attempt, whatever its outcome.
  const startedAt = Date.now()
  const connection = await deps.connectToServer(name, activeConfig)
  const durationMs = Date.now() - startedAt

  try {
    // NOTE(review): no default case — if connection.type falls outside these
    // four variants this resolves to undefined; presumably the union is
    // exhaustive — confirm against the client's connection type.
    switch (connection.type) {
      case 'connected':
        return { attempted: true, result: 'connected', durationMs }
      case 'needs-auth':
        return { attempted: true, result: 'needs-auth', durationMs }
      case 'disabled':
        return { attempted: true, result: 'disabled', durationMs }
      case 'pending':
        return { attempted: true, result: 'pending', durationMs }
      case 'failed':
        return {
          attempted: true,
          result: 'failed',
          durationMs,
          error: connection.error,
        }
    }
  } finally {
    // Runs before any of the returns above resolve to the caller, so the
    // diagnostic connection never lingers in the cache.
    await deps.clearServerCache(name, activeConfig).catch(() => {
      // Best-effort cleanup for diagnostic connections.
    })
  }
}
|
||||
|
||||
function buildLiveFindings(
|
||||
name: string,
|
||||
definitions: McpDoctorDefinition[],
|
||||
liveCheck: McpDoctorLiveCheck,
|
||||
): McpDoctorFinding[] {
|
||||
const activeDefinition = definitions.find(definition => definition.runtimeActive)
|
||||
|
||||
if (liveCheck.result === 'needs-auth') {
|
||||
return [
|
||||
{
|
||||
blocking: false,
|
||||
code: 'auth.needs_auth',
|
||||
message: `${name} requires authentication before it can be used.`,
|
||||
remediation: 'Authenticate the server and then rerun the doctor command.',
|
||||
serverName: name,
|
||||
severity: 'warn',
|
||||
sourcePath: activeDefinition?.sourcePath,
|
||||
},
|
||||
]
|
||||
}
|
||||
|
||||
if (liveCheck.result === 'failed') {
|
||||
const commandNotFound =
|
||||
activeDefinition?.transport === 'stdio' &&
|
||||
typeof liveCheck.error === 'string' &&
|
||||
liveCheck.error.toLowerCase().includes('not found')
|
||||
|
||||
return [
|
||||
{
|
||||
blocking: true,
|
||||
code: commandNotFound ? 'stdio.command_not_found' : 'health.failed',
|
||||
message: liveCheck.error
|
||||
? `${name} failed its live health check: ${liveCheck.error}`
|
||||
: `${name} failed its live health check.`,
|
||||
remediation: commandNotFound
|
||||
? 'Verify the configured executable exists on PATH or use a full executable path.'
|
||||
: 'Inspect the server configuration and retry the connection once the underlying problem is fixed.',
|
||||
serverName: name,
|
||||
severity: 'error',
|
||||
sourcePath: activeDefinition?.sourcePath,
|
||||
},
|
||||
]
|
||||
}
|
||||
|
||||
return []
|
||||
}
|
||||
|
||||
/**
 * Build the full doctor report for a single MCP server: its definitions
 * across configuration scopes, validation/shadowing/state findings, and
 * (unless options.configOnly suppresses it inside getLiveCheck) the result
 * of a live connection check.
 *
 * NOTE(review): this reads all four scopes via deps.getMcpConfigsByScope
 * without awaiting — assumes those are synchronous, unlike the awaited
 * deps.getAllMcpConfigs; confirm against McpDoctorDependencies.
 */
async function buildServerReport(
  name: string,
  options: {
    configOnly: boolean
    requestedByUser: boolean
    scopeFilter?: McpDoctorScopeFilter
  },
  validationFindingsByName: Map<string, McpDoctorFinding[]>,
  deps: McpDoctorDependencies,
): Promise<McpDoctorServerReport> {
  // Read every scope unconditionally; the scope filter is applied below when
  // assembling definitions, not when reading.
  const scopeResults = {
    enterprise: deps.getMcpConfigsByScope('enterprise'),
    local: deps.getMcpConfigsByScope('local'),
    project: deps.getMcpConfigsByScope('project'),
    user: deps.getMcpConfigsByScope('user'),
  }
  const { servers: activeServers } = await deps.getAllMcpConfigs()
  const serverDisabled = deps.isMcpServerDisabled(name)
  const runtimeConfig = activeServers[name] ?? undefined
  // A disabled server keeps its runtime config for reporting purposes but is
  // never treated as the active configuration.
  const activeConfig = serverDisabled ? undefined : runtimeConfig

  // Collect this server's definition from each scope the caller selected.
  const definitions = [
    ...(shouldIncludeScope('enterprise', options.scopeFilter)
      ? buildScopeDefinitions(name, 'enterprise', scopeResults.enterprise.servers, activeConfig, deps)
      : []),
    ...(shouldIncludeScope('local', options.scopeFilter)
      ? buildScopeDefinitions(name, 'local', scopeResults.local.servers, activeConfig, deps)
      : []),
    ...(shouldIncludeScope('project', options.scopeFilter)
      ? buildScopeDefinitions(name, 'project', scopeResults.project.servers, activeConfig, deps)
      : []),
    ...(shouldIncludeScope('user', options.scopeFilter)
      ? buildScopeDefinitions(name, 'user', scopeResults.user.servers, activeConfig, deps)
      : []),
  ]

  // Synthesize an "observed" definition when the runtime knows about the
  // server but none of the scoped definitions accounts for its source —
  // either no definitions exist at all (and no filter hid them) or every
  // definition we found is runtime-inactive.
  const shouldAddObservedDefinition =
    !!runtimeConfig &&
    !hasDefinitionForRuntimeSource(definitions, runtimeConfig, deps) &&
    ((definitions.length === 0 && !options.scopeFilter) ||
      (definitions.length > 0 && definitions.every(definition => !definition.runtimeActive)))

  if (runtimeConfig && shouldAddObservedDefinition) {
    definitions.push(
      buildObservedDefinition(name, runtimeConfig, {
        disabled: serverDisabled,
        runtimeActive: !serverDisabled,
        runtimeVisible: !serverDisabled,
      }),
    )
  }

  // Only hand the runtime config to the live check when some definition is
  // actually active (or we just synthesized one above).
  const visibleRuntimeConfig =
    definitions.some(definition => definition.runtimeActive) || shouldAddObservedDefinition
      ? activeConfig
      : undefined

  const findings: McpDoctorFinding[] = [
    ...(validationFindingsByName.get(name) ?? []),
    ...buildShadowingFindings(definitions),
    ...buildStateFindings(definitions),
  ]

  // No definition in any selected scope and nothing observed at runtime:
  // the requested name is simply unknown.
  if (definitions.length === 0 && !shouldAddObservedDefinition) {
    findings.push({
      blocking: true,
      code: 'state.not_found',
      message: `${name} was not found in the selected MCP configuration sources.`,
      remediation: 'Check the server name and scope, or add the MCP server before retrying.',
      serverName: name,
      severity: 'error',
    })
  }

  const liveCheck = await getLiveCheck(name, visibleRuntimeConfig, options.configOnly, definitions, deps)
  findings.push(...buildLiveFindings(name, definitions, liveCheck))

  return {
    serverName: name,
    requestedByUser: options.requestedByUser,
    definitions,
    liveCheck,
    findings,
  }
}
|
||||
|
||||
function getServerNames(
|
||||
scopeServers: Array<Record<string, ScopedMcpServerConfig>>,
|
||||
activeServers: Record<string, ScopedMcpServerConfig>,
|
||||
includeActiveServers: boolean,
|
||||
): string[] {
|
||||
const names = new Set<string>(includeActiveServers ? Object.keys(activeServers) : [])
|
||||
for (const servers of scopeServers) {
|
||||
for (const name of Object.keys(servers)) {
|
||||
names.add(name)
|
||||
}
|
||||
}
|
||||
return [...names].sort()
|
||||
}
|
||||
|
||||
/**
 * Run the MCP doctor across every server discoverable in the selected scopes
 * (plus runtime-active servers when no scope filter is set) and return the
 * summarized report.
 *
 * Per-server reports are built concurrently via Promise.all; each carries its
 * own findings, while findings not attributable to a server land on the
 * report itself.
 */
export async function doctorAllServers(
  options: { configOnly: boolean; scopeFilter?: McpDoctorScopeFilter } = {
    configOnly: false,
  },
  deps: McpDoctorDependencies = DEFAULT_DEPENDENCIES,
): Promise<McpDoctorReport> {
  const report = buildEmptyDoctorReport(options)
  // Read every scope; the filter is applied when choosing which scope results
  // feed validation and name discovery below.
  const scopeResults = {
    enterprise: deps.getMcpConfigsByScope('enterprise'),
    local: deps.getMcpConfigsByScope('local'),
    project: deps.getMcpConfigsByScope('project'),
    user: deps.getMcpConfigsByScope('user'),
  }
  const validationFindings = findingsFromValidationErrors(
    getValidationErrorsForSelectedScopes(scopeResults, options.scopeFilter),
  )
  // Global findings go on the report; per-server findings are routed to the
  // matching server report via buildServerReport.
  const { globalFindings, serverFindingsByName } = splitValidationFindings(validationFindings)
  const { servers: activeServers } = await deps.getAllMcpConfigs()
  // Runtime-active servers are only included when no scope filter is set.
  const names = getServerNames(
    [
      ...(shouldIncludeScope('enterprise', options.scopeFilter) ? [scopeResults.enterprise.servers] : []),
      ...(shouldIncludeScope('local', options.scopeFilter) ? [scopeResults.local.servers] : []),
      ...(shouldIncludeScope('project', options.scopeFilter) ? [scopeResults.project.servers] : []),
      ...(shouldIncludeScope('user', options.scopeFilter) ? [scopeResults.user.servers] : []),
    ],
    activeServers,
    !options.scopeFilter,
  )

  const servers = await Promise.all(
    names.map(name =>
      buildServerReport(
        name,
        {
          configOnly: options.configOnly,
          requestedByUser: false,
          scopeFilter: options.scopeFilter,
        },
        serverFindingsByName,
        deps,
      ),
    ),
  )

  report.servers = servers
  report.findings = globalFindings
  return summarizeReport(report)
}
|
||||
|
||||
export async function doctorServer(
|
||||
name: string,
|
||||
options: { configOnly: boolean; scopeFilter?: McpDoctorScopeFilter },
|
||||
deps: McpDoctorDependencies = DEFAULT_DEPENDENCIES,
|
||||
): Promise<McpDoctorReport> {
|
||||
const report = buildEmptyDoctorReport({ ...options, targetName: name })
|
||||
const scopeResults = {
|
||||
enterprise: deps.getMcpConfigsByScope('enterprise'),
|
||||
local: deps.getMcpConfigsByScope('local'),
|
||||
project: deps.getMcpConfigsByScope('project'),
|
||||
user: deps.getMcpConfigsByScope('user'),
|
||||
}
|
||||
const validationFindings = findingsFromValidationErrors(
|
||||
getValidationErrorsForSelectedScopes(scopeResults, options.scopeFilter),
|
||||
)
|
||||
const { globalFindings, serverFindingsByName } = splitValidationFindings(validationFindings)
|
||||
const server = await buildServerReport(
|
||||
name,
|
||||
{
|
||||
configOnly: options.configOnly,
|
||||
requestedByUser: true,
|
||||
scopeFilter: options.scopeFilter,
|
||||
},
|
||||
serverFindingsByName,
|
||||
deps,
|
||||
)
|
||||
report.servers = [server]
|
||||
report.findings = globalFindings
|
||||
return summarizeReport(report)
|
||||
}
|
||||
@@ -35,7 +35,7 @@ export async function sendNotification(
|
||||
})
|
||||
}
|
||||
|
||||
const DEFAULT_TITLE = 'Claude Code'
|
||||
const DEFAULT_TITLE = 'Open Claude'
|
||||
|
||||
async function sendToChannel(
|
||||
channel: string,
|
||||
|
||||
@@ -117,7 +117,8 @@ export function isAnthropicAuthEnabled(): boolean {
|
||||
isEnvTruthy(process.env.CLAUDE_CODE_USE_VERTEX) ||
|
||||
isEnvTruthy(process.env.CLAUDE_CODE_USE_FOUNDRY) ||
|
||||
isEnvTruthy(process.env.CLAUDE_CODE_USE_OPENAI) ||
|
||||
isEnvTruthy(process.env.CLAUDE_CODE_USE_GEMINI)
|
||||
isEnvTruthy(process.env.CLAUDE_CODE_USE_GEMINI) ||
|
||||
isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB)
|
||||
|
||||
// Check if user has configured an external API key source
|
||||
// This allows externally-provided API keys to work (without requiring proxy configuration)
|
||||
@@ -1731,14 +1732,15 @@ export function getSubscriptionName(): string {
|
||||
}
|
||||
}
|
||||
|
||||
/** Check if using third-party services (Bedrock or Vertex or Foundry or OpenAI-compatible or Gemini) */
|
||||
/** Check if using third-party services (Bedrock or Vertex or Foundry or OpenAI-compatible or Gemini or GitHub Models) */
|
||||
export function isUsing3PServices(): boolean {
|
||||
return !!(
|
||||
isEnvTruthy(process.env.CLAUDE_CODE_USE_BEDROCK) ||
|
||||
isEnvTruthy(process.env.CLAUDE_CODE_USE_VERTEX) ||
|
||||
isEnvTruthy(process.env.CLAUDE_CODE_USE_FOUNDRY) ||
|
||||
isEnvTruthy(process.env.CLAUDE_CODE_USE_OPENAI) ||
|
||||
isEnvTruthy(process.env.CLAUDE_CODE_USE_GEMINI)
|
||||
isEnvTruthy(process.env.CLAUDE_CODE_USE_GEMINI) ||
|
||||
isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB)
|
||||
)
|
||||
}
|
||||
|
||||
|
||||
@@ -9,6 +9,7 @@ import {
|
||||
logEvent,
|
||||
} from 'src/services/analytics/index.js'
|
||||
import { type ReleaseChannel, saveGlobalConfig } from './config.js'
|
||||
import { getAPIProvider } from './model/providers.js'
|
||||
import { logForDebugging } from './debug.js'
|
||||
import { env } from './env.js'
|
||||
import { getClaudeConfigHomeDir } from './envUtils.js'
|
||||
@@ -72,6 +73,12 @@ export async function assertMinVersion(): Promise<void> {
|
||||
return
|
||||
}
|
||||
|
||||
// Skip version check for third-party providers — the min version
|
||||
// kill-switch is Anthropic-specific and should not block 3P users
|
||||
if (getAPIProvider() !== 'firstParty') {
|
||||
return
|
||||
}
|
||||
|
||||
try {
|
||||
const versionConfig = await getDynamicConfig_BLOCKS_ON_INIT<{
|
||||
minVersion: string
|
||||
|
||||
@@ -77,7 +77,9 @@ export function getContextWindowForModel(
|
||||
process.env.CLAUDE_CODE_USE_OPENAI === '1' ||
|
||||
process.env.CLAUDE_CODE_USE_OPENAI === 'true' ||
|
||||
process.env.CLAUDE_CODE_USE_GEMINI === '1' ||
|
||||
process.env.CLAUDE_CODE_USE_GEMINI === 'true'
|
||||
process.env.CLAUDE_CODE_USE_GEMINI === 'true' ||
|
||||
process.env.CLAUDE_CODE_USE_GITHUB === '1' ||
|
||||
process.env.CLAUDE_CODE_USE_GITHUB === 'true'
|
||||
) {
|
||||
const openaiWindow = getOpenAIContextWindow(model)
|
||||
if (openaiWindow !== undefined) {
|
||||
@@ -181,7 +183,9 @@ export function getModelMaxOutputTokens(model: string): {
|
||||
process.env.CLAUDE_CODE_USE_OPENAI === '1' ||
|
||||
process.env.CLAUDE_CODE_USE_OPENAI === 'true' ||
|
||||
process.env.CLAUDE_CODE_USE_GEMINI === '1' ||
|
||||
process.env.CLAUDE_CODE_USE_GEMINI === 'true'
|
||||
process.env.CLAUDE_CODE_USE_GEMINI === 'true' ||
|
||||
process.env.CLAUDE_CODE_USE_GITHUB === '1' ||
|
||||
process.env.CLAUDE_CODE_USE_GITHUB === 'true'
|
||||
) {
|
||||
const openaiMax = getOpenAIMaxOutputTokens(model)
|
||||
if (openaiMax !== undefined) {
|
||||
|
||||
66
src/utils/githubModelsCredentials.hydrate.test.ts
Normal file
66
src/utils/githubModelsCredentials.hydrate.test.ts
Normal file
@@ -0,0 +1,66 @@
|
||||
/**
|
||||
* Hydrate tests live in a separate file with no static import of
|
||||
* githubModelsCredentials so Bun's mock.module can replace secureStorage
|
||||
* before that module is first loaded.
|
||||
*/
|
||||
import { afterEach, describe, expect, mock, test } from 'bun:test'
|
||||
|
||||
describe('hydrateGithubModelsTokenFromSecureStorage', () => {
  // Snapshot of every env var the tests mutate; afterEach restores it.
  const orig = {
    CLAUDE_CODE_USE_GITHUB: process.env.CLAUDE_CODE_USE_GITHUB,
    GITHUB_TOKEN: process.env.GITHUB_TOKEN,
    GH_TOKEN: process.env.GH_TOKEN,
    CLAUDE_CODE_SIMPLE: process.env.CLAUDE_CODE_SIMPLE,
  }

  afterEach(() => {
    // Undo the mock.module replacement, then restore the env snapshot so
    // mutations cannot leak into other test files.
    mock.restore()
    for (const [k, v] of Object.entries(orig)) {
      if (v === undefined) {
        delete process.env[k as keyof typeof orig]
      } else {
        process.env[k as keyof typeof orig] = v
      }
    }
  })

  test('sets GITHUB_TOKEN from secure storage when USE_GITHUB and env token empty', async () => {
    process.env.CLAUDE_CODE_USE_GITHUB = '1'
    delete process.env.GITHUB_TOKEN
    delete process.env.GH_TOKEN
    delete process.env.CLAUDE_CODE_SIMPLE

    // Replace secure storage BEFORE githubModelsCredentials is first loaded;
    // this is why the import below is dynamic (see file header comment).
    mock.module('./secureStorage/index.js', () => ({
      getSecureStorage: () => ({
        read: () => ({
          githubModels: { accessToken: 'stored-secret' },
        }),
      }),
    }))

    const { hydrateGithubModelsTokenFromSecureStorage } = await import(
      './githubModelsCredentials.js'
    )
    hydrateGithubModelsTokenFromSecureStorage()
    expect(process.env.GITHUB_TOKEN).toBe('stored-secret')
  })

  test('does not override existing GITHUB_TOKEN', async () => {
    process.env.CLAUDE_CODE_USE_GITHUB = '1'
    process.env.GITHUB_TOKEN = 'already'

    mock.module('./secureStorage/index.js', () => ({
      getSecureStorage: () => ({
        read: () => ({
          githubModels: { accessToken: 'stored-secret' },
        }),
      }),
    }))

    const { hydrateGithubModelsTokenFromSecureStorage } = await import(
      './githubModelsCredentials.js'
    )
    hydrateGithubModelsTokenFromSecureStorage()
    // An env-provided token always wins over the stored one.
    expect(process.env.GITHUB_TOKEN).toBe('already')
  })
})
|
||||
47
src/utils/githubModelsCredentials.test.ts
Normal file
47
src/utils/githubModelsCredentials.test.ts
Normal file
@@ -0,0 +1,47 @@
|
||||
import { describe, expect, test } from 'bun:test'
|
||||
|
||||
import {
|
||||
clearGithubModelsToken,
|
||||
readGithubModelsToken,
|
||||
saveGithubModelsToken,
|
||||
} from './githubModelsCredentials.js'
|
||||
|
||||
describe('readGithubModelsToken', () => {
|
||||
test('returns undefined in bare mode', () => {
|
||||
const prev = process.env.CLAUDE_CODE_SIMPLE
|
||||
process.env.CLAUDE_CODE_SIMPLE = '1'
|
||||
expect(readGithubModelsToken()).toBeUndefined()
|
||||
if (prev === undefined) {
|
||||
delete process.env.CLAUDE_CODE_SIMPLE
|
||||
} else {
|
||||
process.env.CLAUDE_CODE_SIMPLE = prev
|
||||
}
|
||||
})
|
||||
})
|
||||
|
||||
describe('saveGithubModelsToken / clearGithubModelsToken', () => {
|
||||
test('save returns failure in bare mode', () => {
|
||||
const prev = process.env.CLAUDE_CODE_SIMPLE
|
||||
process.env.CLAUDE_CODE_SIMPLE = '1'
|
||||
const r = saveGithubModelsToken('abc')
|
||||
expect(r.success).toBe(false)
|
||||
expect(r.warning).toContain('Bare mode')
|
||||
if (prev === undefined) {
|
||||
delete process.env.CLAUDE_CODE_SIMPLE
|
||||
} else {
|
||||
process.env.CLAUDE_CODE_SIMPLE = prev
|
||||
}
|
||||
})
|
||||
|
||||
test('clear succeeds in bare mode', () => {
|
||||
const prev = process.env.CLAUDE_CODE_SIMPLE
|
||||
process.env.CLAUDE_CODE_SIMPLE = '1'
|
||||
expect(clearGithubModelsToken().success).toBe(true)
|
||||
if (prev === undefined) {
|
||||
delete process.env.CLAUDE_CODE_SIMPLE
|
||||
} else {
|
||||
process.env.CLAUDE_CODE_SIMPLE = prev
|
||||
}
|
||||
})
|
||||
})
|
||||
|
||||
73
src/utils/githubModelsCredentials.ts
Normal file
73
src/utils/githubModelsCredentials.ts
Normal file
@@ -0,0 +1,73 @@
|
||||
import { isBareMode, isEnvTruthy } from './envUtils.js'
|
||||
import { getSecureStorage } from './secureStorage/index.js'
|
||||
|
||||
/** JSON key in the shared OpenClaude secure storage blob. */
export const GITHUB_MODELS_STORAGE_KEY = 'githubModels' as const

/** Shape of the credential record stored under GITHUB_MODELS_STORAGE_KEY. */
export type GithubModelsCredentialBlob = {
  accessToken: string
}
|
||||
|
||||
export function readGithubModelsToken(): string | undefined {
|
||||
if (isBareMode()) return undefined
|
||||
try {
|
||||
const data = getSecureStorage().read() as
|
||||
| ({ githubModels?: GithubModelsCredentialBlob } & Record<string, unknown>)
|
||||
| null
|
||||
const t = data?.githubModels?.accessToken?.trim()
|
||||
return t || undefined
|
||||
} catch {
|
||||
return undefined
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* If GitHub Models mode is on and no token is in the environment, copy the
|
||||
* stored token into process.env so the OpenAI shim and validation see it.
|
||||
*/
|
||||
export function hydrateGithubModelsTokenFromSecureStorage(): void {
|
||||
if (!isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB)) {
|
||||
return
|
||||
}
|
||||
if (process.env.GITHUB_TOKEN?.trim() || process.env.GH_TOKEN?.trim()) {
|
||||
return
|
||||
}
|
||||
if (isBareMode()) {
|
||||
return
|
||||
}
|
||||
const t = readGithubModelsToken()
|
||||
if (t) {
|
||||
process.env.GITHUB_TOKEN = t
|
||||
}
|
||||
}
|
||||
|
||||
export function saveGithubModelsToken(token: string): {
|
||||
success: boolean
|
||||
warning?: string
|
||||
} {
|
||||
if (isBareMode()) {
|
||||
return { success: false, warning: 'Bare mode: secure storage is disabled.' }
|
||||
}
|
||||
const trimmed = token.trim()
|
||||
if (!trimmed) {
|
||||
return { success: false, warning: 'Token is empty.' }
|
||||
}
|
||||
const secureStorage = getSecureStorage()
|
||||
const prev = secureStorage.read() || {}
|
||||
const merged = {
|
||||
...(prev as Record<string, unknown>),
|
||||
[GITHUB_MODELS_STORAGE_KEY]: { accessToken: trimmed },
|
||||
}
|
||||
return secureStorage.update(merged as typeof prev)
|
||||
}
|
||||
|
||||
export function clearGithubModelsToken(): { success: boolean; warning?: string } {
|
||||
if (isBareMode()) {
|
||||
return { success: true }
|
||||
}
|
||||
const secureStorage = getSecureStorage()
|
||||
const prev = secureStorage.read() || {}
|
||||
const next = { ...(prev as Record<string, unknown>) }
|
||||
delete next[GITHUB_MODELS_STORAGE_KEY]
|
||||
return secureStorage.update(next as typeof prev)
|
||||
}
|
||||
@@ -56,7 +56,7 @@ import { profileReport } from './startupProfiler.js'
|
||||
* 3. Failing to disable leaves the terminal in a broken state
|
||||
*/
|
||||
/* eslint-disable custom-rules/no-sync-fs -- must be sync to flush before process.exit */
|
||||
function cleanupTerminalModes(): void {
|
||||
function cleanupTerminalModes(skipUnmount: boolean = false): void {
|
||||
if (!process.stdout.isTTY) {
|
||||
return
|
||||
}
|
||||
@@ -84,7 +84,7 @@ function cleanupTerminalModes(): void {
|
||||
// Calling unmount() now does the final render on the alt buffer,
|
||||
// unsubscribes from signal-exit, and writes 1049l exactly once.
|
||||
const inst = instances.get(process.stdout)
|
||||
if (inst?.isAltScreenActive) {
|
||||
if (!skipUnmount && inst?.isAltScreenActive) {
|
||||
try {
|
||||
inst.unmount()
|
||||
} catch {
|
||||
@@ -92,6 +92,11 @@ function cleanupTerminalModes(): void {
|
||||
// so printResumeHint still hits the main buffer.
|
||||
writeSync(1, EXIT_ALT_SCREEN)
|
||||
}
|
||||
} else if (skipUnmount && inst?.isAltScreenActive) {
|
||||
// We already unmounted asynchronously in gracefulShutdown, but if we
|
||||
// fallback to manual alt-screen exit here just in case Ink didn't write it or is dead.
|
||||
// Actually, AlternateScreen unmount writes EXIT_ALT_SCREEN, so if we awaited unmount,
|
||||
// we shouldn't emit it again. So we just do nothing here.
|
||||
}
|
||||
// Catches events that arrived during the unmount tree-walk.
|
||||
// detachForShutdown() below also drains.
|
||||
@@ -173,7 +178,7 @@ function printResumeHint(): void {
|
||||
writeSync(
|
||||
1,
|
||||
chalk.dim(
|
||||
`\nResume this session with:\nclaude --resume ${resumeArg}\n`,
|
||||
`\nResume this session with:\nopenclaude --resume ${resumeArg}\n`,
|
||||
),
|
||||
)
|
||||
resumeHintPrinted = true
|
||||
@@ -411,12 +416,17 @@ export async function gracefulShutdown(
|
||||
)
|
||||
const sessionEndTimeoutMs = getSessionEndHookTimeoutMs()
|
||||
|
||||
// Await one tick so React can flush pending updates from commands (e.g. hiding
|
||||
// the autocomplete menu on /exit) before we detach Ink. This lets log-update
|
||||
// erase floating UI elements from the terminal so they don't linger after exit.
|
||||
await new Promise(r => setTimeout(r, 20))
|
||||
|
||||
// Failsafe: guarantee process exits even if cleanup hangs (e.g., MCP connections).
|
||||
// Runs cleanupTerminalModes first so a hung cleanup doesn't leave the terminal dirty.
|
||||
// Budget = max(5s, hook budget + 3.5s headroom for cleanup + analytics flush).
|
||||
failsafeTimer = setTimeout(
|
||||
code => {
|
||||
cleanupTerminalModes()
|
||||
cleanupTerminalModes(true)
|
||||
printResumeHint()
|
||||
forceExit(code)
|
||||
},
|
||||
@@ -433,7 +443,7 @@ export async function gracefulShutdown(
|
||||
// cleanup (e.g., SIGKILL during macOS reboot). Without this, the resume
|
||||
// hint would only appear after cleanup functions, hooks, and analytics
|
||||
// flush — which can take several seconds.
|
||||
cleanupTerminalModes()
|
||||
cleanupTerminalModes(true)
|
||||
printResumeHint()
|
||||
|
||||
// Flush session data first — this is the most critical cleanup. If the
|
||||
|
||||
@@ -18,6 +18,7 @@ const PROVIDER_MANAGED_ENV_VARS = new Set([
|
||||
'CLAUDE_CODE_USE_BEDROCK',
|
||||
'CLAUDE_CODE_USE_VERTEX',
|
||||
'CLAUDE_CODE_USE_FOUNDRY',
|
||||
'CLAUDE_CODE_USE_GITHUB',
|
||||
// Endpoint config (base URLs, project/resource identifiers)
|
||||
'ANTHROPIC_BASE_URL',
|
||||
'ANTHROPIC_BEDROCK_BASE_URL',
|
||||
@@ -147,6 +148,7 @@ export const SAFE_ENV_VARS = new Set([
|
||||
'CLAUDE_CODE_SUBAGENT_MODEL',
|
||||
'CLAUDE_CODE_USE_BEDROCK',
|
||||
'CLAUDE_CODE_USE_FOUNDRY',
|
||||
'CLAUDE_CODE_USE_GITHUB',
|
||||
'CLAUDE_CODE_USE_VERTEX',
|
||||
'DISABLE_AUTOUPDATER',
|
||||
'DISABLE_BUG_COMMAND',
|
||||
|
||||
@@ -7,6 +7,7 @@ import {
|
||||
|
||||
const originalEnv = {
|
||||
CLAUDE_CODE_USE_GEMINI: process.env.CLAUDE_CODE_USE_GEMINI,
|
||||
CLAUDE_CODE_USE_GITHUB: process.env.CLAUDE_CODE_USE_GITHUB,
|
||||
CLAUDE_CODE_USE_OPENAI: process.env.CLAUDE_CODE_USE_OPENAI,
|
||||
CLAUDE_CODE_USE_BEDROCK: process.env.CLAUDE_CODE_USE_BEDROCK,
|
||||
CLAUDE_CODE_USE_VERTEX: process.env.CLAUDE_CODE_USE_VERTEX,
|
||||
@@ -15,6 +16,7 @@ const originalEnv = {
|
||||
|
||||
afterEach(() => {
|
||||
process.env.CLAUDE_CODE_USE_GEMINI = originalEnv.CLAUDE_CODE_USE_GEMINI
|
||||
process.env.CLAUDE_CODE_USE_GITHUB = originalEnv.CLAUDE_CODE_USE_GITHUB
|
||||
process.env.CLAUDE_CODE_USE_OPENAI = originalEnv.CLAUDE_CODE_USE_OPENAI
|
||||
process.env.CLAUDE_CODE_USE_BEDROCK = originalEnv.CLAUDE_CODE_USE_BEDROCK
|
||||
process.env.CLAUDE_CODE_USE_VERTEX = originalEnv.CLAUDE_CODE_USE_VERTEX
|
||||
@@ -23,6 +25,7 @@ afterEach(() => {
|
||||
|
||||
function clearProviderEnv(): void {
|
||||
delete process.env.CLAUDE_CODE_USE_GEMINI
|
||||
delete process.env.CLAUDE_CODE_USE_GITHUB
|
||||
delete process.env.CLAUDE_CODE_USE_OPENAI
|
||||
delete process.env.CLAUDE_CODE_USE_BEDROCK
|
||||
delete process.env.CLAUDE_CODE_USE_VERTEX
|
||||
@@ -38,6 +41,7 @@ test('first-party provider keeps Anthropic account setup flow enabled', () => {
|
||||
|
||||
test.each([
|
||||
['CLAUDE_CODE_USE_OPENAI', 'openai'],
|
||||
['CLAUDE_CODE_USE_GITHUB', 'github'],
|
||||
['CLAUDE_CODE_USE_GEMINI', 'gemini'],
|
||||
['CLAUDE_CODE_USE_BEDROCK', 'bedrock'],
|
||||
['CLAUDE_CODE_USE_VERTEX', 'vertex'],
|
||||
@@ -52,3 +56,11 @@ test.each([
|
||||
expect(usesAnthropicAccountFlow()).toBe(false)
|
||||
},
|
||||
)
|
||||
|
||||
test('GEMINI takes precedence over GitHub when both are set', () => {
|
||||
clearProviderEnv()
|
||||
process.env.CLAUDE_CODE_USE_GEMINI = '1'
|
||||
process.env.CLAUDE_CODE_USE_GITHUB = '1'
|
||||
|
||||
expect(getAPIProvider()).toBe('gemini')
|
||||
})
|
||||
|
||||
@@ -1,20 +1,29 @@
|
||||
import type { AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS } from '../../services/analytics/index.js'
|
||||
import { isEnvTruthy } from '../envUtils.js'
|
||||
|
||||
export type APIProvider = 'firstParty' | 'bedrock' | 'vertex' | 'foundry' | 'openai' | 'gemini'
|
||||
export type APIProvider =
|
||||
| 'firstParty'
|
||||
| 'bedrock'
|
||||
| 'vertex'
|
||||
| 'foundry'
|
||||
| 'openai'
|
||||
| 'gemini'
|
||||
| 'github'
|
||||
|
||||
export function getAPIProvider(): APIProvider {
|
||||
return isEnvTruthy(process.env.CLAUDE_CODE_USE_GEMINI)
|
||||
? 'gemini'
|
||||
: isEnvTruthy(process.env.CLAUDE_CODE_USE_OPENAI)
|
||||
? 'openai'
|
||||
: isEnvTruthy(process.env.CLAUDE_CODE_USE_BEDROCK)
|
||||
? 'bedrock'
|
||||
: isEnvTruthy(process.env.CLAUDE_CODE_USE_VERTEX)
|
||||
? 'vertex'
|
||||
: isEnvTruthy(process.env.CLAUDE_CODE_USE_FOUNDRY)
|
||||
? 'foundry'
|
||||
: 'firstParty'
|
||||
: isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB)
|
||||
? 'github'
|
||||
: isEnvTruthy(process.env.CLAUDE_CODE_USE_OPENAI)
|
||||
? 'openai'
|
||||
: isEnvTruthy(process.env.CLAUDE_CODE_USE_BEDROCK)
|
||||
? 'bedrock'
|
||||
: isEnvTruthy(process.env.CLAUDE_CODE_USE_VERTEX)
|
||||
? 'vertex'
|
||||
: isEnvTruthy(process.env.CLAUDE_CODE_USE_FOUNDRY)
|
||||
? 'foundry'
|
||||
: 'firstParty'
|
||||
}
|
||||
|
||||
export function usesAnthropicAccountFlow(): boolean {
|
||||
|
||||
@@ -5,6 +5,7 @@ import { join } from 'node:path'
|
||||
import test from 'node:test'
|
||||
|
||||
import {
|
||||
buildAtomicChatProfileEnv,
|
||||
buildCodexProfileEnv,
|
||||
buildGeminiProfileEnv,
|
||||
buildLaunchEnv,
|
||||
@@ -381,3 +382,72 @@ test('auto profile falls back to openai when no viable ollama model exists', ()
|
||||
assert.equal(selectAutoProfile(null), 'openai')
|
||||
assert.equal(selectAutoProfile('qwen2.5-coder:7b'), 'ollama')
|
||||
})
|
||||
|
||||
// ── Atomic Chat profile tests ────────────────────────────────────────────────

test('atomic-chat profiles never persist openai api keys', () => {
  // Local inference needs no credential; the profile env must contain only
  // the base URL and model.
  const env = buildAtomicChatProfileEnv('some-local-model', {
    getAtomicChatChatBaseUrl: () => 'http://127.0.0.1:1337/v1',
  })

  assert.deepEqual(env, {
    OPENAI_BASE_URL: 'http://127.0.0.1:1337/v1',
    OPENAI_MODEL: 'some-local-model',
  })
  assert.equal('OPENAI_API_KEY' in env, false)
})

test('atomic-chat profiles respect custom base url', () => {
  // A caller-supplied baseUrl is forwarded to the base-URL resolver.
  const env = buildAtomicChatProfileEnv('my-model', {
    baseUrl: 'http://192.168.1.100:1337',
    getAtomicChatChatBaseUrl: (baseUrl?: string) =>
      baseUrl ? `${baseUrl}/v1` : 'http://127.0.0.1:1337/v1',
  })

  assert.equal(env.OPENAI_BASE_URL, 'http://192.168.1.100:1337/v1')
  assert.equal(env.OPENAI_MODEL, 'my-model')
})

test('matching persisted atomic-chat env is reused for atomic-chat launch', async () => {
  // Persisted env from a previous atomic-chat launch wins over the default
  // model resolver ('other-model' is never used here).
  const env = await buildLaunchEnv({
    profile: 'atomic-chat',
    persisted: profile('atomic-chat', {
      OPENAI_BASE_URL: 'http://127.0.0.1:1337/v1',
      OPENAI_MODEL: 'llama-3.1-8b',
    }),
    goal: 'balanced',
    processEnv: {},
    getAtomicChatChatBaseUrl: () => 'http://127.0.0.1:1337/v1',
    resolveAtomicChatDefaultModel: async () => 'other-model',
  })

  assert.equal(env.OPENAI_BASE_URL, 'http://127.0.0.1:1337/v1')
  assert.equal(env.OPENAI_MODEL, 'llama-3.1-8b')
  assert.equal(env.OPENAI_API_KEY, undefined)
  assert.equal(env.CODEX_API_KEY, undefined)
})

test('atomic-chat launch ignores mismatched persisted openai env', async () => {
  // Persisted env from a DIFFERENT profile ('openai') must be discarded, and
  // live credentials must be stripped from the resulting atomic-chat env.
  const env = await buildLaunchEnv({
    profile: 'atomic-chat',
    persisted: profile('openai', {
      OPENAI_BASE_URL: 'https://api.openai.com/v1',
      OPENAI_MODEL: 'gpt-4o',
      OPENAI_API_KEY: 'sk-persisted',
    }),
    goal: 'balanced',
    processEnv: {
      OPENAI_API_KEY: 'sk-live',
      CODEX_API_KEY: 'codex-live',
      CHATGPT_ACCOUNT_ID: 'acct_live',
    },
    getAtomicChatChatBaseUrl: () => 'http://127.0.0.1:1337/v1',
    resolveAtomicChatDefaultModel: async () => 'local-model',
  })

  assert.equal(env.OPENAI_BASE_URL, 'http://127.0.0.1:1337/v1')
  assert.equal(env.OPENAI_MODEL, 'local-model')
  assert.equal(env.OPENAI_API_KEY, undefined)
  assert.equal(env.CODEX_API_KEY, undefined)
  assert.equal(env.CHATGPT_ACCOUNT_ID, undefined)
})
|
||||
|
||||
@@ -13,7 +13,7 @@ import {
|
||||
const DEFAULT_GEMINI_BASE_URL = 'https://generativelanguage.googleapis.com/v1beta/openai'
|
||||
const DEFAULT_GEMINI_MODEL = 'gemini-2.0-flash'
|
||||
|
||||
export type ProviderProfile = 'openai' | 'ollama' | 'codex' | 'gemini'
|
||||
export type ProviderProfile = 'openai' | 'ollama' | 'codex' | 'gemini' | 'atomic-chat'
|
||||
|
||||
export type ProfileEnv = {
|
||||
OPENAI_BASE_URL?: string
|
||||
@@ -53,6 +53,19 @@ export function buildOllamaProfileEnv(
|
||||
}
|
||||
}
|
||||
|
||||
export function buildAtomicChatProfileEnv(
|
||||
model: string,
|
||||
options: {
|
||||
baseUrl?: string | null
|
||||
getAtomicChatChatBaseUrl: (baseUrl?: string) => string
|
||||
},
|
||||
): ProfileEnv {
|
||||
return {
|
||||
OPENAI_BASE_URL: options.getAtomicChatChatBaseUrl(options.baseUrl ?? undefined),
|
||||
OPENAI_MODEL: model,
|
||||
}
|
||||
}
|
||||
|
||||
export function buildGeminiProfileEnv(options: {
|
||||
model?: string | null
|
||||
baseUrl?: string | null
|
||||
@@ -171,6 +184,8 @@ export async function buildLaunchEnv(options: {
|
||||
processEnv?: NodeJS.ProcessEnv
|
||||
getOllamaChatBaseUrl?: (baseUrl?: string) => string
|
||||
resolveOllamaDefaultModel?: (goal: RecommendationGoal) => Promise<string>
|
||||
getAtomicChatChatBaseUrl?: (baseUrl?: string) => string
|
||||
resolveAtomicChatDefaultModel?: () => Promise<string | null>
|
||||
}): Promise<NodeJS.ProcessEnv> {
|
||||
const processEnv = options.processEnv ?? process.env
|
||||
const persistedEnv =
|
||||
@@ -190,6 +205,7 @@ export async function buildLaunchEnv(options: {
|
||||
}
|
||||
|
||||
delete env.CLAUDE_CODE_USE_OPENAI
|
||||
delete env.CLAUDE_CODE_USE_GITHUB
|
||||
|
||||
env.GEMINI_MODEL =
|
||||
processEnv.GEMINI_MODEL ||
|
||||
@@ -224,6 +240,7 @@ export async function buildLaunchEnv(options: {
|
||||
}
|
||||
|
||||
delete env.CLAUDE_CODE_USE_GEMINI
|
||||
delete env.CLAUDE_CODE_USE_GITHUB
|
||||
delete env.GEMINI_API_KEY
|
||||
delete env.GEMINI_MODEL
|
||||
delete env.GEMINI_BASE_URL
|
||||
@@ -248,6 +265,26 @@ export async function buildLaunchEnv(options: {
|
||||
return env
|
||||
}
|
||||
|
||||
if (options.profile === 'atomic-chat') {
|
||||
const getAtomicChatBaseUrl =
|
||||
options.getAtomicChatChatBaseUrl ?? (() => 'http://127.0.0.1:1337/v1')
|
||||
const resolveModel =
|
||||
options.resolveAtomicChatDefaultModel ?? (async () => null as string | null)
|
||||
|
||||
env.OPENAI_BASE_URL = persistedEnv.OPENAI_BASE_URL || getAtomicChatBaseUrl()
|
||||
env.OPENAI_MODEL =
|
||||
persistedEnv.OPENAI_MODEL ||
|
||||
(await resolveModel()) ||
|
||||
''
|
||||
|
||||
delete env.OPENAI_API_KEY
|
||||
delete env.CODEX_API_KEY
|
||||
delete env.CHATGPT_ACCOUNT_ID
|
||||
delete env.CODEX_ACCOUNT_ID
|
||||
|
||||
return env
|
||||
}
|
||||
|
||||
if (options.profile === 'codex') {
|
||||
env.OPENAI_BASE_URL =
|
||||
persistedEnv.OPENAI_BASE_URL && isCodexBaseUrl(persistedEnv.OPENAI_BASE_URL)
|
||||
|
||||
27
src/utils/ripgrep.test.ts
Normal file
27
src/utils/ripgrep.test.ts
Normal file
@@ -0,0 +1,27 @@
|
||||
import { expect, test } from 'bun:test'
|
||||
|
||||
import { wrapRipgrepUnavailableError } from './ripgrep.ts'
|
||||
|
||||
test('wrapRipgrepUnavailableError explains missing packaged fallback', () => {
  // builtin mode + ENOENT on win32: message should point at the packaged
  // fallback and give a Windows install command.
  const error = wrapRipgrepUnavailableError(
    { code: 'ENOENT', message: 'spawn rg ENOENT' },
    { mode: 'builtin', command: 'C:\\fake\\vendor\\ripgrep\\rg.exe' },
    'win32',
  )

  expect(error.name).toBe('RipgrepUnavailableError')
  expect(error.code).toBe('ENOENT')
  expect(error.message).toContain('packaged ripgrep fallback')
  expect(error.message).toContain('winget install BurntSushi.ripgrep.MSVC')
})

test('wrapRipgrepUnavailableError explains missing system ripgrep', () => {
  // system mode on linux: message should mention PATH and the apt package.
  const error = wrapRipgrepUnavailableError(
    { code: 'ENOENT', message: 'spawn rg ENOENT' },
    { mode: 'system', command: 'rg' },
    'linux',
  )

  expect(error.message).toContain('system ripgrep binary was not found on PATH')
  expect(error.message).toContain('apt install ripgrep')
})
|
||||
@@ -28,6 +28,8 @@ type RipgrepConfig = {
|
||||
argv0?: string
|
||||
}
|
||||
|
||||
type RipgrepErrorLike = Pick<NodeJS.ErrnoException, 'code' | 'message'>
|
||||
|
||||
const getRipgrepConfig = memoize((): RipgrepConfig => {
|
||||
const userWantsSystemRipgrep = isEnvDefinedFalsy(
|
||||
process.env.USE_BUILTIN_RIPGREP,
|
||||
@@ -105,6 +107,52 @@ export class RipgrepTimeoutError extends Error {
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * Error raised when the ripgrep binary cannot be spawned at all (ENOENT and
 * similar), as opposed to ripgrep starting and then failing. Carries the
 * resolved ripgrep config so callers can tell whether the builtin fallback,
 * the system `rg` on PATH, or the embedded binary was attempted.
 */
export class RipgrepUnavailableError extends Error {
  // errno-style code (e.g. 'ENOENT') copied from the original spawn error.
  code?: string | number

  constructor(
    message: string,
    public readonly config: Pick<RipgrepConfig, 'mode' | 'command'>,
    code?: string | number,
  ) {
    super(message)
    this.name = 'RipgrepUnavailableError'
    this.code = code
  }
}
|
||||
|
||||
function getRipgrepInstallHint(platform = process.platform): string {
|
||||
switch (platform) {
|
||||
case 'win32':
|
||||
return 'Install ripgrep and confirm `rg --version` works in the same terminal. Windows: `winget install BurntSushi.ripgrep.MSVC` or `choco install ripgrep`.'
|
||||
case 'darwin':
|
||||
return 'Install ripgrep and confirm `rg --version` works in the same terminal. macOS: `brew install ripgrep`.'
|
||||
default:
|
||||
return 'Install ripgrep and confirm `rg --version` works in the same terminal. Linux: use your distro package manager, for example `apt install ripgrep`.'
|
||||
}
|
||||
}
|
||||
|
||||
export function wrapRipgrepUnavailableError(
|
||||
error: RipgrepErrorLike,
|
||||
config = getRipgrepConfig(),
|
||||
platform = process.platform,
|
||||
): RipgrepUnavailableError {
|
||||
const modeExplanation =
|
||||
config.mode === 'builtin'
|
||||
? 'This install could not locate its packaged ripgrep fallback.'
|
||||
: config.mode === 'system'
|
||||
? 'A working system ripgrep binary was not found on PATH.'
|
||||
: 'The embedded ripgrep binary could not be started.'
|
||||
|
||||
const originalMessage = error.message ? ` Original error: ${error.message}` : ''
|
||||
|
||||
return new RipgrepUnavailableError(
|
||||
`ripgrep (rg) is required for file search but could not be started. ${modeExplanation} ${getRipgrepInstallHint(platform)}${originalMessage}`,
|
||||
config,
|
||||
error.code,
|
||||
)
|
||||
}
|
||||
|
||||
function ripGrepRaw(
|
||||
args: string[],
|
||||
target: string,
|
||||
@@ -275,7 +323,9 @@ async function ripGrepFileCount(
|
||||
child.on('error', err => {
|
||||
if (settled) return
|
||||
settled = true
|
||||
reject(err)
|
||||
reject(
|
||||
err.code === 'ENOENT' ? wrapRipgrepUnavailableError(err) : err,
|
||||
)
|
||||
})
|
||||
})
|
||||
}
|
||||
@@ -337,7 +387,9 @@ export async function ripGrepStream(
|
||||
child.on('error', err => {
|
||||
if (settled) return
|
||||
settled = true
|
||||
reject(err)
|
||||
reject(
|
||||
err.code === 'ENOENT' ? wrapRipgrepUnavailableError(err) : err,
|
||||
)
|
||||
})
|
||||
})
|
||||
}
|
||||
@@ -383,7 +435,9 @@ export async function ripGrep(
|
||||
// These should be surfaced to the user rather than silently returning empty results
|
||||
const CRITICAL_ERROR_CODES = ['ENOENT', 'EACCES', 'EPERM']
|
||||
if (CRITICAL_ERROR_CODES.includes(error.code as string)) {
|
||||
reject(error)
|
||||
reject(
|
||||
error.code === 'ENOENT' ? wrapRipgrepUnavailableError(error) : error,
|
||||
)
|
||||
return
|
||||
}
|
||||
|
||||
|
||||
@@ -124,6 +124,10 @@ export async function generateSessionTitle(
|
||||
level: 'error',
|
||||
})
|
||||
logEvent('tengu_session_title_generated', { success: false })
|
||||
return null
|
||||
|
||||
// Fallback: When using 3P providers without a compatible schema,
|
||||
// default to the application name.
|
||||
return 'Open Claude'
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -99,6 +99,18 @@ const TEAMMATE_ENV_VARS = [
|
||||
'CLAUDE_CODE_USE_BEDROCK',
|
||||
'CLAUDE_CODE_USE_VERTEX',
|
||||
'CLAUDE_CODE_USE_FOUNDRY',
|
||||
'CLAUDE_CODE_USE_GITHUB',
|
||||
'CLAUDE_CODE_USE_GEMINI',
|
||||
'CLAUDE_CODE_USE_OPENAI',
|
||||
'GITHUB_TOKEN',
|
||||
'GH_TOKEN',
|
||||
'OPENAI_API_KEY',
|
||||
'OPENAI_BASE_URL',
|
||||
'OPENAI_MODEL',
|
||||
'GEMINI_API_KEY',
|
||||
'GEMINI_BASE_URL',
|
||||
'GEMINI_MODEL',
|
||||
'GOOGLE_API_KEY',
|
||||
// Custom API endpoint
|
||||
'ANTHROPIC_BASE_URL',
|
||||
// Config directory override
|
||||
|
||||
130
test_atomic_chat_provider.py
Normal file
130
test_atomic_chat_provider.py
Normal file
@@ -0,0 +1,130 @@
|
||||
"""
|
||||
test_atomic_chat_provider.py
|
||||
Run: pytest test_atomic_chat_provider.py -v
|
||||
"""
|
||||
|
||||
import pytest
|
||||
from unittest.mock import AsyncMock, MagicMock, patch
|
||||
from atomic_chat_provider import (
|
||||
atomic_chat,
|
||||
list_atomic_chat_models,
|
||||
check_atomic_chat_running,
|
||||
)
|
||||
|
||||
|
||||
@pytest.mark.asyncio
async def test_atomic_chat_running_true():
    """A 200 from the health endpoint means the local server is running."""
    fake_response = MagicMock()
    fake_response.status_code = 200
    with patch("atomic_chat_provider.httpx.AsyncClient") as client_cls:
        client = client_cls.return_value.__aenter__.return_value
        client.get = AsyncMock(return_value=fake_response)
        assert await check_atomic_chat_running() is True
|
||||
|
||||
|
||||
@pytest.mark.asyncio
async def test_atomic_chat_running_false_on_exception():
    """A transport error (connection refused) is reported as not running."""
    with patch("atomic_chat_provider.httpx.AsyncClient") as client_cls:
        client = client_cls.return_value.__aenter__.return_value
        client.get = AsyncMock(side_effect=Exception("refused"))
        assert await check_atomic_chat_running() is False
|
||||
|
||||
|
||||
@pytest.mark.asyncio
async def test_list_models_returns_ids():
    """Model ids are extracted from the OpenAI-style `data` list."""
    fake_response = MagicMock()
    fake_response.status_code = 200
    fake_response.raise_for_status = MagicMock()
    fake_response.json.return_value = {
        "data": [{"id": "llama-3.1-8b"}, {"id": "mistral-7b"}],
    }
    with patch("atomic_chat_provider.httpx.AsyncClient") as client_cls:
        client = client_cls.return_value.__aenter__.return_value
        client.get = AsyncMock(return_value=fake_response)
        model_ids = await list_atomic_chat_models()
        for expected in ("llama-3.1-8b", "mistral-7b"):
            assert expected in model_ids
|
||||
|
||||
|
||||
@pytest.mark.asyncio
async def test_list_models_empty_on_failure():
    """If the server is unreachable, the model list degrades to empty."""
    with patch("atomic_chat_provider.httpx.AsyncClient") as client_cls:
        client = client_cls.return_value.__aenter__.return_value
        client.get = AsyncMock(side_effect=Exception("down"))
        assert await list_atomic_chat_models() == []
|
||||
|
||||
|
||||
@pytest.mark.asyncio
async def test_atomic_chat_returns_anthropic_format():
    """OpenAI chat-completion replies are translated to Anthropic message shape."""
    fake_response = MagicMock()
    fake_response.raise_for_status = MagicMock()
    fake_response.json.return_value = {
        "id": "chatcmpl-abc123",
        "choices": [{"message": {"content": "42 is the answer."}}],
        "usage": {"prompt_tokens": 10, "completion_tokens": 8},
    }
    with patch("atomic_chat_provider.httpx.AsyncClient") as client_cls:
        client = client_cls.return_value.__aenter__.return_value
        client.post = AsyncMock(return_value=fake_response)
        message = await atomic_chat(
            model="llama-3.1-8b",
            messages=[{"role": "user", "content": "What is 6*7?"}],
        )
        assert message["type"] == "message"
        assert message["role"] == "assistant"
        assert "42" in message["content"][0]["text"]
        # prompt/completion token counts map onto input/output tokens.
        assert message["usage"]["input_tokens"] == 10
        assert message["usage"]["output_tokens"] == 8
|
||||
|
||||
|
||||
@pytest.mark.asyncio
async def test_atomic_chat_prepends_system():
    """A `system` argument becomes the first message, with role `system`."""
    seen_payload = {}

    async def record_post(url, json=None, **kwargs):
        # Capture the outgoing request body, then return a minimal valid reply.
        seen_payload.update(json or {})
        reply = MagicMock()
        reply.raise_for_status = MagicMock()
        reply.json.return_value = {
            "id": "chatcmpl-xyz",
            "choices": [{"message": {"content": "ok"}}],
            "usage": {"prompt_tokens": 1, "completion_tokens": 1},
        }
        return reply

    with patch("atomic_chat_provider.httpx.AsyncClient") as client_cls:
        client_cls.return_value.__aenter__.return_value.post = record_post
        await atomic_chat(
            model="llama-3.1-8b",
            messages=[{"role": "user", "content": "Hi"}],
            system="Be helpful.",
        )
        first_message = seen_payload["messages"][0]
        assert first_message["role"] == "system"
        assert "helpful" in first_message["content"]
|
||||
|
||||
|
||||
@pytest.mark.asyncio
async def test_atomic_chat_sends_correct_payload():
    """Request parameters are forwarded verbatim and streaming is disabled."""
    seen_payload = {}

    async def record_post(url, json=None, **kwargs):
        # Capture the outgoing request body, then return a minimal valid reply.
        seen_payload.update(json or {})
        reply = MagicMock()
        reply.raise_for_status = MagicMock()
        reply.json.return_value = {
            "id": "chatcmpl-xyz",
            "choices": [{"message": {"content": "ok"}}],
            "usage": {"prompt_tokens": 1, "completion_tokens": 1},
        }
        return reply

    with patch("atomic_chat_provider.httpx.AsyncClient") as client_cls:
        client_cls.return_value.__aenter__.return_value.post = record_post
        await atomic_chat(
            model="test-model",
            messages=[{"role": "user", "content": "Test"}],
            max_tokens=2048,
            temperature=0.5,
        )
        assert seen_payload["model"] == "test-model"
        assert seen_payload["max_tokens"] == 2048
        assert seen_payload["temperature"] == 0.5
        assert seen_payload["stream"] is False
|
||||
Reference in New Issue
Block a user