Compare commits: fix/386-wi… → fix/theme-… (57 commits)
| SHA1 |
|---|
| 2f4a06dd40 |
| 4c50977f3c |
| b126e38b1a |
| 6e94dd9136 |
| 91e4cfb15b |
| f4ac709fa6 |
| 8aaa4f22ac |
| a7f5982f64 |
| cb8f8b7ac2 |
| 07621a6f8d |
| 692471850f |
| 68c296833d |
| 9ccaa7a675 |
| 598651f423 |
| c385047abb |
| 42b121bd0d |
| 32fbd0c7b4 |
| e30ad17ae0 |
| c328fdf9e2 |
| 4ad6bc50c1 |
| 284d9bda36 |
| 537c469c3a |
| ccaa193eec |
| 2caf2fd982 |
| ad724dc3a4 |
| 648ae8053b |
| 3188f6ac66 |
| 69ea1f1e4a |
| f9ce81bfb3 |
| 4975cfc2e0 |
| 600c01faf7 |
| b07bafa5bd |
| 85aa8b0985 |
| e365cb4010 |
| 52d33a87a0 |
| b4bd95b477 |
| 1e057025d6 |
| aff2bd87e4 |
| 72e6a945fe |
| 39f3b2babd |
| ff7d49990d |
| 8ece290087 |
| 6c61790063 |
| 26eef92fe7 |
| 112df59117 |
| 8724d59d48 |
| af08b4f762 |
| 5012c160c9 |
| c1934974aa |
| 94de37d44f |
| 3b3aca716d |
| d5852ca73d |
| c534aa5771 |
| 60d3d8961a |
| 3b9893b586 |
| daf2c90b6d |
| 4ac7367733 |
.env.example (90 changes)
@@ -248,3 +248,93 @@ ANTHROPIC_API_KEY=sk-ant-your-key-here
# Enable debug logging
# CLAUDE_DEBUG=1

# =============================================================================
# WEB SEARCH (OPTIONAL)
# =============================================================================
# OpenClaude includes a web search tool. By default it uses DuckDuckGo (free)
# or the provider's native search (Anthropic firstParty / vertex).
#
# Set one API key below to enable a provider. That's it.

# ── Provider API keys — set ONE of these ────────────────────────────

# Tavily (AI-optimized search, recommended)
# TAVILY_API_KEY=tvly-your-key-here

# Exa (neural/semantic search)
# EXA_API_KEY=your-exa-key-here

# You.com (RAG-ready snippets)
# YOU_API_KEY=your-you-key-here

# Jina (s.jina.ai endpoint)
# JINA_API_KEY=your-jina-key-here

# Bing Web Search
# BING_API_KEY=your-bing-key-here

# Mojeek (privacy-focused)
# MOJEEK_API_KEY=your-mojeek-key-here

# Linkup
# LINKUP_API_KEY=your-linkup-key-here

# Firecrawl (premium, uses @mendable/firecrawl-js)
# FIRECRAWL_API_KEY=fc-your-key-here

# ── Provider selection mode ─────────────────────────────────────────
#
# WEB_SEARCH_PROVIDER controls fallback behavior:
#
# "auto" (default) — try all configured providers, fall through on failure
# "custom" — custom API only, throw on failure (NOT in auto chain)
# "firecrawl" — firecrawl only
# "tavily" — tavily only
# "exa" — exa only
# "you" — you.com only
# "jina" — jina only
# "bing" — bing only
# "mojeek" — mojeek only
# "linkup" — linkup only
# "ddg" — duckduckgo only
# "native" — anthropic native / codex only
#
# Auto mode priority: firecrawl → tavily → exa → you → jina → bing → mojeek →
# linkup → ddg
# Note: "custom" is NOT in the auto chain. To use the custom API provider,
# you must explicitly set WEB_SEARCH_PROVIDER=custom.
#
# WEB_SEARCH_PROVIDER=auto

# ── Built-in custom API presets ─────────────────────────────────────
#
# Use with WEB_KEY for the API key:
# WEB_PROVIDER=searxng|google|brave|serpapi
# WEB_KEY=your-api-key-here

# ── Custom API endpoint (advanced) ──────────────────────────────────
#
# WEB_SEARCH_API — base URL of your search endpoint
# WEB_QUERY_PARAM — query parameter name (default: "q")
# WEB_METHOD — GET or POST (default: GET)
# WEB_PARAMS — extra static query params as JSON: {"lang":"en","count":"10"}
# WEB_URL_TEMPLATE — URL template with {query} for path embedding
# WEB_BODY_TEMPLATE — custom POST body with {query} placeholder
# WEB_AUTH_HEADER — header name for API key (default: "Authorization")
# WEB_AUTH_SCHEME — prefix before key (default: "Bearer")
# WEB_HEADERS — extra headers as "Name: value; Name2: value2"
# WEB_JSON_PATH — dot-path to results array in response

# ── Custom API security guardrails ──────────────────────────────────
#
# The custom provider enforces security guardrails by default.
# Override these only if you understand the risks.
#
# WEB_CUSTOM_TIMEOUT_SEC=15 — request timeout in seconds (default 15)
# WEB_CUSTOM_MAX_BODY_KB=300 — max POST body size in KB (default 300)
# WEB_CUSTOM_ALLOW_ARBITRARY_HEADERS=false — set "true" to use non-standard headers
# WEB_CUSTOM_ALLOW_HTTP=false — set "true" to allow http:// URLs
# WEB_CUSTOM_ALLOW_PRIVATE=false — set "true" to target localhost/private IPs
#   (needed for self-hosted SearXNG)
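As a worked example of the advanced variables above, here is a sketch of a custom-endpoint config for a self-hosted SearXNG instance. The host and port are hypothetical, and the `format=json` parameter and `results` response key are assumptions about SearXNG's JSON API rather than something this file guarantees:

```bash
# Sketch: route the web search tool to a self-hosted SearXNG (hypothetical host)
WEB_SEARCH_PROVIDER=custom
WEB_SEARCH_API=http://localhost:8888/search
WEB_QUERY_PARAM=q
WEB_PARAMS={"format":"json"}
WEB_JSON_PATH=results
# Guardrail overrides needed for a plain-http, localhost target (see above)
WEB_CUSTOM_ALLOW_HTTP=true
WEB_CUSTOM_ALLOW_PRIVATE=true
```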
.github/workflows/pr-checks.yml (13 changes, vendored)
@@ -29,6 +29,13 @@ jobs:
        with:
          bun-version: 1.3.11

      - name: Set up Python
        uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0
        with:
          python-version: "3.12"
          cache: "pip"
          cache-dependency-path: python/requirements.txt

      - name: Install dependencies
        run: bun install --frozen-lockfile

@@ -38,6 +45,12 @@ jobs:
      - name: Full unit test suite
        run: bun test --max-concurrency=1

      - name: Install Python test dependencies
        run: python -m pip install -r python/requirements.txt

      - name: Python unit tests
        run: python -m pytest -q python/tests

      - name: Suspicious PR intent scan
        run: bun run security:pr-scan -- --base ${{ github.event.pull_request.base.sha || 'origin/main' }}
      - name: Provider tests
.gitignore (3 changes, vendored)
@@ -6,4 +6,7 @@ dist/
!.env.example
.openclaude-profile.json
reports/
GEMINI.md
package-lock.json
/.claude
coverage/
README.md (35 changes)
@@ -185,6 +185,41 @@ With Firecrawl enabled:

Free tier at [firecrawl.dev](https://firecrawl.dev) includes 500 credits. The key is optional.

---

## Headless gRPC Server

OpenClaude can be run as a headless gRPC service, allowing you to integrate its agentic capabilities (tools, bash, file editing) into other applications, CI/CD pipelines, or custom user interfaces. The server uses bidirectional streaming to send real-time text chunks and tool calls, and to request permission for sensitive commands.

### 1. Start the gRPC Server

Start the core engine as a gRPC service on `localhost:50051`:

```bash
npm run dev:grpc
```

#### Configuration

| Variable | Default | Description |
|-----------|-------------|------------------------------------------------|
| `GRPC_PORT` | `50051` | Port the gRPC server listens on |
| `GRPC_HOST` | `localhost` | Bind address. Use `0.0.0.0` to expose on all interfaces (not recommended without authentication) |

### 2. Run the Test CLI Client

We provide a lightweight CLI client that communicates exclusively over gRPC. It acts just like the main interactive CLI, rendering colors, streaming tokens, and prompting you for tool permissions (y/n) via the gRPC `action_required` event.

In a separate terminal, run:

```bash
npm run dev:grpc:cli
```

*Note: The gRPC definitions are located in `src/proto/openclaude.proto`. You can use this file to generate clients in Python, Go, Rust, or any other language.*
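To sanity-check the server and client end to end, note that both scripts honor the variables in the configuration table above, so you can run the pair on a non-default port (the port value here is arbitrary):

```bash
# Terminal 1: start the engine on a non-default port
GRPC_PORT=50052 npm run dev:grpc

# Terminal 2: point the test client at the same port
GRPC_PORT=50052 npm run dev:grpc:cli
```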
---

## Source Build And Local Development
bun.lock (128 changes)
@@ -13,6 +13,8 @@
"@anthropic-ai/vertex-sdk": "0.14.4",
"@commander-js/extra-typings": "12.1.0",
"@growthbook/growthbook": "1.6.5",
"@grpc/grpc-js": "^1.14.3",
"@grpc/proto-loader": "^0.8.0",
"@mendable/firecrawl-js": "4.18.1",
"@modelcontextprotocol/sdk": "1.29.0",
"@opentelemetry/api": "1.9.1",
@@ -51,7 +53,7 @@
"ignore": "7.0.5",
"indent-string": "5.0.0",
"jsonc-parser": "3.3.1",
- "lodash-es": "4.18.0",
+ "lodash-es": "4.18.1",
"lru-cache": "11.2.7",
"marked": "15.0.12",
"p-map": "7.0.4",
@@ -84,10 +86,14 @@
"@types/bun": "1.3.11",
"@types/node": "25.5.0",
"@types/react": "19.2.14",
"tsx": "^4.21.0",
"typescript": "5.9.3",
},
},
},
"overrides": {
"lodash-es": "4.18.1",
},
"packages": {
"@alcalzone/ansi-tokenize": ["@alcalzone/ansi-tokenize@0.3.0", "", { "dependencies": { "ansi-styles": "^6.2.1", "is-fullwidth-code-point": "^5.0.0" } }, "sha512-p+CMKJ93HFmLkjXKlXiVGlMQEuRb6H0MokBSwUsX+S6BRX8eV5naFZpQJFfJHjRZY0Hmnqy1/r6UWl3x+19zYA=="],

@@ -181,6 +187,58 @@

"@emnapi/runtime": ["@emnapi/runtime@1.9.2", "", { "dependencies": { "tslib": "^2.4.0" } }, "sha512-3U4+MIWHImeyu1wnmVygh5WlgfYDtyf0k8AbLhMFxOipihf6nrWC4syIm/SwEeec0mNSafiiNnMJwbza/Is6Lw=="],

"@esbuild/aix-ppc64": ["@esbuild/aix-ppc64@0.27.7", "", { "os": "aix", "cpu": "ppc64" }, "sha512-EKX3Qwmhz1eMdEJokhALr0YiD0lhQNwDqkPYyPhiSwKrh7/4KRjQc04sZ8db+5DVVnZ1LmbNDI1uAMPEUBnQPg=="],

"@esbuild/android-arm": ["@esbuild/android-arm@0.27.7", "", { "os": "android", "cpu": "arm" }, "sha512-jbPXvB4Yj2yBV7HUfE2KHe4GJX51QplCN1pGbYjvsyCZbQmies29EoJbkEc+vYuU5o45AfQn37vZlyXy4YJ8RQ=="],

"@esbuild/android-arm64": ["@esbuild/android-arm64@0.27.7", "", { "os": "android", "cpu": "arm64" }, "sha512-62dPZHpIXzvChfvfLJow3q5dDtiNMkwiRzPylSCfriLvZeq0a1bWChrGx/BbUbPwOrsWKMn8idSllklzBy+dgQ=="],

"@esbuild/android-x64": ["@esbuild/android-x64@0.27.7", "", { "os": "android", "cpu": "x64" }, "sha512-x5VpMODneVDb70PYV2VQOmIUUiBtY3D3mPBG8NxVk5CogneYhkR7MmM3yR/uMdITLrC1ml/NV1rj4bMJuy9MCg=="],

"@esbuild/darwin-arm64": ["@esbuild/darwin-arm64@0.27.7", "", { "os": "darwin", "cpu": "arm64" }, "sha512-5lckdqeuBPlKUwvoCXIgI2D9/ABmPq3Rdp7IfL70393YgaASt7tbju3Ac+ePVi3KDH6N2RqePfHnXkaDtY9fkw=="],

"@esbuild/darwin-x64": ["@esbuild/darwin-x64@0.27.7", "", { "os": "darwin", "cpu": "x64" }, "sha512-rYnXrKcXuT7Z+WL5K980jVFdvVKhCHhUwid+dDYQpH+qu+TefcomiMAJpIiC2EM3Rjtq0sO3StMV/+3w3MyyqQ=="],

"@esbuild/freebsd-arm64": ["@esbuild/freebsd-arm64@0.27.7", "", { "os": "freebsd", "cpu": "arm64" }, "sha512-B48PqeCsEgOtzME2GbNM2roU29AMTuOIN91dsMO30t+Ydis3z/3Ngoj5hhnsOSSwNzS+6JppqWsuhTp6E82l2w=="],

"@esbuild/freebsd-x64": ["@esbuild/freebsd-x64@0.27.7", "", { "os": "freebsd", "cpu": "x64" }, "sha512-jOBDK5XEjA4m5IJK3bpAQF9/Lelu/Z9ZcdhTRLf4cajlB+8VEhFFRjWgfy3M1O4rO2GQ/b2dLwCUGpiF/eATNQ=="],

"@esbuild/linux-arm": ["@esbuild/linux-arm@0.27.7", "", { "os": "linux", "cpu": "arm" }, "sha512-RkT/YXYBTSULo3+af8Ib0ykH8u2MBh57o7q/DAs3lTJlyVQkgQvlrPTnjIzzRPQyavxtPtfg0EopvDyIt0j1rA=="],

"@esbuild/linux-arm64": ["@esbuild/linux-arm64@0.27.7", "", { "os": "linux", "cpu": "arm64" }, "sha512-RZPHBoxXuNnPQO9rvjh5jdkRmVizktkT7TCDkDmQ0W2SwHInKCAV95GRuvdSvA7w4VMwfCjUiPwDi0ZO6Nfe9A=="],

"@esbuild/linux-ia32": ["@esbuild/linux-ia32@0.27.7", "", { "os": "linux", "cpu": "ia32" }, "sha512-GA48aKNkyQDbd3KtkplYWT102C5sn/EZTY4XROkxONgruHPU72l+gW+FfF8tf2cFjeHaRbWpOYa/uRBz/Xq1Pg=="],

"@esbuild/linux-loong64": ["@esbuild/linux-loong64@0.27.7", "", { "os": "linux", "cpu": "none" }, "sha512-a4POruNM2oWsD4WKvBSEKGIiWQF8fZOAsycHOt6JBpZ+JN2n2JH9WAv56SOyu9X5IqAjqSIPTaJkqN8F7XOQ5Q=="],

"@esbuild/linux-mips64el": ["@esbuild/linux-mips64el@0.27.7", "", { "os": "linux", "cpu": "none" }, "sha512-KabT5I6StirGfIz0FMgl1I+R1H73Gp0ofL9A3nG3i/cYFJzKHhouBV5VWK1CSgKvVaG4q1RNpCTR2LuTVB3fIw=="],

"@esbuild/linux-ppc64": ["@esbuild/linux-ppc64@0.27.7", "", { "os": "linux", "cpu": "ppc64" }, "sha512-gRsL4x6wsGHGRqhtI+ifpN/vpOFTQtnbsupUF5R5YTAg+y/lKelYR1hXbnBdzDjGbMYjVJLJTd2OFmMewAgwlQ=="],

"@esbuild/linux-riscv64": ["@esbuild/linux-riscv64@0.27.7", "", { "os": "linux", "cpu": "none" }, "sha512-hL25LbxO1QOngGzu2U5xeXtxXcW+/GvMN3ejANqXkxZ/opySAZMrc+9LY/WyjAan41unrR3YrmtTsUpwT66InQ=="],

"@esbuild/linux-s390x": ["@esbuild/linux-s390x@0.27.7", "", { "os": "linux", "cpu": "s390x" }, "sha512-2k8go8Ycu1Kb46vEelhu1vqEP+UeRVj2zY1pSuPdgvbd5ykAw82Lrro28vXUrRmzEsUV0NzCf54yARIK8r0fdw=="],

"@esbuild/linux-x64": ["@esbuild/linux-x64@0.27.7", "", { "os": "linux", "cpu": "x64" }, "sha512-hzznmADPt+OmsYzw1EE33ccA+HPdIqiCRq7cQeL1Jlq2gb1+OyWBkMCrYGBJ+sxVzve2ZJEVeePbLM2iEIZSxA=="],

"@esbuild/netbsd-arm64": ["@esbuild/netbsd-arm64@0.27.7", "", { "os": "none", "cpu": "arm64" }, "sha512-b6pqtrQdigZBwZxAn1UpazEisvwaIDvdbMbmrly7cDTMFnw/+3lVxxCTGOrkPVnsYIosJJXAsILG9XcQS+Yu6w=="],

"@esbuild/netbsd-x64": ["@esbuild/netbsd-x64@0.27.7", "", { "os": "none", "cpu": "x64" }, "sha512-OfatkLojr6U+WN5EDYuoQhtM+1xco+/6FSzJJnuWiUw5eVcicbyK3dq5EeV/QHT1uy6GoDhGbFpprUiHUYggrw=="],

"@esbuild/openbsd-arm64": ["@esbuild/openbsd-arm64@0.27.7", "", { "os": "openbsd", "cpu": "arm64" }, "sha512-AFuojMQTxAz75Fo8idVcqoQWEHIXFRbOc1TrVcFSgCZtQfSdc1RXgB3tjOn/krRHENUB4j00bfGjyl2mJrU37A=="],

"@esbuild/openbsd-x64": ["@esbuild/openbsd-x64@0.27.7", "", { "os": "openbsd", "cpu": "x64" }, "sha512-+A1NJmfM8WNDv5CLVQYJ5PshuRm/4cI6WMZRg1by1GwPIQPCTs1GLEUHwiiQGT5zDdyLiRM/l1G0Pv54gvtKIg=="],

"@esbuild/openharmony-arm64": ["@esbuild/openharmony-arm64@0.27.7", "", { "os": "none", "cpu": "arm64" }, "sha512-+KrvYb/C8zA9CU/g0sR6w2RBw7IGc5J2BPnc3dYc5VJxHCSF1yNMxTV5LQ7GuKteQXZtspjFbiuW5/dOj7H4Yw=="],

"@esbuild/sunos-x64": ["@esbuild/sunos-x64@0.27.7", "", { "os": "sunos", "cpu": "x64" }, "sha512-ikktIhFBzQNt/QDyOL580ti9+5mL/YZeUPKU2ivGtGjdTYoqz6jObj6nOMfhASpS4GU4Q/Clh1QtxWAvcYKamA=="],

"@esbuild/win32-arm64": ["@esbuild/win32-arm64@0.27.7", "", { "os": "win32", "cpu": "arm64" }, "sha512-7yRhbHvPqSpRUV7Q20VuDwbjW5kIMwTHpptuUzV+AA46kiPze5Z7qgt6CLCK3pWFrHeNfDd1VKgyP4O+ng17CA=="],

"@esbuild/win32-ia32": ["@esbuild/win32-ia32@0.27.7", "", { "os": "win32", "cpu": "ia32" }, "sha512-SmwKXe6VHIyZYbBLJrhOoCJRB/Z1tckzmgTLfFYOfpMAx63BJEaL9ExI8x7v0oAO3Zh6D/Oi1gVxEYr5oUCFhw=="],

"@esbuild/win32-x64": ["@esbuild/win32-x64@0.27.7", "", { "os": "win32", "cpu": "x64" }, "sha512-56hiAJPhwQ1R4i+21FVF7V8kSD5zZTdHcVuRFMW0hn753vVfQN8xlx4uOPT4xoGH0Z/oVATuR82AiqSTDIpaHg=="],

"@growthbook/growthbook": ["@growthbook/growthbook@1.6.5", "", { "dependencies": { "dom-mutator": "^0.6.0" } }, "sha512-mUaMsgeUTpRIUOTn33EUXHRK6j7pxBjwqH4WpQyq+pukjd1AIzWlEa6w7i6bInJUcweGgP2beXZmaP6b6UPn7A=="],

"@grpc/grpc-js": ["@grpc/grpc-js@1.14.3", "", { "dependencies": { "@grpc/proto-loader": "^0.8.0", "@js-sdsl/ordered-map": "^4.4.2" } }, "sha512-Iq8QQQ/7X3Sac15oB6p0FmUg/klxQvXLeileoqrTRGJYLV+/9tubbr9ipz0GKHjmXVsgFPo/+W+2cA8eNcR+XA=="],
@@ -453,7 +511,7 @@

"cli-highlight": ["cli-highlight@2.1.11", "", { "dependencies": { "chalk": "^4.0.0", "highlight.js": "^10.7.1", "mz": "^2.4.0", "parse5": "^5.1.1", "parse5-htmlparser2-tree-adapter": "^6.0.0", "yargs": "^16.0.0" }, "bin": { "highlight": "bin/highlight" } }, "sha512-9KDcoEVwyUXrjcJNvHD0NFc/hiwe/WPVYIleQh2O1N2Zro5gWJZ/K+3DGn8w8P/F6FxOgzyC5bxDyHIgCSPhGg=="],

- "cliui": ["cliui@7.0.4", "", { "dependencies": { "string-width": "^4.2.0", "strip-ansi": "^6.0.0", "wrap-ansi": "^7.0.0" } }, "sha512-OcRE68cOsVMXp1Yvonl/fzkQOyjLSu/8bhPDfQt0e0/Eb283TKP20Fs2MqoPsr9SwA595rRCA+QMzYc9nBP+JQ=="],
+ "cliui": ["cliui@8.0.1", "", { "dependencies": { "string-width": "^4.2.0", "strip-ansi": "^6.0.1", "wrap-ansi": "^7.0.0" } }, "sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ=="],

"code-excerpt": ["code-excerpt@4.0.0", "", { "dependencies": { "convert-to-spaces": "^2.0.1" } }, "sha512-xxodCmBen3iy2i0WtAK8FlFNrRzjUqjRsMfho58xT/wvZU1YTM3fCnRjcy1gJPMepaRlgm/0e6w8SpWHpn3/cA=="],
@@ -521,6 +579,8 @@

"es-set-tostringtag": ["es-set-tostringtag@2.1.0", "", { "dependencies": { "es-errors": "^1.3.0", "get-intrinsic": "^1.2.6", "has-tostringtag": "^1.0.2", "hasown": "^2.0.2" } }, "sha512-j6vWzfrGVfyXxge+O0x5sh6cvxAog0a/4Rdd2K36zCMV5eJ+/+tOAngRO8cODMNWbVRdVlmGZQL2YS3yR8bIUA=="],

"esbuild": ["esbuild@0.27.7", "", { "optionalDependencies": { "@esbuild/aix-ppc64": "0.27.7", "@esbuild/android-arm": "0.27.7", "@esbuild/android-arm64": "0.27.7", "@esbuild/android-x64": "0.27.7", "@esbuild/darwin-arm64": "0.27.7", "@esbuild/darwin-x64": "0.27.7", "@esbuild/freebsd-arm64": "0.27.7", "@esbuild/freebsd-x64": "0.27.7", "@esbuild/linux-arm": "0.27.7", "@esbuild/linux-arm64": "0.27.7", "@esbuild/linux-ia32": "0.27.7", "@esbuild/linux-loong64": "0.27.7", "@esbuild/linux-mips64el": "0.27.7", "@esbuild/linux-ppc64": "0.27.7", "@esbuild/linux-riscv64": "0.27.7", "@esbuild/linux-s390x": "0.27.7", "@esbuild/linux-x64": "0.27.7", "@esbuild/netbsd-arm64": "0.27.7", "@esbuild/netbsd-x64": "0.27.7", "@esbuild/openbsd-arm64": "0.27.7", "@esbuild/openbsd-x64": "0.27.7", "@esbuild/openharmony-arm64": "0.27.7", "@esbuild/sunos-x64": "0.27.7", "@esbuild/win32-arm64": "0.27.7", "@esbuild/win32-ia32": "0.27.7", "@esbuild/win32-x64": "0.27.7" }, "bin": { "esbuild": "bin/esbuild" } }, "sha512-IxpibTjyVnmrIQo5aqNpCgoACA/dTKLTlhMHihVHhdkxKyPO1uBBthumT0rdHmcsk9uMonIWS0m4FljWzILh3w=="],

"escalade": ["escalade@3.2.0", "", {}, "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA=="],

"escape-html": ["escape-html@1.0.3", "", {}, "sha512-NiSupZ4OeuGwr68lGIeym/ksIZMJodUGOSCZ/FSnTxcrekbvqrgdUxlJOMpijaKZVjAJrWrGs/6Jy8OMuyj9ow=="],
@@ -567,6 +627,8 @@

"fresh": ["fresh@2.0.0", "", {}, "sha512-Rx/WycZ60HOaqLKAi6cHRKKI7zxWbJ31MhntmtwMoaTeF7XFH9hhBp8vITaMidfljRQ6eYWCKkaTK+ykVJHP2A=="],

"fsevents": ["fsevents@2.3.3", "", { "os": "darwin" }, "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw=="],

"function-bind": ["function-bind@1.1.2", "", {}, "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA=="],

"fuse.js": ["fuse.js@7.1.0", "", {}, "sha512-trLf4SzuuUxfusZADLINj+dE8clK1frKdmqiJNb1Es75fmI5oY6X2mxLVUciLLjxqw/xr72Dhy+lER6dGd02FQ=="],
@@ -585,6 +647,8 @@

"get-stream": ["get-stream@9.0.1", "", { "dependencies": { "@sec-ant/readable-stream": "^0.4.1", "is-stream": "^4.0.1" } }, "sha512-kVCxPF3vQM/N0B1PmoqVUqgHP+EeVjmZSQn+1oCRPxd2P21P2F19lIgbR3HBosbB1PUhOAoctJnfEn2GbN2eZA=="],

"get-tsconfig": ["get-tsconfig@4.13.7", "", { "dependencies": { "resolve-pkg-maps": "^1.0.0" } }, "sha512-7tN6rFgBlMgpBML5j8typ92BKFi2sFQvIdpAqLA2beia5avZDrMs0FLZiM5etShWq5irVyGcGMEA1jcDaK7A/Q=="],

"google-auth-library": ["google-auth-library@9.15.1", "", { "dependencies": { "base64-js": "^1.3.0", "ecdsa-sig-formatter": "^1.0.11", "gaxios": "^6.1.1", "gcp-metadata": "^6.1.0", "gtoken": "^7.0.0", "jws": "^4.0.0" } }, "sha512-Jb6Z0+nvECVz+2lzSMt9u98UsoakXxA2HGHMCxh+so3n90XgYWkq5dur19JAJV7ONiJY22yBTyJB1TSkvPq9Ng=="],

"google-logging-utils": ["google-logging-utils@0.0.2", "", {}, "sha512-NEgUnEcBiP5HrPzufUkBzJOD/Sxsco3rLNo1F1TNf7ieU8ryUzBhqba8r756CjLX7rn3fHl6iLEwPYuqpoKgQQ=="],
@@ -657,7 +721,7 @@

"locate-path": ["locate-path@5.0.0", "", { "dependencies": { "p-locate": "^4.1.0" } }, "sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g=="],

- "lodash-es": ["lodash-es@4.18.0", "", {}, "sha512-koAgswPPA+UTaPN64Etp+PGP+WT6oqOS2NMi5yDkMaiGw9qY4VxQbQF0mtKMyr4BlTznWyzePV5UpECTJQmSUA=="],
+ "lodash-es": ["lodash-es@4.18.1", "", {}, "sha512-J8xewKD/Gk22OZbhpOVSwcs60zhd95ESDwezOFuA3/099925PdHJ7OFHNTGtajL3AlZkykD32HykiMo+BIBI8A=="],

"lodash.camelcase": ["lodash.camelcase@4.3.0", "", {}, "sha512-TwuEnCnxbc3rAvhf/LbG7tJUDzhqXyFnv3dtzLOPgCG/hODL7WFnsbwktkD7yUV0RrreP/l1PALq/YSg6VvjlA=="],
@@ -761,6 +825,8 @@

"require-main-filename": ["require-main-filename@2.0.0", "", {}, "sha512-NKN5kMDylKuldxYLSUfrbo5Tuzh4hd+2E8NPPX02mZtn1VuREQToYe/ZdlJy+J3uCpfaiGF05e7B8W0iXbQHmg=="],

"resolve-pkg-maps": ["resolve-pkg-maps@1.0.0", "", {}, "sha512-seS2Tj26TBVOC2NIc2rOe2y2ZO7efxITtLZcGSOnHHNOQ7CkiUBfw0Iw2ck6xkIhPwLhKNLS8BO+hEpngQlqzw=="],

"retry": ["retry@0.12.0", "", {}, "sha512-9LkiTwjUh6rT555DtE9rTX+BKByPfrMzEAtnlEtdEwr3Nkffwiihqe2bWADg+OQRjt9gl6ICdmB/ZFDCGAtSow=="],

"router": ["router@2.2.0", "", { "dependencies": { "debug": "^4.4.0", "depd": "^2.0.0", "is-promise": "^4.0.0", "parseurl": "^1.3.3", "path-to-regexp": "^8.0.0" } }, "sha512-nLTrUKm2UyiL7rlhapu/Zl45FwNgkZGaCpZbIHajDYgwlJCOzLSk+cIPAnsEqV955GjILJnKbdQC1nVPz+gAYQ=="],
@@ -831,6 +897,8 @@

"tslib": ["tslib@1.14.1", "", {}, "sha512-Xni35NKzjgMrwevysHTCArtLDpPvye8zV/0E4EyYn43P7/7qvQwPh9BGkHewbMulVntbigmcT7rdX3BNo9wRJg=="],

"tsx": ["tsx@4.21.0", "", { "dependencies": { "esbuild": "~0.27.0", "get-tsconfig": "^4.7.5" }, "optionalDependencies": { "fsevents": "~2.3.3" }, "bin": { "tsx": "dist/cli.mjs" } }, "sha512-5C1sg4USs1lfG0GFb2RLXsdpXqBSEhAaA/0kPL01wxzpMqLILNxIxIOKiILz+cdg/pLnOUxFYOR5yhHU666wbw=="],

"turndown": ["turndown@7.2.2", "", { "dependencies": { "@mixmark-io/domino": "^2.2.0" } }, "sha512-1F7db8BiExOKxjSMU2b7if62D/XOyQyZbPKq/nUwopfgnHlqXHqQ0lvfUTeUIr1lZJzOPFn43dODyMSIfvWRKQ=="],

"type-fest": ["type-fest@4.41.0", "", {}, "sha512-TeTSQ6H5YHvpqVwBRcnLDCBnDOHWYu7IvGbHT6N8AOymcr9PJGjc1GTtiWZTYg0NCgYwvnYWEkVChQAr9bjfwA=="],
@@ -881,9 +949,9 @@

"yaml": ["yaml@2.8.3", "", { "bin": { "yaml": "bin.mjs" } }, "sha512-AvbaCLOO2Otw/lW5bmh9d/WEdcDFdQp2Z2ZUH3pX9U2ihyUY0nvLv7J6TrWowklRGPYbB/IuIMfYgxaCPg5Bpg=="],

- "yargs": ["yargs@16.2.0", "", { "dependencies": { "cliui": "^7.0.2", "escalade": "^3.1.1", "get-caller-file": "^2.0.5", "require-directory": "^2.1.1", "string-width": "^4.2.0", "y18n": "^5.0.5", "yargs-parser": "^20.2.2" } }, "sha512-D1mvvtDG0L5ft/jGWkLpG1+m0eQxOfaBvTNELraWj22wSVUMWxZUvYgJYcKh6jGGIkJFhH4IZPQhR4TKpc8mBw=="],
+ "yargs": ["yargs@17.7.2", "", { "dependencies": { "cliui": "^8.0.1", "escalade": "^3.1.1", "get-caller-file": "^2.0.5", "require-directory": "^2.1.1", "string-width": "^4.2.3", "y18n": "^5.0.5", "yargs-parser": "^21.1.1" } }, "sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w=="],

- "yargs-parser": ["yargs-parser@20.2.9", "", {}, "sha512-y11nGElTIV+CT3Zv9t7VKl+Q3hTQoT9a1Qzezhhl6Rp21gJ/IVTW7Z3y9EWXhuUBC2Shnf+DX0antecpAwSP8w=="],
+ "yargs-parser": ["yargs-parser@21.1.1", "", {}, "sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw=="],

"yoctocolors": ["yoctocolors@2.1.2", "", {}, "sha512-CzhO+pFNo8ajLM2d2IW/R93ipy99LWjtwblvC1RsoSUMZgyLbYFr221TnSNT7GjGdYui6P459mw9JH/g/zW2ug=="],
@@ -891,8 +959,6 @@

"zod-to-json-schema": ["zod-to-json-schema@3.25.2", "", { "peerDependencies": { "zod": "^3.25.28 || ^4" } }, "sha512-O/PgfnpT1xKSDeQYSCfRI5Gy3hPf91mKVDuYLUHZJMiDFptvP41MSnWofm8dnCm0256ZNfZIM7DSzuSMAFnjHA=="],

"@anthropic-ai/sandbox-runtime/lodash-es": ["lodash-es@4.17.23", "", {}, "sha512-kVI48u3PZr38HdYz98UmfPnXl2DXrpdctLrFLCd3kOx1xUkOmpFPx7gCWWM5MPkL/fD8zb+Ph0QzjGFs4+hHWg=="],

"@aws-crypto/crc32/@aws-crypto/util": ["@aws-crypto/util@5.2.0", "", { "dependencies": { "@aws-sdk/types": "^3.222.0", "@smithy/util-utf8": "^2.0.0", "tslib": "^2.6.2" } }, "sha512-4RkU9EsI6ZpBve5fseQlGNUWKMa1RLPQ1dnjnQoe07ldfIzcsGb5hC5W0Dm7u423KWzawlrpbjXBrXCEv9zazQ=="],

"@aws-crypto/crc32/tslib": ["tslib@2.8.1", "", {}, "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w=="],
@@ -1085,8 +1151,6 @@

"@emnapi/runtime/tslib": ["tslib@2.8.1", "", {}, "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w=="],

"@grpc/proto-loader/yargs": ["yargs@17.7.2", "", { "dependencies": { "cliui": "^8.0.1", "escalade": "^3.1.1", "get-caller-file": "^2.0.5", "require-directory": "^2.1.1", "string-width": "^4.2.3", "y18n": "^5.0.5", "yargs-parser": "^21.1.1" } }, "sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w=="],

"@opentelemetry/exporter-trace-otlp-grpc/@opentelemetry/core": ["@opentelemetry/core@1.30.1", "", { "dependencies": { "@opentelemetry/semantic-conventions": "1.28.0" }, "peerDependencies": { "@opentelemetry/api": ">=1.0.0 <1.10.0" } }, "sha512-OOCM2C/QIURhJMuKaekP3TRBxBKxG/TWWA0TL2J6nXUtDnuCtccy49LUJF8xPFXMX+0LMcxFpCo8M9cGY1W6rQ=="],

"@opentelemetry/exporter-trace-otlp-grpc/@opentelemetry/otlp-exporter-base": ["@opentelemetry/otlp-exporter-base@0.57.2", "", { "dependencies": { "@opentelemetry/core": "1.30.1", "@opentelemetry/otlp-transformer": "0.57.2" }, "peerDependencies": { "@opentelemetry/api": "^1.3.0" } }, "sha512-XdxEzL23Urhidyebg5E6jZoaiW5ygP/mRjxLHixogbqwDy2Faduzb5N0o/Oi+XTIJu+iyxXdVORjXax+Qgfxag=="],
@@ -1305,6 +1369,8 @@

"cli-highlight/chalk": ["chalk@4.1.2", "", { "dependencies": { "ansi-styles": "^4.1.0", "supports-color": "^7.1.0" } }, "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA=="],

"cli-highlight/yargs": ["yargs@16.2.0", "", { "dependencies": { "cliui": "^7.0.2", "escalade": "^3.1.1", "get-caller-file": "^2.0.5", "require-directory": "^2.1.1", "string-width": "^4.2.0", "y18n": "^5.0.5", "yargs-parser": "^20.2.2" } }, "sha512-D1mvvtDG0L5ft/jGWkLpG1+m0eQxOfaBvTNELraWj22wSVUMWxZUvYgJYcKh6jGGIkJFhH4IZPQhR4TKpc8mBw=="],

"cliui/string-width": ["string-width@4.2.3", "", { "dependencies": { "emoji-regex": "^8.0.0", "is-fullwidth-code-point": "^3.0.0", "strip-ansi": "^6.0.1" } }, "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g=="],

"cliui/strip-ansi": ["strip-ansi@6.0.1", "", { "dependencies": { "ansi-regex": "^5.0.1" } }, "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A=="],
@@ -1359,12 +1425,6 @@

"@aws-sdk/nested-clients/@smithy/util-base64/@smithy/util-buffer-from": ["@smithy/util-buffer-from@4.2.2", "", { "dependencies": { "@smithy/is-array-buffer": "^4.2.2", "tslib": "^2.6.2" } }, "sha512-FDXD7cvUoFWwN6vtQfEta540Y/YBe5JneK3SoZg9bThSoOAC/eGeYEua6RkBgKjGa/sz6Y+DuBZj3+YEY21y4Q=="],

"@grpc/proto-loader/yargs/cliui": ["cliui@8.0.1", "", { "dependencies": { "string-width": "^4.2.0", "strip-ansi": "^6.0.1", "wrap-ansi": "^7.0.0" } }, "sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ=="],

"@grpc/proto-loader/yargs/string-width": ["string-width@4.2.3", "", { "dependencies": { "emoji-regex": "^8.0.0", "is-fullwidth-code-point": "^3.0.0", "strip-ansi": "^6.0.1" } }, "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g=="],

"@grpc/proto-loader/yargs/yargs-parser": ["yargs-parser@21.1.1", "", {}, "sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw=="],

"@opentelemetry/exporter-trace-otlp-grpc/@opentelemetry/core/@opentelemetry/semantic-conventions": ["@opentelemetry/semantic-conventions@1.28.0", "", {}, "sha512-lp4qAiMTD4sNWW4DbKLBkfiMZ4jbAboJIGOQr5DvciMRI494OapieI9qiODpOt0XBr1LjIDy1xAGAnVs5supTA=="],

"@opentelemetry/exporter-trace-otlp-grpc/@opentelemetry/otlp-transformer/@opentelemetry/api-logs": ["@opentelemetry/api-logs@0.57.2", "", { "dependencies": { "@opentelemetry/api": "^1.3.0" } }, "sha512-uIX52NnTM0iBh84MShlpouI7UKqkZ7MrUszTmaypHBu4r7NofznSnQRfJ+uUeDtQDj6w8eFGg5KBLDAwAPz1+A=="],
@@ -1431,6 +1491,12 @@

"cli-highlight/chalk/ansi-styles": ["ansi-styles@4.3.0", "", { "dependencies": { "color-convert": "^2.0.1" } }, "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg=="],

"cli-highlight/yargs/cliui": ["cliui@7.0.4", "", { "dependencies": { "string-width": "^4.2.0", "strip-ansi": "^6.0.0", "wrap-ansi": "^7.0.0" } }, "sha512-OcRE68cOsVMXp1Yvonl/fzkQOyjLSu/8bhPDfQt0e0/Eb283TKP20Fs2MqoPsr9SwA595rRCA+QMzYc9nBP+JQ=="],

"cli-highlight/yargs/string-width": ["string-width@4.2.3", "", { "dependencies": { "emoji-regex": "^8.0.0", "is-fullwidth-code-point": "^3.0.0", "strip-ansi": "^6.0.1" } }, "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g=="],

"cli-highlight/yargs/yargs-parser": ["yargs-parser@20.2.9", "", {}, "sha512-y11nGElTIV+CT3Zv9t7VKl+Q3hTQoT9a1Qzezhhl6Rp21gJ/IVTW7Z3y9EWXhuUBC2Shnf+DX0antecpAwSP8w=="],

"cliui/string-width/emoji-regex": ["emoji-regex@8.0.0", "", {}, "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A=="],

"cliui/string-width/is-fullwidth-code-point": ["is-fullwidth-code-point@3.0.0", "", {}, "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg=="],
@@ -1471,16 +1537,6 @@

"@aws-sdk/nested-clients/@smithy/util-base64/@smithy/util-buffer-from/@smithy/is-array-buffer": ["@smithy/is-array-buffer@4.2.2", "", { "dependencies": { "tslib": "^2.6.2" } }, "sha512-n6rQ4N8Jj4YTQO3YFrlgZuwKodf4zUFs7EJIWH86pSCWBaAtAGBFfCM7Wx6D2bBJ2xqFNxGBSrUWswT3M0VJow=="],

"@grpc/proto-loader/yargs/cliui/strip-ansi": ["strip-ansi@6.0.1", "", { "dependencies": { "ansi-regex": "^5.0.1" } }, "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A=="],

"@grpc/proto-loader/yargs/cliui/wrap-ansi": ["wrap-ansi@7.0.0", "", { "dependencies": { "ansi-styles": "^4.0.0", "string-width": "^4.1.0", "strip-ansi": "^6.0.0" } }, "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q=="],

"@grpc/proto-loader/yargs/string-width/emoji-regex": ["emoji-regex@8.0.0", "", {}, "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A=="],

"@grpc/proto-loader/yargs/string-width/is-fullwidth-code-point": ["is-fullwidth-code-point@3.0.0", "", {}, "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg=="],

"@grpc/proto-loader/yargs/string-width/strip-ansi": ["strip-ansi@6.0.1", "", { "dependencies": { "ansi-regex": "^5.0.1" } }, "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A=="],

"@opentelemetry/otlp-grpc-exporter-base/@opentelemetry/otlp-transformer/@opentelemetry/resources/@opentelemetry/semantic-conventions": ["@opentelemetry/semantic-conventions@1.28.0", "", {}, "sha512-lp4qAiMTD4sNWW4DbKLBkfiMZ4jbAboJIGOQr5DvciMRI494OapieI9qiODpOt0XBr1LjIDy1xAGAnVs5supTA=="],

"@opentelemetry/otlp-grpc-exporter-base/@opentelemetry/otlp-transformer/@opentelemetry/sdk-trace-base/@opentelemetry/semantic-conventions": ["@opentelemetry/semantic-conventions@1.28.0", "", {}, "sha512-lp4qAiMTD4sNWW4DbKLBkfiMZ4jbAboJIGOQr5DvciMRI494OapieI9qiODpOt0XBr1LjIDy1xAGAnVs5supTA=="],
@@ -1501,6 +1557,16 @@

"@smithy/smithy-client/@smithy/util-stream/@smithy/node-http-handler/@smithy/querystring-builder": ["@smithy/querystring-builder@2.2.0", "", { "dependencies": { "@smithy/types": "^2.12.0", "@smithy/util-uri-escape": "^2.2.0", "tslib": "^2.6.2" } }, "sha512-L1kSeviUWL+emq3CUVSgdogoM/D9QMFaqxL/dd0X7PCNWmPXqt+ExtrBjqT0V7HLN03Vs9SuiLrG3zy3JGnE5A=="],

"cli-highlight/yargs/cliui/strip-ansi": ["strip-ansi@6.0.1", "", { "dependencies": { "ansi-regex": "^5.0.1" } }, "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A=="],

"cli-highlight/yargs/cliui/wrap-ansi": ["wrap-ansi@7.0.0", "", { "dependencies": { "ansi-styles": "^4.0.0", "string-width": "^4.1.0", "strip-ansi": "^6.0.0" } }, "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q=="],

"cli-highlight/yargs/string-width/emoji-regex": ["emoji-regex@8.0.0", "", {}, "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A=="],

"cli-highlight/yargs/string-width/is-fullwidth-code-point": ["is-fullwidth-code-point@3.0.0", "", {}, "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg=="],

"cli-highlight/yargs/string-width/strip-ansi": ["strip-ansi@6.0.1", "", { "dependencies": { "ansi-regex": "^5.0.1" } }, "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A=="],

"qrcode/yargs/cliui/strip-ansi": ["strip-ansi@6.0.1", "", { "dependencies": { "ansi-regex": "^5.0.1" } }, "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A=="],

"qrcode/yargs/cliui/wrap-ansi": ["wrap-ansi@6.2.0", "", { "dependencies": { "ansi-styles": "^4.0.0", "string-width": "^4.1.0", "strip-ansi": "^6.0.0" } }, "sha512-r6lPcBGxZXlIcymEu7InxDMhdW0KDxpLgoFLcguasxCaJ/SOIZwINatK9KY/tf+ZrlywOKU0UDj3ATXUBfxJXA=="],
@@ -1513,16 +1579,16 @@

"yargs/string-width/strip-ansi/ansi-regex": ["ansi-regex@5.0.1", "", {}, "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ=="],

"@grpc/proto-loader/yargs/cliui/strip-ansi/ansi-regex": ["ansi-regex@5.0.1", "", {}, "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ=="],

"@grpc/proto-loader/yargs/cliui/wrap-ansi/ansi-styles": ["ansi-styles@4.3.0", "", { "dependencies": { "color-convert": "^2.0.1" } }, "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg=="],

"@grpc/proto-loader/yargs/string-width/strip-ansi/ansi-regex": ["ansi-regex@5.0.1", "", {}, "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ=="],

"@smithy/smithy-client/@smithy/util-stream/@smithy/fetch-http-handler/@smithy/querystring-builder/@smithy/util-uri-escape": ["@smithy/util-uri-escape@2.2.0", "", { "dependencies": { "tslib": "^2.6.2" } }, "sha512-jtmJMyt1xMD/d8OtbVJ2gFZOSKc+ueYJZPW20ULW1GOp/q/YIM0wNh+u8ZFao9UaIGz4WoPW8hC64qlWLIfoDA=="],

"@smithy/smithy-client/@smithy/util-stream/@smithy/node-http-handler/@smithy/querystring-builder/@smithy/util-uri-escape": ["@smithy/util-uri-escape@2.2.0", "", { "dependencies": { "tslib": "^2.6.2" } }, "sha512-jtmJMyt1xMD/d8OtbVJ2gFZOSKc+ueYJZPW20ULW1GOp/q/YIM0wNh+u8ZFao9UaIGz4WoPW8hC64qlWLIfoDA=="],

"cli-highlight/yargs/cliui/strip-ansi/ansi-regex": ["ansi-regex@5.0.1", "", {}, "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ=="],

"cli-highlight/yargs/cliui/wrap-ansi/ansi-styles": ["ansi-styles@4.3.0", "", { "dependencies": { "color-convert": "^2.0.1" } }, "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg=="],

"cli-highlight/yargs/string-width/strip-ansi/ansi-regex": ["ansi-regex@5.0.1", "", {}, "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ=="],

"qrcode/yargs/cliui/strip-ansi/ansi-regex": ["ansi-regex@5.0.1", "", {}, "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ=="],

"qrcode/yargs/cliui/wrap-ansi/ansi-styles": ["ansi-styles@4.3.0", "", { "dependencies": { "color-convert": "^2.0.1" } }, "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg=="],
@@ -137,10 +137,9 @@ export OPENAI_MODEL=llama-3.3-70b-versatile
### Mistral

```bash
-export CLAUDE_CODE_USE_OPENAI=1
-export OPENAI_API_KEY=...
-export OPENAI_BASE_URL=https://api.mistral.ai/v1
-export OPENAI_MODEL=mistral-large-latest
+export CLAUDE_CODE_USE_MISTRAL=1
+export MISTRAL_API_KEY=...
+export MISTRAL_MODEL=mistral-large-latest
```

### Azure OpenAI
docs/litellm-setup.md (144 changes, new file)
@@ -0,0 +1,144 @@
# LiteLLM Setup

OpenClaude can connect to LiteLLM through LiteLLM's OpenAI-compatible proxy.

## Overview

LiteLLM is an open-source LLM gateway that provides a unified API to 100+ model providers. By running the LiteLLM Proxy, you can route OpenClaude requests through LiteLLM to access any of its supported providers — all while using OpenClaude's existing OpenAI-compatible provider path.

## Prerequisites

- LiteLLM installed (`pip install litellm[proxy]`)
- A `litellm_config.yaml` or equivalent LiteLLM configuration
- LiteLLM Proxy running on a local or remote port

## 1. Start the LiteLLM Proxy

### Basic installation

```bash
pip install litellm[proxy]
```

### Configure LiteLLM

Create a `litellm_config.yaml` with your desired model aliases:

```yaml
model_list:
  - model_name: gpt-4o
    litellm_params:
      model: openai/gpt-4o
      api_key: os.environ/OPENAI_API_KEY

  - model_name: claude-sonnet-4
    litellm_params:
      model: anthropic/claude-sonnet-4-5-20250929
      api_key: os.environ/ANTHROPIC_API_KEY

  - model_name: gemini-2.5-flash
    litellm_params:
      model: gemini/gemini-2.5-flash
      api_key: os.environ/GEMINI_API_KEY

  - model_name: llama-3.3-70b
    litellm_params:
      model: together_ai/meta-llama/Llama-3.3-70B-Instruct-Turbo
      api_key: os.environ/TOGETHER_API_KEY
```

### Run the proxy

```bash
litellm --config litellm_config.yaml --port 4000
```

The proxy will start at `http://localhost:4000` by default.
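Before pointing OpenClaude at it, you can confirm the proxy is serving your aliases. This sketch assumes LiteLLM's OpenAI-compatible `/v1/models` listing endpoint; drop the header if you start the proxy without a master key:

```bash
# List the model aliases the proxy currently serves
curl http://localhost:4000/v1/models \
  -H "Authorization: Bearer sk-my-master-key"
```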
## 2. Point OpenClaude to LiteLLM

### Option A: Environment Variables

```bash
export CLAUDE_CODE_USE_OPENAI=1
export OPENAI_BASE_URL=http://localhost:4000
export OPENAI_API_KEY=<your-master-key-or-placeholder>
export OPENAI_MODEL=<your-litellm-model-alias>
openclaude
```

Replace `<your-litellm-model-alias>` with a model name from your `litellm_config.yaml` (e.g., `gpt-4o`, `claude-sonnet-4`, `gemini-2.5-flash`).

### Option B: Using /provider

1. Run `openclaude`
2. Type `/provider` to open the provider setup flow
3. Choose the **OpenAI-compatible** option
4. When prompted for the API key, enter the key required by your LiteLLM proxy. If your local LiteLLM setup does not enforce auth, you may still need to enter a placeholder value
5. When prompted for the base URL, enter `http://localhost:4000`
6. When prompted for the model, enter the LiteLLM model name or alias you configured
7. Save the provider configuration

## 3. Example LiteLLM Configs

### Multi-provider routing with spend tracking

```yaml
model_list:
  - model_name: gpt-4o
    litellm_params:
      model: openai/gpt-4o
      api_key: os.environ/OPENAI_API_KEY

  - model_name: claude-sonnet-4
    litellm_params:
      model: anthropic/claude-sonnet-4-5-20250929
      api_key: os.environ/ANTHROPIC_API_KEY

  - model_name: deepseek-chat
    litellm_params:
      model: deepseek/deepseek-chat
      api_key: os.environ/DEEPSEEK_API_KEY

litellm_settings:
  set_verbose: false
  num_retries: 3
```

### With a master key for auth

```bash
# Start proxy with a master key
litellm --config litellm_config.yaml --port 4000 --master_key sk-my-master-key

# Connect OpenClaude
export CLAUDE_CODE_USE_OPENAI=1
export OPENAI_BASE_URL=http://localhost:4000
export OPENAI_API_KEY=sk-my-master-key
export OPENAI_MODEL=gpt-4o
openclaude
```

## 4. Notes

- `OPENAI_MODEL` must match the **LiteLLM model alias** defined in your config, not the upstream raw provider model name.
- If your proxy requires authentication, use the proxy key (or `master_key`) in `OPENAI_API_KEY`.
- LiteLLM's OpenAI-compatible endpoint accepts the same request format as OpenAI, so OpenClaude works without any code changes.
- You can switch between any provider configured in LiteLLM by simply changing the `OPENAI_MODEL` value — no need to reconfigure OpenClaude (see the sketch below).
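For example, hopping from the OpenAI alias to the Anthropic alias defined in the sample config above is a one-variable change:

```bash
# Same proxy, different backend: only the alias changes
export OPENAI_MODEL=claude-sonnet-4
openclaude
```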
## 5. Troubleshooting

| Issue | Likely Cause | Fix |
|-------|--------------|-----|
| 404 or Model Not Found | Model alias doesn't exist in LiteLLM config | Verify the `model_name` in `litellm_config.yaml` matches `OPENAI_MODEL` |
| Connection Refused | LiteLLM proxy isn't running | Start the proxy with `litellm --config litellm_config.yaml --port 4000` |
| Auth Failed | Missing or wrong `master_key` | Set the correct key in `OPENAI_API_KEY` |
| Upstream provider error | The backend provider key is missing or invalid | Ensure the upstream API key (e.g., `OPENAI_API_KEY`) is set in your LiteLLM proxy process environment |
| Tools fail but chat works | The selected model has weak function/tool calling support | Switch to a model with strong tool support (e.g., GPT-4o, Claude Sonnet) |

## 6. Resources

- [LiteLLM Proxy Docs](https://docs.litellm.ai/docs/proxy/quick_start)
- [LiteLLM Provider List](https://docs.litellm.ai/docs/providers)
- [LiteLLM OpenAI-Compatible Endpoints](https://docs.litellm.ai/docs/proxy/openai_compatible_proxy)
package.json (12 changes)
@@ -1,6 +1,6 @@
{
  "name": "@gitlawb/openclaude",
-  "version": "0.1.7",
+  "version": "0.1.8",
  "description": "Claude Code opened to any LLM — OpenAI, Gemini, DeepSeek, Ollama, and 200+ models",
  "type": "module",
  "bin": {
@@ -30,6 +30,8 @@
    "profile:code": "bun run profile:init -- --provider ollama --model qwen2.5-coder:7b",
    "dev:fast": "bun run profile:fast && bun run dev:ollama:fast",
    "dev:code": "bun run profile:code && bun run dev:profile",
    "dev:grpc": "bun run scripts/start-grpc.ts",
    "dev:grpc:cli": "bun run scripts/grpc-cli.ts",
    "start": "node dist/cli.mjs",
    "test": "bun test",
    "test:coverage": "bun test --coverage --coverage-reporter=lcov --coverage-dir=coverage --max-concurrency=1 && bun run scripts/render-coverage-heatmap.ts",
@@ -57,6 +59,8 @@
    "@anthropic-ai/vertex-sdk": "0.14.4",
    "@commander-js/extra-typings": "12.1.0",
    "@growthbook/growthbook": "1.6.5",
    "@grpc/grpc-js": "^1.14.3",
    "@grpc/proto-loader": "^0.8.0",
    "@mendable/firecrawl-js": "4.18.1",
    "@modelcontextprotocol/sdk": "1.29.0",
    "@opentelemetry/api": "1.9.1",
@@ -95,7 +99,7 @@
    "ignore": "7.0.5",
    "indent-string": "5.0.0",
    "jsonc-parser": "3.3.1",
-    "lodash-es": "4.18.0",
+    "lodash-es": "4.18.1",
    "lru-cache": "11.2.7",
    "marked": "15.0.12",
    "p-map": "7.0.4",
@@ -128,6 +132,7 @@
    "@types/bun": "1.3.11",
    "@types/node": "25.5.0",
    "@types/react": "19.2.14",
    "tsx": "^4.21.0",
    "typescript": "5.9.3"
  },
  "engines": {
@@ -150,5 +155,8 @@
  "license": "SEE LICENSE FILE",
  "publishConfig": {
    "access": "public"
  },
  "overrides": {
    "lodash-es": "4.18.1"
  }
}
python/requirements.txt (3 changes, new file)
@@ -0,0 +1,3 @@
pytest==7.4.4
pytest-asyncio==0.23.3
httpx==0.25.2

@@ -112,6 +112,14 @@ def build_default_providers() -> list[Provider]:
            big_model=big if "gemini" in big else "gemini-2.5-pro",
            small_model=small if "gemini" in small else "gemini-2.0-flash",
        ),
        Provider(
            name="mistral",
            ping_url="",
            api_key_env="MISTRAL_API_KEY",
            cost_per_1k_tokens=0.0001,
            big_model=big if "mistral" in big else "devstral-latest",
            small_model=small if "small" in small else "ministral-3b-latest",
        ),
        Provider(
            name="ollama",
            ping_url=f"{ollama_url}/api/tags",
scripts/grpc-cli.ts (121 changes, new file)
@@ -0,0 +1,121 @@
import * as grpc from '@grpc/grpc-js'
import * as protoLoader from '@grpc/proto-loader'
import path from 'path'
import * as readline from 'readline'

const PROTO_PATH = path.resolve(import.meta.dirname, '../src/proto/openclaude.proto')

const packageDefinition = protoLoader.loadSync(PROTO_PATH, {
  keepCase: true,
  longs: String,
  enums: String,
  defaults: true,
  oneofs: true,
})

const protoDescriptor = grpc.loadPackageDefinition(packageDefinition) as any
const openclaudeProto = protoDescriptor.openclaude.v1

const rl = readline.createInterface({
  input: process.stdin,
  output: process.stdout
})

function askQuestion(query: string): Promise<string> {
  return new Promise(resolve => {
    rl.question(query, resolve)
  })
}

async function main() {
  const host = process.env.GRPC_HOST || 'localhost'
  const port = process.env.GRPC_PORT || '50051'
  const client = new openclaudeProto.AgentService(
    `${host}:${port}`,
    grpc.credentials.createInsecure()
  )

  let call: grpc.ClientDuplexStream<any, any> | null = null

  const startStream = () => {
    call = client.Chat()
    let textStreamed = false

    call.on('data', async (serverMessage: any) => {
      if (serverMessage.text_chunk) {
        process.stdout.write(serverMessage.text_chunk.text)
        textStreamed = true
      } else if (serverMessage.tool_start) {
        console.log(`\n\x1b[36m[Tool Call]\x1b[0m \x1b[1m${serverMessage.tool_start.tool_name}\x1b[0m`)
        console.log(`\x1b[90m${serverMessage.tool_start.arguments_json}\x1b[0m\n`)
      } else if (serverMessage.tool_result) {
        console.log(`\n\x1b[32m[Tool Result]\x1b[0m \x1b[1m${serverMessage.tool_result.tool_name}\x1b[0m`)
        const out = serverMessage.tool_result.output
        if (out.length > 500) {
          console.log(`\x1b[90m${out.substring(0, 500)}...\n(Output truncated, total length: ${out.length})\x1b[0m`)
        } else {
          console.log(`\x1b[90m${out}\x1b[0m`)
        }
      } else if (serverMessage.action_required) {
        const action = serverMessage.action_required
        console.log(`\n\x1b[33m[Action Required]\x1b[0m`)
        const reply = await askQuestion(`\x1b[1m${action.question}\x1b[0m (y/n) > `)

        call?.write({
          input: {
            prompt_id: action.prompt_id,
            reply: reply.trim()
          }
        })
      } else if (serverMessage.done) {
        if (!textStreamed && serverMessage.done.full_text) {
          process.stdout.write(serverMessage.done.full_text)
        }
        textStreamed = false
        console.log('\n\x1b[32m[Generation Complete]\x1b[0m')
        promptUser()
      } else if (serverMessage.error) {
        console.error(`\n\x1b[31m[Server Error]\x1b[0m ${serverMessage.error.message}`)
        promptUser()
      }
    })

    call.on('end', () => {
      console.log('\n\x1b[90m[Stream closed by server]\x1b[0m')
      // Don't prompt user here, let 'done' or 'error' handlers do it
    })

    call.on('error', (err: Error) => {
      console.error('\n\x1b[31m[Stream Error]\x1b[0m', err.message)
      promptUser()
    })
  }

  const promptUser = async () => {
    const message = await askQuestion('\n\x1b[35m> \x1b[0m')

    if (message.trim().toLowerCase() === '/exit' || message.trim().toLowerCase() === '/quit') {
      console.log('Bye!')
      rl.close()
      process.exit(0)
    }

    if (!call || call.destroyed) {
      startStream()
    }

    call!.write({
      request: {
        session_id: 'cli-session-1',
        message: message,
        working_directory: process.cwd()
      }
    })
  }

  console.log('\x1b[32mOpenClaude gRPC CLI\x1b[0m')
  console.log('\x1b[90mType /exit to quit.\x1b[0m')
  promptUser()
}

main()
@@ -11,6 +11,7 @@ import {
  buildAtomicChatProfileEnv,
  buildCodexProfileEnv,
  buildGeminiProfileEnv,
  buildMistralProfileEnv,
  buildOllamaProfileEnv,
  buildOpenAIProfileEnv,
  createProfileFile,
@@ -37,7 +38,7 @@ function parseArg(name: string): string | null {

function parseProviderArg(): ProviderProfile | 'auto' {
  const p = parseArg('--provider')?.toLowerCase()
-  if (p === 'openai' || p === 'ollama' || p === 'codex' || p === 'gemini' || p === 'atomic-chat') return p
+  if (p === 'openai' || p === 'ollama' || p === 'codex' || p === 'gemini' || p === 'mistral' || p === 'atomic-chat') return p
  return 'auto'
}

@@ -90,6 +91,21 @@ async function main(): Promise<void> {
      process.exit(1)
    }

    env = builtEnv
  } else if (selected === 'mistral') {
    const builtEnv = buildMistralProfileEnv({
      model: argModel || null,
      baseUrl: argBaseUrl || null,
      apiKey: argApiKey || null,
      processEnv: process.env,
    })

    if (!builtEnv) {
      console.error('Mistral profile requires an API key. Use --api-key or set MISTRAL_API_KEY.')
      console.error('Get a free key at: https://admin.mistral.ai/organization/api-keys')
      process.exit(1)
    }

    env = builtEnv
  } else if (selected === 'ollama') {
    resolvedOllamaModel ??= await resolveOllamaModel(argModel, argBaseUrl, goal)
@@ -169,7 +185,7 @@ async function main(): Promise<void> {

  console.log(`Saved profile: ${selected}`)
  console.log(`Goal: ${goal}`)
-  console.log(`Model: ${profile.env.GEMINI_MODEL || profile.env.OPENAI_MODEL || getGoalDefaultOpenAIModel(goal)}`)
+  console.log(`Model: ${profile.env.GEMINI_MODEL || profile.env.MISTRAL_MODEL || profile.env.OPENAI_MODEL || getGoalDefaultOpenAIModel(goal)}`)
  console.log(`Path: ${outputPath}`)
  console.log('Next: bun run dev:profile')
}

@@ -50,7 +50,7 @@ function parseLaunchOptions(argv: string[]): LaunchOptions {
      continue
    }

-    if ((lower === 'auto' || lower === 'openai' || lower === 'ollama' || lower === 'codex' || lower === 'gemini' || lower === 'atomic-chat') && requestedProfile === 'auto') {
+    if ((lower === 'auto' || lower === 'openai' || lower === 'ollama' || lower === 'codex' || lower === 'gemini' || lower === 'mistral' || lower === 'atomic-chat') && requestedProfile === 'auto') {
      requestedProfile = lower as ProviderProfile | 'auto'
      continue
    }
@@ -124,6 +124,8 @@ function printSummary(profile: ProviderProfile): void {
  console.log(`Launching profile: ${profile}`)
  if (profile === 'gemini') {
    console.log('Using configured Gemini provider settings.')
  } else if (profile === 'mistral') {
    console.log('Using configured Mistral provider settings.')
  } else if (profile === 'codex') {
    console.log('Using configured Codex/OpenAI-compatible provider settings.')
  } else if (profile === 'atomic-chat') {
@@ -139,7 +141,7 @@ async function main(): Promise<void> {
  const options = parseLaunchOptions(process.argv.slice(2))
  const requestedProfile = options.requestedProfile
  if (!requestedProfile) {
-    console.error('Usage: bun run scripts/provider-launch.ts [openai|ollama|codex|gemini|atomic-chat|auto] [--fast] [--goal <latency|balanced|coding>] [-- <cli args>]')
+    console.error('Usage: bun run scripts/provider-launch.ts [openai|ollama|codex|gemini|mistral|atomic-chat|mistral|auto] [--fast] [--goal <latency|balanced|coding>] [-- <cli args>]')
    process.exit(1)
  }

@@ -205,6 +207,11 @@ async function main(): Promise<void> {
    process.exit(1)
  }

  if (profile === 'mistral' && !env.MISTRAL_API_KEY) {
    console.error('MISTRAL_API_KEY is required for mistral profile. Run: bun run profile:init -- --provider mistral --api-key <key>')
    process.exit(1)
  }

  if (profile === 'openai' && (!env.OPENAI_API_KEY || env.OPENAI_API_KEY === 'SUA_CHAVE')) {
    console.error('OPENAI_API_KEY is required for openai profile and cannot be SUA_CHAVE. Run: bun run profile:init -- --provider openai --api-key <key>')
    process.exit(1)
scripts/start-grpc.ts (50 changes, new file)
@@ -0,0 +1,50 @@
import { GrpcServer } from '../src/grpc/server.ts'
import { init } from '../src/entrypoints/init.ts'

// Polyfill MACRO which is normally injected by the bundler
Object.assign(globalThis, {
  MACRO: {
    VERSION: '0.1.7',
    DISPLAY_VERSION: '0.1.7',
    PACKAGE_URL: '@gitlawb/openclaude',
  }
})

async function main() {
  console.log('Starting OpenClaude gRPC Server...')
  await init()

  // Mirror CLI bootstrap: hydrate secure tokens and resolve provider profile
  const { enableConfigs } = await import('../src/utils/config.js')
  enableConfigs()
  const { applySafeConfigEnvironmentVariables } = await import('../src/utils/managedEnv.js')
  applySafeConfigEnvironmentVariables()
  const { hydrateGeminiAccessTokenFromSecureStorage } = await import('../src/utils/geminiCredentials.js')
  hydrateGeminiAccessTokenFromSecureStorage()
  const { hydrateGithubModelsTokenFromSecureStorage } = await import('../src/utils/githubModelsCredentials.js')
  hydrateGithubModelsTokenFromSecureStorage()

  const { buildStartupEnvFromProfile, applyProfileEnvToProcessEnv } = await import('../src/utils/providerProfile.js')
  const { getProviderValidationError, validateProviderEnvOrExit } = await import('../src/utils/providerValidation.js')
  const startupEnv = await buildStartupEnvFromProfile({ processEnv: process.env })
  if (startupEnv !== process.env) {
    const startupProfileError = await getProviderValidationError(startupEnv)
    if (startupProfileError) {
      console.warn(`Warning: ignoring saved provider profile. ${startupProfileError}`)
    } else {
      applyProfileEnvToProcessEnv(process.env, startupEnv)
    }
  }
  await validateProviderEnvOrExit()

  const port = process.env.GRPC_PORT ? parseInt(process.env.GRPC_PORT, 10) : 50051
  const host = process.env.GRPC_HOST || 'localhost'
  const server = new GrpcServer()

  server.start(port, host)
}

main().catch((err) => {
  console.error('Fatal error starting gRPC server:', err)
  process.exit(1)
})
@@ -118,14 +118,18 @@ function isLocalBaseUrl(baseUrl: string): boolean {
}

const GEMINI_DEFAULT_BASE_URL = 'https://generativelanguage.googleapis.com/v1beta/openai'
const GITHUB_MODELS_DEFAULT_BASE = 'https://models.github.ai/inference'
+const MISTRAL_DEFAULT_BASE_URL = 'https://api.mistral.ai/v1'
+const GITHUB_COPILOT_BASE = 'https://api.githubcopilot.com'

function currentBaseUrl(): string {
  if (isTruthy(process.env.CLAUDE_CODE_USE_GEMINI)) {
    return process.env.GEMINI_BASE_URL ?? GEMINI_DEFAULT_BASE_URL
  }
+  if (isTruthy(process.env.CLAUDE_CODE_USE_MISTRAL)) {
+    return process.env.MISTRAL_BASE_URL ?? MISTRAL_DEFAULT_BASE_URL
+  }
  if (isTruthy(process.env.CLAUDE_CODE_USE_GITHUB)) {
-    return process.env.OPENAI_BASE_URL ?? GITHUB_MODELS_DEFAULT_BASE
+    return process.env.OPENAI_BASE_URL ?? GITHUB_COPILOT_BASE
  }
  return process.env.OPENAI_BASE_URL ?? 'https://api.openai.com/v1'
}
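Resolution order matters in `currentBaseUrl()`: the Gemini flag wins first, then Mistral, then GitHub, with plain OpenAI as the fallback. A quick reference of the expected outcomes, using the default constants above:

```ts
// Expected resolution when only one flag is set (defaults from the constants above):
// CLAUDE_CODE_USE_GEMINI=1  → https://generativelanguage.googleapis.com/v1beta/openai
// CLAUDE_CODE_USE_MISTRAL=1 → https://api.mistral.ai/v1
// CLAUDE_CODE_USE_GITHUB=1  → https://api.githubcopilot.com
// (no flag set)             → https://api.openai.com/v1
```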
@@ -155,9 +159,34 @@ function checkGeminiEnv(): CheckResult[] {
  return results
}

+function checkMistralEnv(): CheckResult[] {
+  const results: CheckResult[] = []
+  const model = process.env.MISTRAL_MODEL
+  const key = process.env.MISTRAL_API_KEY
+  const baseUrl = process.env.MISTRAL_BASE_URL ?? MISTRAL_DEFAULT_BASE_URL
+
+  results.push(pass('Provider mode', 'Mistral provider enabled.'))
+
+  if (!model) {
+    results.push(pass('MISTRAL_MODEL', 'Not set. Default will be used at runtime.'))
+  } else {
+    results.push(pass('MISTRAL_MODEL', model))
+  }
+
+  results.push(pass('MISTRAL_BASE_URL', baseUrl))
+
+  if (!key) {
+    results.push(fail('MISTRAL_API_KEY', 'Missing. Set MISTRAL_API_KEY.'))
+  } else {
+    results.push(pass('MISTRAL_API_KEY', 'Configured.'))
+  }
+
+  return results
+}
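Put differently, a minimal environment that satisfies `checkMistralEnv` needs only the provider flag and an API key; model and base URL fall back to defaults. A sketch (the key value is a placeholder):

```ts
// Minimal passing configuration; the MISTRAL_API_KEY value is a placeholder.
process.env.CLAUDE_CODE_USE_MISTRAL = '1'
process.env.MISTRAL_API_KEY = 'your-mistral-key'
// Optional; the doctor reports devstral-latest and https://api.mistral.ai/v1
// as the runtime defaults when these are unset.
process.env.MISTRAL_MODEL = 'devstral-latest'
process.env.MISTRAL_BASE_URL = 'https://api.mistral.ai/v1'
```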

function checkGithubEnv(): CheckResult[] {
  const results: CheckResult[] = []
-  const baseUrl = process.env.OPENAI_BASE_URL ?? GITHUB_MODELS_DEFAULT_BASE
+  const baseUrl = process.env.OPENAI_BASE_URL ?? GITHUB_COPILOT_BASE
  results.push(pass('Provider mode', 'GitHub Models provider enabled.'))

  const token = process.env.GITHUB_TOKEN ?? process.env.GH_TOKEN
@@ -186,12 +215,17 @@ function checkOpenAIEnv(): CheckResult[] {
  const results: CheckResult[] = []
  const useGemini = isTruthy(process.env.CLAUDE_CODE_USE_GEMINI)
  const useGithub = isTruthy(process.env.CLAUDE_CODE_USE_GITHUB)
+  const useMistral = isTruthy(process.env.CLAUDE_CODE_USE_MISTRAL)
  const useOpenAI = isTruthy(process.env.CLAUDE_CODE_USE_OPENAI)

  if (useGemini) {
    return checkGeminiEnv()
  }

+  if (useMistral) {
+    return checkMistralEnv()
+  }

  if (useGithub && !useOpenAI) {
    return checkGithubEnv()
  }
@@ -268,8 +302,9 @@ async function checkBaseUrlReachability(): Promise<CheckResult> {
  const useGemini = isTruthy(process.env.CLAUDE_CODE_USE_GEMINI)
  const useOpenAI = isTruthy(process.env.CLAUDE_CODE_USE_OPENAI)
  const useGithub = isTruthy(process.env.CLAUDE_CODE_USE_GITHUB)
+  const useMistral = isTruthy(process.env.CLAUDE_CODE_USE_MISTRAL)

-  if (!useGemini && !useOpenAI && !useGithub) {
+  if (!useGemini && !useOpenAI && !useGithub && !useMistral) {
    return pass('Provider reachability', 'Skipped (OpenAI-compatible mode disabled).')
  }

@@ -326,6 +361,8 @@ async function checkBaseUrlReachability(): Promise<CheckResult> {
    })
  } else if (useGemini && (process.env.GEMINI_API_KEY ?? process.env.GOOGLE_API_KEY)) {
    headers.Authorization = `Bearer ${process.env.GEMINI_API_KEY ?? process.env.GOOGLE_API_KEY}`
+  } else if (useMistral && process.env.MISTRAL_API_KEY) {
+    headers.Authorization = `Bearer ${process.env.MISTRAL_API_KEY}`
  } else if (process.env.OPENAI_API_KEY) {
    headers.Authorization = `Bearer ${process.env.OPENAI_API_KEY}`
  }
@@ -373,7 +410,8 @@ function checkOllamaProcessorMode(): CheckResult {
  if (
    !isTruthy(process.env.CLAUDE_CODE_USE_OPENAI) ||
    isTruthy(process.env.CLAUDE_CODE_USE_GEMINI) ||
-    isTruthy(process.env.CLAUDE_CODE_USE_GITHUB)
+    isTruthy(process.env.CLAUDE_CODE_USE_GITHUB) ||
+    isTruthy(process.env.CLAUDE_CODE_USE_MISTRAL)
  ) {
    return pass('Ollama processor mode', 'Skipped (OpenAI-compatible mode disabled).')
  }
@@ -425,6 +463,14 @@ function serializeSafeEnvSummary(): Record<string, string | boolean> {
      GEMINI_API_KEY_SET: Boolean(process.env.GEMINI_API_KEY ?? process.env.GOOGLE_API_KEY),
    }
  }
+  if (isTruthy(process.env.CLAUDE_CODE_USE_MISTRAL)) {
+    return {
+      CLAUDE_CODE_USE_MISTRAL: true,
+      MISTRAL_MODEL: process.env.MISTRAL_MODEL ?? '(unset, default: devstral-latest)',
+      MISTRAL_BASE_URL: process.env.MISTRAL_BASE_URL ?? 'https://api.mistral.ai/v1',
+      MISTRAL_API_KEY_SET: Boolean(process.env.MISTRAL_API_KEY),
+    }
+  }
  if (
    isTruthy(process.env.CLAUDE_CODE_USE_GITHUB) &&
    !isTruthy(process.env.CLAUDE_CODE_USE_OPENAI)
@@ -435,7 +481,7 @@ function serializeSafeEnvSummary(): Record<string, string | boolean> {
      OPENAI_MODEL:
        process.env.OPENAI_MODEL ??
        '(unset, default: github:copilot → openai/gpt-4.1)',
      OPENAI_BASE_URL:
-        process.env.OPENAI_BASE_URL ?? GITHUB_MODELS_DEFAULT_BASE,
+        process.env.OPENAI_BASE_URL ?? GITHUB_COPILOT_BASE,
      GITHUB_TOKEN_SET: Boolean(
        process.env.GITHUB_TOKEN ?? process.env.GH_TOKEN,
      ),

@@ -400,12 +400,12 @@ export async function update() {
    if (useLocalUpdate) {
      process.stderr.write('Try manually updating with:\n')
      process.stderr.write(
-        ` cd ~/.claude/local && npm update ${MACRO.PACKAGE_URL}\n`,
+        ` cd ~/.openclaude/local && npm update ${MACRO.PACKAGE_URL}\n`,
      )
    } else {
      process.stderr.write('Try running with sudo or fix npm permissions\n')
      process.stderr.write(
-        'Or consider using native installation with: claude install\n',
+        'Or consider using native installation with: openclaude install\n',
      )
    }
    await gracefulShutdown(1)
@@ -415,11 +415,11 @@ export async function update() {
    if (useLocalUpdate) {
      process.stderr.write('Try manually updating with:\n')
      process.stderr.write(
-        ` cd ~/.claude/local && npm update ${MACRO.PACKAGE_URL}\n`,
+        ` cd ~/.openclaude/local && npm update ${MACRO.PACKAGE_URL}\n`,
      )
    } else {
      process.stderr.write(
-        'Or consider using native installation with: claude install\n',
+        'Or consider using native installation with: openclaude install\n',
      )
    }
    await gracefulShutdown(1)

@@ -32,6 +32,7 @@ import logout from './commands/logout/index.js'
import installGitHubApp from './commands/install-github-app/index.js'
import installSlackApp from './commands/install-slack-app/index.js'
import breakCache from './commands/break-cache/index.js'
+import cacheProbe from './commands/cache-probe/index.js'
import mcp from './commands/mcp/index.js'
import mobile from './commands/mobile/index.js'
import onboarding from './commands/onboarding/index.js'
@@ -136,6 +137,7 @@ import hooks from './commands/hooks/index.js'
import files from './commands/files/index.js'
import branch from './commands/branch/index.js'
import agents from './commands/agents/index.js'
+import autoFix from './commands/auto-fix.js'
import plugin from './commands/plugin/index.js'
import reloadPlugins from './commands/reload-plugins/index.js'
import rewind from './commands/rewind/index.js'
@@ -143,6 +145,7 @@ import heapDump from './commands/heapdump/index.js'
import mockLimits from './commands/mock-limits/index.js'
import bridgeKick from './commands/bridge-kick.js'
import version from './commands/version.js'
+import wiki from './commands/wiki/index.js'
import summary from './commands/summary/index.js'
import {
  resetLimits,
@@ -263,8 +266,10 @@ const COMMANDS = memoize((): Command[] => [
  addDir,
  advisor,
  agents,
+  autoFix,
  branch,
  btw,
+  cacheProbe,
  chrome,
  clear,
  color,
@@ -324,6 +329,7 @@ const COMMANDS = memoize((): Command[] => [
  usage,
  usageReport,
  vim,
+  wiki,
  ...(webCmd ? [webCmd] : []),
  ...(forkCmd ? [forkCmd] : []),
  ...(buddy ? [buddy] : []),

25 src/commands/auto-fix.ts (Normal file)
@@ -0,0 +1,25 @@
import type { Command } from '../types/command.js'

const command: Command = {
  name: 'auto-fix',
  description: 'Configure auto-fix: run lint/test after AI edits',
  isEnabled: () => true,
  type: 'prompt',
  progressMessage: 'Configuring auto-fix...',
  contentLength: 0,
  source: 'builtin',
  async getPromptForCommand() {
    return [
      {
        type: 'text',
        text:
          'The user wants to configure auto-fix settings. Auto-fix automatically runs lint and test commands after AI file edits, feeding errors back for self-repair.\n\n' +
          'Current settings location: `.claude/settings.json` or `.claude/settings.local.json`\n\n' +
          'Example configuration:\n```json\n{\n "autoFix": {\n "enabled": true,\n "lint": "eslint . --fix",\n "test": "bun test",\n "maxRetries": 3,\n "timeout": 30000\n }\n}\n```\n\n' +
          'Ask the user what lint and test commands they use, then help them set up the configuration.',
      },
    ]
  },
}

export default command

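The prompt text above embeds the expected `autoFix` settings shape. For illustration, a consumer reading that block out of `.claude/settings.json` could look roughly like this sketch; the `AutoFixConfig` type and `readAutoFixConfig` helper are ours, not part of the diff:

```ts
import { readFileSync } from 'node:fs'

// Shape taken from the example configuration embedded in the prompt above.
interface AutoFixConfig {
  enabled: boolean
  lint?: string      // e.g. 'eslint . --fix'
  test?: string      // e.g. 'bun test'
  maxRetries?: number
  timeout?: number   // milliseconds
}

// Hypothetical reader; the real plumbing lives elsewhere in the codebase.
function readAutoFixConfig(path = '.claude/settings.json'): AutoFixConfig | null {
  try {
    const settings = JSON.parse(readFileSync(path, 'utf8'))
    return settings.autoFix ?? null
  } catch {
    return null
  }
}
```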
413 src/commands/cache-probe/cache-probe.ts (Normal file)
@@ -0,0 +1,413 @@
import { getSessionId } from '../../bootstrap/state.js'
import { resolveProviderRequest } from '../../services/api/providerConfig.js'
import type { LocalCommandCall } from '../../types/command.js'
import { logForDebugging } from '../../utils/debug.js'
import { isEnvTruthy } from '../../utils/envUtils.js'
import { hydrateGithubModelsTokenFromSecureStorage } from '../../utils/githubModelsCredentials.js'
import { getMainLoopModel } from '../../utils/model/model.js'

const COPILOT_HEADERS: Record<string, string> = {
  'User-Agent': 'GitHubCopilotChat/0.26.7',
  'Editor-Version': 'vscode/1.99.3',
  'Editor-Plugin-Version': 'copilot-chat/0.26.7',
  'Copilot-Integration-Id': 'vscode-chat',
}

// Large system prompt (~6000 chars, ~1500 tokens) to cross the 1024-token cache threshold
const SYSTEM_PROMPT = [
  'You are a coding assistant. Answer concisely.',
  'CONTEXT: User is working on a TypeScript project with Bun runtime.',
  ...Array.from(
    { length: 80 },
    (_, i) =>
      `Rule ${i + 1}: Follow best practices for TypeScript including strict typing, error handling, testing, and clean code. Prefer explicit types over any. Use const assertions. Await all async operations.`,
  ),
].join('\n\n')

const USER_MESSAGE = 'Say "hello" and nothing else.'
const DELAY_MS = 3000

/**
 * Extract model family from a versioned model string.
 * e.g. "gpt-5.4-0626" → "gpt-5.4", "codex-mini-latest" → "codex-mini"
 */
function getModelFamily(model: string | undefined): string {
  if (!model) return 'unknown'
  return model
    .replace(/-\d{4,}$/, '')
    .replace(/-latest$/, '')
    .replace(/-preview$/, '')
}

function getField(obj: unknown, path: string): unknown {
  return path
    .split('.')
    .reduce((o: any, k: string) => (o != null ? o[k] : undefined), obj)
}

interface ProbeResult {
  label: string
  status: number
  elapsed: number
  headers: Record<string, string>
  usage: Record<string, unknown> | null
  responseId: string | null
  error: string | null
}

async function sendProbe(
  url: string,
  headers: Record<string, string>,
  body: Record<string, unknown>,
  label: string,
): Promise<ProbeResult> {
  const start = Date.now()
  let response: Response
  try {
    response = await fetch(url, {
      method: 'POST',
      headers,
      body: JSON.stringify(body),
    })
  } catch (err: any) {
    return {
      label,
      status: 0,
      elapsed: Date.now() - start,
      headers: {},
      usage: null,
      responseId: null,
      error: err.message,
    }
  }
  const elapsed = Date.now() - start

  const respHeaders: Record<string, string> = {}
  response.headers.forEach((value, key) => {
    respHeaders[key] = value
  })

  if (!response.ok) {
    const errorBody = await response.text().catch(() => '')
    return {
      label,
      status: response.status,
      elapsed,
      headers: respHeaders,
      usage: null,
      responseId: null,
      error: errorBody,
    }
  }

  // Parse SSE stream for usage data
  const text = await response.text()
  let usage: Record<string, unknown> | null = null
  let responseId: string | null = null

  const isResponses = url.endsWith('/responses')
  for (const chunk of text.split('\n\n')) {
    const lines = chunk
      .split('\n')
      .map((l) => l.trim())
      .filter(Boolean)

    if (isResponses) {
      const eventLine = lines.find((l) => l.startsWith('event: '))
      const dataLines = lines.filter((l) => l.startsWith('data: '))
      if (!eventLine || !dataLines.length) continue
      const event = eventLine.slice(7).trim()
      if (
        event === 'response.completed' ||
        event === 'response.incomplete'
      ) {
        try {
          const data = JSON.parse(
            dataLines.map((l) => l.slice(6)).join('\n'),
          )
          usage = (data?.response?.usage as Record<string, unknown>) ?? null
          responseId = (data?.response?.id as string) ?? null
        } catch {}
      }
    } else {
      for (const line of lines) {
        if (!line.startsWith('data: ')) continue
        const raw = line.slice(6).trim()
        if (raw === '[DONE]') continue
        try {
          const data = JSON.parse(raw) as Record<string, unknown>
          if (data.usage) {
            usage = data.usage as Record<string, unknown>
            responseId = (data.id as string) ?? null
          }
        } catch {}
      }
    }
  }

  return { label, status: response.status, elapsed, headers: respHeaders, usage, responseId, error: null }
}

function formatResult(r: ProbeResult): string {
  const lines: string[] = [`--- ${r.label} ---`]
  if (r.error) {
    lines.push(` ERROR (HTTP ${r.status}): ${r.error.slice(0, 200)}`)
    return lines.join('\n')
  }
  lines.push(` HTTP ${r.status} — ${r.elapsed}ms`)
  if (r.responseId) lines.push(` response.id: ${r.responseId}`)

  if (r.usage) {
    lines.push(' Usage:')
    lines.push(` ${JSON.stringify(r.usage, null, 2).replace(/\n/g, '\n ')}`)
  } else {
    lines.push(' Usage: null')
  }

  // Interesting headers
  for (const h of [
    'openai-processing-ms',
    'x-ratelimit-remaining',
    'x-ratelimit-limit',
    'x-ms-region',
    'x-github-request-id',
    'x-request-id',
  ]) {
    if (r.headers[h]) lines.push(` ${h}: ${r.headers[h]}`)
  }
  return lines.join('\n')
}

export const call: LocalCommandCall = async (args) => {
  const parts = (args ?? '').trim().split(/\s+/).filter(Boolean)
  const noKey = parts.includes('--no-key')
  const modelOverride = parts.find((p) => !p.startsWith('--')) || undefined
  const modelStr = modelOverride ?? getMainLoopModel()
  const request = resolveProviderRequest({ model: modelStr })
  const isGithub = isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB)

  // Resolve API key the same way the OpenAI shim does
  let apiKey = process.env.OPENAI_API_KEY ?? ''
  if (!apiKey && isGithub) {
    hydrateGithubModelsTokenFromSecureStorage()
    apiKey =
      process.env.OPENAI_API_KEY ??
      process.env.GITHUB_TOKEN ??
      process.env.GH_TOKEN ??
      ''
  }

  if (!apiKey) {
    return {
      type: 'text',
      value:
        'No API key found. Make sure you are in an active OpenAI-compatible or GitHub Copilot session.\n' +
        'For GitHub Copilot: run /onboard-github first.\n' +
        'For OpenAI-compatible: set OPENAI_API_KEY.',
    }
  }

  const useResponses = request.transport === 'codex_responses'
  const endpoint = useResponses ? '/responses' : '/chat/completions'
  const url = `${request.baseUrl}${endpoint}`
  const family = getModelFamily(request.resolvedModel)
  const cacheKey = `${getSessionId()}:${family}`

  const headers: Record<string, string> = {
    'Content-Type': 'application/json',
    Authorization: `Bearer ${apiKey}`,
    originator: 'openclaude',
  }
  if (isGithub) {
    Object.assign(headers, COPILOT_HEADERS)
  }

  let body: Record<string, unknown>
  if (useResponses) {
    body = {
      model: request.resolvedModel,
      instructions: SYSTEM_PROMPT,
      input: [
        {
          type: 'message',
          role: 'user',
          content: [{ type: 'input_text', text: USER_MESSAGE }],
        },
      ],
      stream: true,
      ...(noKey ? {} : {
        store: false,
        prompt_cache_key: cacheKey,
        prompt_cache_retention: '24h',
      }),
    }
  } else {
    body = {
      model: request.resolvedModel,
      messages: [
        { role: 'system', content: SYSTEM_PROMPT },
        { role: 'user', content: USER_MESSAGE },
      ],
      stream: true,
      stream_options: { include_usage: true },
      max_tokens: 20,
      ...(noKey ? {} : {
        store: false,
        prompt_cache_key: cacheKey,
      }),
    }
  }

  // Log configuration
  const config = [
    `[cache-probe] Starting cache probe${noKey ? ' (--no-key: cache params OMITTED)' : ''}`,
    ` model: ${request.resolvedModel} (family: ${family})`,
    ` transport: ${request.transport}`,
    ` endpoint: ${url}`,
    ` prompt_cache_key: ${noKey ? 'NOT SENT' : cacheKey}`,
    ` store: ${noKey ? 'NOT SENT' : 'false'}`,
    ` system prompt: ~${Math.round(SYSTEM_PROMPT.length / 4)} tokens`,
    ` delay between calls: ${DELAY_MS}ms`,
  ].join('\n')
  logForDebugging(config)

  // Call 1 — Cold
  const r1 = await sendProbe(url, headers, body, 'CALL 1 — Cold (no cache)')
  logForDebugging(`[cache-probe]\n${formatResult(r1)}`)

  if (r1.error) {
    return {
      type: 'text',
      value: `Cache probe failed on first call: HTTP ${r1.status}\n${r1.error.slice(0, 300)}\n\nFull details in debug log.`,
    }
  }

  // Wait
  await new Promise((r) => setTimeout(r, DELAY_MS))

  // Call 2 — Warm
  const r2 = await sendProbe(url, headers, body, 'CALL 2 — Warm (cache expected)')
  logForDebugging(`[cache-probe]\n${formatResult(r2)}`)

  // --- Comparison ---
  const fields = [
    'input_tokens',
    'output_tokens',
    'total_tokens',
    'prompt_tokens',
    'completion_tokens',
    'input_tokens_details.cached_tokens',
    'prompt_tokens_details.cached_tokens',
    'output_tokens_details.reasoning_tokens',
  ]

  const comparison: string[] = ['[cache-probe] COMPARISON']
  comparison.push(
    ` ${'Field'.padEnd(42)} ${'Call 1'.padStart(8)} ${'Call 2'.padStart(8)} ${'Delta'.padStart(8)}`,
  )
  comparison.push(` ${'-'.repeat(72)}`)

  for (const f of fields) {
    const v1 = getField(r1.usage, f)
    const v2 = getField(r2.usage, f)
    if (v1 === undefined && v2 === undefined) continue
    const d =
      typeof v1 === 'number' && typeof v2 === 'number' ? v2 - v1 : ''
    comparison.push(
      ` ${f.padEnd(42)} ${String(v1 ?? '-').padStart(8)} ${String(v2 ?? '-').padStart(8)} ${String(d).padStart(8)}`,
    )
  }

  comparison.push('')
  comparison.push(
    ` Latency: ${r1.elapsed}ms → ${r2.elapsed}ms (${r2.elapsed - r1.elapsed > 0 ? '+' : ''}${r2.elapsed - r1.elapsed}ms)`,
  )

  // Header comparison
  for (const h of ['openai-processing-ms', 'x-ms-region', 'x-ratelimit-remaining']) {
    const v1 = r1.headers[h]
    const v2 = r2.headers[h]
    if (v1 || v2) {
      comparison.push(` ${h}: ${v1 ?? '-'} → ${v2 ?? '-'}`)
    }
  }

  // Verdict
  const cached2 =
    (getField(r2.usage, 'input_tokens_details.cached_tokens') as number) ??
    (getField(r2.usage, 'prompt_tokens_details.cached_tokens') as number) ??
    0
  const input1 =
    ((r1.usage?.input_tokens ?? r1.usage?.prompt_tokens) as number) ?? 0
  const input2 =
    ((r2.usage?.input_tokens ?? r2.usage?.prompt_tokens) as number) ?? 0

  let verdict: string
  if (cached2 > 0) {
    const rate = input2 > 0 ? Math.round((cached2 / input2) * 100) : '?'
    verdict = `CACHE HIT: ${cached2} cached tokens (${rate}% of input)`
  } else if (input1 === 0 && input2 === 0) {
    verdict = 'INCONCLUSIVE: Server returns 0 input_tokens — cannot measure'
  } else if (r2.elapsed < r1.elapsed * 0.6 && input1 > 100) {
    verdict = `POSSIBLE SILENT CACHING: Call 2 was ${Math.round((1 - r2.elapsed / r1.elapsed) * 100)}% faster but no cached_tokens reported`
  } else {
    verdict = 'NO CACHE DETECTED'
  }

  comparison.push(`\n Verdict: ${verdict}`)

  // --- Simulate what main's shim code does with this usage ---
  // codexShim.ts makeUsage() — used for Responses API (GPT-5+/Codex)
  function mainMakeUsage(u: any) {
    return {
      input_tokens: u?.input_tokens ?? 0,
      output_tokens: u?.output_tokens ?? 0,
      cache_creation_input_tokens: 0,
      cache_read_input_tokens: 0, // ← main hardcodes this to 0
    }
  }
  // openaiShim.ts convertChunkUsage() — used for Chat Completions
  function mainConvertChunkUsage(u: any) {
    return {
      input_tokens: u?.prompt_tokens ?? 0,
      output_tokens: u?.completion_tokens ?? 0,
      cache_creation_input_tokens: 0,
      cache_read_input_tokens: u?.prompt_tokens_details?.cached_tokens ?? 0,
    }
  }

  const shimFn = useResponses ? mainMakeUsage : mainConvertChunkUsage
  const shim1 = shimFn(r1.usage)
  const shim2 = shimFn(r2.usage)

  comparison.push('')
  comparison.push(` --- What main's shim reports (${useResponses ? 'codexShim.makeUsage' : 'openaiShim.convertChunkUsage'}) ---`)
  comparison.push(` Call 1: cache_read_input_tokens=${shim1.cache_read_input_tokens}`)
  comparison.push(` Call 2: cache_read_input_tokens=${shim2.cache_read_input_tokens}`)
  if (useResponses && cached2 > 0) {
    comparison.push(` BUG: Server returned ${cached2} cached tokens but main's makeUsage() drops it → reports 0`)
  } else if (!useResponses && shim2.cache_read_input_tokens > 0) {
    comparison.push(` OK: Chat Completions path on main correctly reads cached_tokens`)
  }

  logForDebugging(comparison.join('\n'))

  // User-facing summary
  const mode = noKey ? ' (NO cache key sent)' : ''
  const shimLabel = useResponses ? 'codexShim.makeUsage()' : 'openaiShim.convertChunkUsage()'
  const summary = [
    `Cache Probe — ${request.resolvedModel} via ${useResponses ? 'Responses API' : 'Chat Completions'}${mode}`,
    '',
    `Call 1: ${r1.elapsed}ms, input=${input1}, cached=${(getField(r1.usage, 'input_tokens_details.cached_tokens') as number) ?? (getField(r1.usage, 'prompt_tokens_details.cached_tokens') as number) ?? 0}`,
    `Call 2: ${r2.elapsed}ms, input=${input2}, cached=${cached2}`,
    '',
    verdict,
    '',
    `What main's ${shimLabel} reports:`,
    ` Call 2 cache_read_input_tokens = ${shim2.cache_read_input_tokens}${useResponses && cached2 > 0 ? ' ← BUG: server sent ' + cached2 + ' but main drops it' : ''}`,
    '',
    'Full details written to debug log.',
  ].join('\n')

  return { type: 'text', value: summary }
}
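The verdict above reduces to simple ratio arithmetic over the warm call's usage. As a standalone sketch of that computation (the function name is ours, not part of the diff):

```ts
// Mirrors the CACHE HIT branch: cached share of input tokens on call 2.
function cacheHitRate(cachedTokens: number, inputTokens: number): number | null {
  if (inputTokens <= 0) return null // server reported no input tokens; inconclusive
  return Math.round((cachedTokens / inputTokens) * 100)
}

cacheHitRate(1400, 1500) // → 93, i.e. "CACHE HIT: 1400 cached tokens (93% of input)"
```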
17 src/commands/cache-probe/index.ts (Normal file)
@@ -0,0 +1,17 @@
import type { Command } from '../../commands.js'
import { isEnvTruthy } from '../../utils/envUtils.js'

const cacheProbe: Command = {
  type: 'local',
  name: 'cache-probe',
  description:
    'Send identical requests to test prompt caching (results in debug log)',
  argumentHint: '[model] [--no-key]',
  isEnabled: () =>
    isEnvTruthy(process.env.CLAUDE_CODE_USE_OPENAI) ||
    isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB),
  supportsNonInteractive: false,
  load: () => import('./cache-probe.js'),
}

export default cacheProbe
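Given the gating above, the probe is only offered in OpenAI-compatible or GitHub sessions. Typical invocations might look like this (illustrative):

```ts
// Illustrative slash-command invocations from an active session:
//   /cache-probe            → probe the current main-loop model
//   /cache-probe gpt-4o     → probe an explicit model override
//   /cache-probe --no-key   → identical probe with cache params omitted
// Running once with and once with --no-key isolates the effect of
// prompt_cache_key on the reported cached_tokens.
```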
@@ -39,16 +39,16 @@ type InstallState = {
  message: string;
  warnings?: string[];
};
-function getInstallationPath(): string {
+export function getInstallationPath(): string {
  const isWindows = env.platform === 'win32';
  const homeDir = homedir();
  if (isWindows) {
    // Convert to Windows-style path
-    const windowsPath = join(homeDir, '.local', 'bin', 'claude.exe');
+    const windowsPath = join(homeDir, '.local', 'bin', 'openclaude.exe');
    // Replace forward slashes with backslashes for Windows display
    return windowsPath.replace(/\//g, '\\');
  }
-  return '~/.local/bin/claude';
+  return '~/.local/bin/openclaude';
}
function SetupNotes(t0) {
  const $ = _c(5);

68 src/commands/model/model.test.tsx (Normal file)
@@ -0,0 +1,68 @@
import { afterEach, expect, mock, test } from 'bun:test'

import { getAdditionalModelOptionsCacheScope } from '../../services/api/providerConfig.js'
import { getAPIProvider } from '../../utils/model/providers.js'

const originalEnv = {
  CLAUDE_CODE_USE_OPENAI: process.env.CLAUDE_CODE_USE_OPENAI,
  CLAUDE_CODE_USE_GEMINI: process.env.CLAUDE_CODE_USE_GEMINI,
  CLAUDE_CODE_USE_GITHUB: process.env.CLAUDE_CODE_USE_GITHUB,
  CLAUDE_CODE_USE_MISTRAL: process.env.CLAUDE_CODE_USE_MISTRAL,
  CLAUDE_CODE_USE_BEDROCK: process.env.CLAUDE_CODE_USE_BEDROCK,
  CLAUDE_CODE_USE_VERTEX: process.env.CLAUDE_CODE_USE_VERTEX,
  CLAUDE_CODE_USE_FOUNDRY: process.env.CLAUDE_CODE_USE_FOUNDRY,
  OPENAI_BASE_URL: process.env.OPENAI_BASE_URL,
  OPENAI_API_BASE: process.env.OPENAI_API_BASE,
  OPENAI_MODEL: process.env.OPENAI_MODEL,
}

afterEach(() => {
  mock.restore()
  process.env.CLAUDE_CODE_USE_OPENAI = originalEnv.CLAUDE_CODE_USE_OPENAI
  process.env.CLAUDE_CODE_USE_GEMINI = originalEnv.CLAUDE_CODE_USE_GEMINI
  process.env.CLAUDE_CODE_USE_GITHUB = originalEnv.CLAUDE_CODE_USE_GITHUB
  process.env.CLAUDE_CODE_USE_MISTRAL = originalEnv.CLAUDE_CODE_USE_MISTRAL
  process.env.CLAUDE_CODE_USE_BEDROCK = originalEnv.CLAUDE_CODE_USE_BEDROCK
  process.env.CLAUDE_CODE_USE_VERTEX = originalEnv.CLAUDE_CODE_USE_VERTEX
  process.env.CLAUDE_CODE_USE_FOUNDRY = originalEnv.CLAUDE_CODE_USE_FOUNDRY
  process.env.OPENAI_BASE_URL = originalEnv.OPENAI_BASE_URL
  process.env.OPENAI_API_BASE = originalEnv.OPENAI_API_BASE
  process.env.OPENAI_MODEL = originalEnv.OPENAI_MODEL
})

test('opens the model picker without awaiting local model discovery refresh', async () => {
  process.env.CLAUDE_CODE_USE_OPENAI = '1'
  delete process.env.CLAUDE_CODE_USE_GEMINI
  delete process.env.CLAUDE_CODE_USE_GITHUB
  delete process.env.CLAUDE_CODE_USE_MISTRAL
  delete process.env.CLAUDE_CODE_USE_BEDROCK
  delete process.env.CLAUDE_CODE_USE_VERTEX
  delete process.env.CLAUDE_CODE_USE_FOUNDRY
  delete process.env.OPENAI_API_BASE
  process.env.OPENAI_BASE_URL = 'http://127.0.0.1:8080/v1'
  process.env.OPENAI_MODEL = 'qwen2.5-coder-7b-instruct'

  let resolveDiscovery: (() => void) | undefined
  const discoverOpenAICompatibleModelOptions = mock(
    () =>
      new Promise<void>(resolve => {
        resolveDiscovery = resolve
      }),
  )

  mock.module('../../utils/model/openaiModelDiscovery.js', () => ({
    discoverOpenAICompatibleModelOptions,
  }))

  expect(getAdditionalModelOptionsCacheScope()).toBe('openai:http://127.0.0.1:8080/v1')

  const { call } = await import('./model.js')
  const result = await Promise.race([
    call(() => {}, {} as never, ''),
    new Promise(resolve => setTimeout(() => resolve('timeout'), 50)),
  ])

  resolveDiscovery?.()

  expect(result).not.toBe('timeout')
})
@@ -4,6 +4,7 @@ import * as React from 'react';
import type { CommandResultDisplay } from '../../commands.js';
import { ModelPicker } from '../../components/ModelPicker.js';
import { COMMON_HELP_ARGS, COMMON_INFO_ARGS } from '../../constants/xml.js';
import { fetchBootstrapData } from '../../services/api/bootstrap.js';
import { type AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS, logEvent } from '../../services/analytics/index.js';
import { useAppState, useSetAppState } from '../../state/AppState.js';
import type { LocalJSXCommandCall } from '../../types/command.js';
@@ -19,6 +20,7 @@ import { getActiveOpenAIModelOptionsCache, setActiveOpenAIModelOptionsCache } fr
import { getDefaultMainLoopModelSetting, isOpus1mMergeEnabled, renderDefaultModelSetting } from '../../utils/model/model.js';
import { isModelAllowed } from '../../utils/model/modelAllowlist.js';
import { validateModel } from '../../utils/model/validateModel.js';
+import { getAdditionalModelOptionsCacheScope } from '../../services/api/providerConfig.js';
function ModelPickerWrapper(t0) {
  const $ = _c(17);
  const {
@@ -282,7 +284,7 @@ function haveSameModelOptions(left: ModelOption[], right: ModelOption[]): boolea
  });
}
async function refreshOpenAIModelOptionsCache(): Promise<void> {
-  if (getAPIProvider() !== 'openai') {
+  if (!getAdditionalModelOptionsCacheScope()?.startsWith('openai:')) {
    return;
  }
  try {
@@ -319,7 +321,9 @@ export const call: LocalJSXCommandCall = async (onDone, _context, args) => {
    });
    return <SetModelAndClose args={args} onDone={onDone} />;
  }
-  await refreshOpenAIModelOptionsCache();
+  if (getAdditionalModelOptionsCacheScope()?.startsWith('openai:')) {
+    void refreshOpenAIModelOptionsCache();
+  }
  return <ModelPickerWrapper onDone={onDone} />;
};
function renderModelLabel(model: string | null): string {

@@ -2,8 +2,9 @@ import type { Command } from '../../commands.js'

const onboardGithub: Command = {
  name: 'onboard-github',
+  aliases: ['onboarding-github', 'onboardgithub', 'onboardinggithub'],
  description:
-    'Interactive setup for GitHub Models: device login or PAT, saved to secure storage',
+    'Interactive setup for GitHub Copilot: OAuth device login stored in secure storage',
  type: 'local-jsx',
  load: () => import('./onboard-github.js'),
}

148 src/commands/onboard-github/onboard-github.test.ts (Normal file)
@@ -0,0 +1,148 @@
import { describe, expect, test } from 'bun:test'

import {
  activateGithubOnboardingMode,
  applyGithubOnboardingProcessEnv,
  buildGithubOnboardingSettingsEnv,
  hasExistingGithubModelsLoginToken,
  shouldForceGithubRelogin,
} from './onboard-github.js'

describe('shouldForceGithubRelogin', () => {
  test.each(['force', '--force', 'relogin', '--relogin', 'reauth', '--reauth'])(
    'treats %s as force re-login',
    arg => {
      expect(shouldForceGithubRelogin(arg)).toBe(true)
    },
  )

  test('returns false for empty or unknown args', () => {
    expect(shouldForceGithubRelogin('')).toBe(false)
    expect(shouldForceGithubRelogin(undefined)).toBe(false)
    expect(shouldForceGithubRelogin('something-else')).toBe(false)
  })

  test('treats force flags as present in multi-word args', () => {
    expect(shouldForceGithubRelogin('--force extra')).toBe(true)
    expect(shouldForceGithubRelogin('foo --relogin bar')).toBe(true)
    expect(shouldForceGithubRelogin('abc reauth xyz')).toBe(true)
  })
})

describe('hasExistingGithubModelsLoginToken', () => {
  test('returns true when GITHUB_TOKEN is present', () => {
    expect(
      hasExistingGithubModelsLoginToken({ GITHUB_TOKEN: 'token' }, ''),
    ).toBe(true)
  })

  test('returns true when GH_TOKEN is present', () => {
    expect(
      hasExistingGithubModelsLoginToken({ GH_TOKEN: 'token' }, ''),
    ).toBe(true)
  })

  test('returns true when stored token exists', () => {
    expect(hasExistingGithubModelsLoginToken({}, 'stored-token')).toBe(true)
  })

  test('returns false when both env and stored token are missing', () => {
    expect(hasExistingGithubModelsLoginToken({}, '')).toBe(false)
  })
})

describe('onboarding auth precedence cleanup', () => {
  test('clears preexisting OpenAI auth when switching to GitHub', () => {
    const env: NodeJS.ProcessEnv = {
      CLAUDE_CODE_USE_OPENAI: '1',
      OPENAI_MODEL: 'gpt-4o',
      OPENAI_API_KEY: 'sk-stale-openai-key',
      OPENAI_ORG: 'org-old',
      OPENAI_PROJECT: 'project-old',
      OPENAI_ORGANIZATION: 'org-legacy',
      OPENAI_BASE_URL: 'https://api.openai.com/v1',
      OPENAI_API_BASE: 'https://api.openai.com/v1',
      CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED: '1',
      CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED_ID: 'profile_old',
    }

    applyGithubOnboardingProcessEnv('github:copilot', env)

    expect(env.CLAUDE_CODE_USE_GITHUB).toBe('1')
    expect(env.OPENAI_MODEL).toBe('github:copilot')

    expect(env.OPENAI_API_KEY).toBeUndefined()
    expect(env.OPENAI_ORG).toBeUndefined()
    expect(env.OPENAI_PROJECT).toBeUndefined()
    expect(env.OPENAI_ORGANIZATION).toBeUndefined()
    expect(env.OPENAI_BASE_URL).toBeUndefined()
    expect(env.OPENAI_API_BASE).toBeUndefined()

    expect(env.CLAUDE_CODE_USE_OPENAI).toBeUndefined()
    expect(env.CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED).toBeUndefined()
    expect(env.CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED_ID).toBeUndefined()

    const settingsEnv = buildGithubOnboardingSettingsEnv('github:copilot')
    expect(settingsEnv.CLAUDE_CODE_USE_GITHUB).toBe('1')
    expect(settingsEnv.OPENAI_MODEL).toBe('github:copilot')
    expect(settingsEnv.OPENAI_API_KEY).toBeUndefined()
    expect(settingsEnv.OPENAI_ORG).toBeUndefined()
    expect(settingsEnv.OPENAI_PROJECT).toBeUndefined()
    expect(settingsEnv.OPENAI_ORGANIZATION).toBeUndefined()
  })
})

describe('activateGithubOnboardingMode', () => {
  test('activates settings/env/hydration in order when merge succeeds', () => {
    const calls: string[] = []

    const result = activateGithubOnboardingMode(' github:copilot ', {
      mergeSettingsEnv: model => {
        calls.push(`merge:${model}`)
        return { ok: true }
      },
      applyProcessEnv: model => {
        calls.push(`apply:${model}`)
      },
      hydrateToken: () => {
        calls.push('hydrate')
      },
      onChangeAPIKey: () => {
        calls.push('onChangeAPIKey')
      },
    })

    expect(result).toEqual({ ok: true })
    expect(calls).toEqual([
      'merge:github:copilot',
      'apply:github:copilot',
      'hydrate',
      'onChangeAPIKey',
    ])
  })

  test('stops activation when settings merge fails', () => {
    const calls: string[] = []

    const result = activateGithubOnboardingMode(DEFAULT_MODEL_FOR_TESTS, {
      mergeSettingsEnv: () => {
        calls.push('merge')
        return { ok: false, detail: 'settings write failed' }
      },
      applyProcessEnv: () => {
        calls.push('apply')
      },
      hydrateToken: () => {
        calls.push('hydrate')
      },
      onChangeAPIKey: () => {
        calls.push('onChangeAPIKey')
      },
    })

    expect(result).toEqual({ ok: false, detail: 'settings write failed' })
    expect(calls).toEqual(['merge'])
  })
})

const DEFAULT_MODEL_FOR_TESTS = 'github:copilot'
@@ -2,9 +2,9 @@ import * as React from 'react'
import { useCallback, useState } from 'react'
import { Select } from '../../components/CustomSelect/select.js'
import { Spinner } from '../../components/Spinner.js'
-import TextInput from '../../components/TextInput.js'
import { Box, Text } from '../../ink.js'
import {
+  exchangeForCopilotToken,
  openVerificationUri,
  pollAccessToken,
  requestDeviceCode,
@@ -12,29 +12,134 @@ import {
import type { LocalJSXCommandCall } from '../../types/command.js'
import {
  hydrateGithubModelsTokenFromSecureStorage,
  readGithubModelsToken,
  saveGithubModelsToken,
} from '../../utils/githubModelsCredentials.js'
-import { updateSettingsForSource } from '../../utils/settings/settings.js'
+import { getSettingsForSource, updateSettingsForSource } from '../../utils/settings/settings.js'

const DEFAULT_MODEL = 'github:copilot'
const FORCE_RELOGIN_ARGS = new Set([
  'force',
  '--force',
  'relogin',
  '--relogin',
  'reauth',
  '--reauth',
])

-type Step =
-  | 'menu'
-  | 'device-busy'
-  | 'pat'
-  | 'error'
+type Step = 'menu' | 'device-busy' | 'error'

const PROVIDER_SPECIFIC_KEYS = new Set([
  'CLAUDE_CODE_USE_OPENAI',
  'CLAUDE_CODE_USE_GEMINI',
  'CLAUDE_CODE_USE_BEDROCK',
  'CLAUDE_CODE_USE_VERTEX',
  'CLAUDE_CODE_USE_FOUNDRY',
  'OPENAI_BASE_URL',
  'OPENAI_API_BASE',
  'OPENAI_API_KEY',
  'OPENAI_MODEL',
  'GEMINI_API_KEY',
  'GOOGLE_API_KEY',
  'GEMINI_BASE_URL',
  'GEMINI_MODEL',
  'GEMINI_ACCESS_TOKEN',
  'GEMINI_AUTH_MODE',
])

export function shouldForceGithubRelogin(args?: string): boolean {
  const normalized = (args ?? '').trim().toLowerCase()
  if (!normalized) {
    return false
  }
  return normalized.split(/\s+/).some(arg => FORCE_RELOGIN_ARGS.has(arg))
}

const GITHUB_PAT_PREFIXES = ['ghp_', 'gho_', 'ghs_', 'ghr_', 'github_pat_']

function isGithubPat(token: string): boolean {
  return GITHUB_PAT_PREFIXES.some(prefix => token.startsWith(prefix))
}

export function hasExistingGithubModelsLoginToken(
  env: NodeJS.ProcessEnv = process.env,
  storedToken?: string,
): boolean {
  const envToken = env.GITHUB_TOKEN?.trim() || env.GH_TOKEN?.trim()
  if (envToken) {
    // PATs are no longer supported - require OAuth re-auth
    if (isGithubPat(envToken)) {
      return false
    }
    return true
  }
  const persisted = (storedToken ?? readGithubModelsToken())?.trim()
  // PATs are no longer supported - require OAuth re-auth
  if (persisted && isGithubPat(persisted)) {
    return false
  }
  return Boolean(persisted)
}

export function buildGithubOnboardingSettingsEnv(
  model: string,
): Record<string, string | undefined> {
  return {
    CLAUDE_CODE_USE_GITHUB: '1',
    OPENAI_MODEL: model,
    OPENAI_API_KEY: undefined,
    OPENAI_ORG: undefined,
    OPENAI_PROJECT: undefined,
    OPENAI_ORGANIZATION: undefined,
    OPENAI_BASE_URL: undefined,
    OPENAI_API_BASE: undefined,
    CLAUDE_CODE_USE_OPENAI: undefined,
    CLAUDE_CODE_USE_GEMINI: undefined,
    CLAUDE_CODE_USE_BEDROCK: undefined,
    CLAUDE_CODE_USE_VERTEX: undefined,
    CLAUDE_CODE_USE_FOUNDRY: undefined,
  }
}

export function applyGithubOnboardingProcessEnv(
  model: string,
  env: NodeJS.ProcessEnv = process.env,
): void {
  env.CLAUDE_CODE_USE_GITHUB = '1'
  env.OPENAI_MODEL = model

  delete env.OPENAI_API_KEY
  delete env.OPENAI_ORG
  delete env.OPENAI_PROJECT
  delete env.OPENAI_ORGANIZATION
  delete env.OPENAI_BASE_URL
  delete env.OPENAI_API_BASE

  delete env.CLAUDE_CODE_USE_OPENAI
  delete env.CLAUDE_CODE_USE_GEMINI
  delete env.CLAUDE_CODE_USE_BEDROCK
  delete env.CLAUDE_CODE_USE_VERTEX
  delete env.CLAUDE_CODE_USE_FOUNDRY
  delete env.CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED
  delete env.CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED_ID
}

function mergeUserSettingsEnv(model: string): { ok: boolean; detail?: string } {
  const currentSettings = getSettingsForSource('userSettings')
  const currentEnv = currentSettings?.env ?? {}

  const newEnv: Record<string, string> = {}
  for (const [key, value] of Object.entries(currentEnv)) {
    if (!PROVIDER_SPECIFIC_KEYS.has(key)) {
      newEnv[key] = value
    }
  }

  newEnv.CLAUDE_CODE_USE_GITHUB = '1'
  newEnv.OPENAI_MODEL = model

  const { error } = updateSettingsForSource('userSettings', {
-    env: {
-      CLAUDE_CODE_USE_GITHUB: '1',
-      OPENAI_MODEL: model,
-      CLAUDE_CODE_USE_OPENAI: undefined as any,
-      CLAUDE_CODE_USE_GEMINI: undefined as any,
-      CLAUDE_CODE_USE_BEDROCK: undefined as any,
-      CLAUDE_CODE_USE_VERTEX: undefined as any,
-      CLAUDE_CODE_USE_FOUNDRY: undefined as any,
-    },
+    env: newEnv,
  })
  if (error) {
    return { ok: false, detail: error.message }
@@ -42,6 +147,32 @@ function mergeUserSettingsEnv(model: string): { ok: boolean; detail?: string } {
  return { ok: true }
}

export function activateGithubOnboardingMode(
  model: string = DEFAULT_MODEL,
  options?: {
    mergeSettingsEnv?: (model: string) => { ok: boolean; detail?: string }
    applyProcessEnv?: (model: string) => void
    hydrateToken?: () => void
    onChangeAPIKey?: () => void
  },
): { ok: boolean; detail?: string } {
  const normalizedModel = model.trim() || DEFAULT_MODEL
  const mergeSettingsEnv = options?.mergeSettingsEnv ?? mergeUserSettingsEnv
  const applyProcessEnv = options?.applyProcessEnv ?? applyGithubOnboardingProcessEnv
  const hydrateToken =
    options?.hydrateToken ?? hydrateGithubModelsTokenFromSecureStorage

  const merged = mergeSettingsEnv(normalizedModel)
  if (!merged.ok) {
    return merged
  }

  applyProcessEnv(normalizedModel)
  hydrateToken()
  options?.onChangeAPIKey?.()
  return { ok: true }
}

function OnboardGithub(props: {
  onDone: Parameters<LocalJSXCommandCall>[0]
  onChangeAPIKey: () => void
@@ -53,32 +184,42 @@ function OnboardGithub(props: {
    user_code: string
    verification_uri: string
  } | null>(null)
-  const [patDraft, setPatDraft] = useState('')
-  const [cursorOffset, setCursorOffset] = useState(0)

  const finalize = useCallback(
-    async (token: string, model: string = DEFAULT_MODEL) => {
-      const saved = saveGithubModelsToken(token)
+    async (
+      token: string,
+      model: string = DEFAULT_MODEL,
+      oauthToken?: string,
+    ) => {
+      const saved = saveGithubModelsToken(token, oauthToken)
      if (!saved.success) {
        setErrorMsg(saved.warning ?? 'Could not save token to secure storage.')
        setStep('error')
        return
      }
-      const merged = mergeUserSettingsEnv(model.trim() || DEFAULT_MODEL)
-      if (!merged.ok) {
+      const activated = activateGithubOnboardingMode(model, {
+        onChangeAPIKey,
+      })
+      if (!activated.ok) {
        setErrorMsg(
-          `Token saved, but settings were not updated: ${merged.detail ?? 'unknown error'}. ` +
+          `Token saved, but settings were not updated: ${activated.detail ?? 'unknown error'}. ` +
            `Add env CLAUDE_CODE_USE_GITHUB=1 and OPENAI_MODEL to ~/.claude/settings.json manually.`,
        )
        setStep('error')
        return
      }
      // Clear stale provider-specific env vars from the current session
      // so resolveProviderRequest() doesn't pick up a previous provider's
      // base URL or key after onboarding completes.
      for (const key of PROVIDER_SPECIFIC_KEYS) {
        delete process.env[key]
      }
      process.env.CLAUDE_CODE_USE_GITHUB = '1'
      process.env.OPENAI_MODEL = model.trim() || DEFAULT_MODEL
      hydrateGithubModelsTokenFromSecureStorage()
      onChangeAPIKey()
      onDone(
-        'GitHub Models onboard complete. Token stored in secure storage; user settings updated. Restart if the model does not switch.',
+        'GitHub Copilot onboard complete. Copilot token and OAuth token stored in secure storage (Windows/Linux: ~/.claude/.credentials.json, macOS: Keychain fallback to ~/.claude/.credentials.json); user settings updated. Restart if the model does not switch.',
        { display: 'user' },
      )
    },
@@ -96,11 +237,12 @@ function OnboardGithub(props: {
        verification_uri: device.verification_uri,
      })
      await openVerificationUri(device.verification_uri)
-      const token = await pollAccessToken(device.device_code, {
+      const oauthToken = await pollAccessToken(device.device_code, {
        initialInterval: device.interval,
        timeoutSeconds: device.expires_in,
      })
-      await finalize(token, DEFAULT_MODEL)
+      const copilotToken = await exchangeForCopilotToken(oauthToken)
+      await finalize(copilotToken.token, DEFAULT_MODEL, oauthToken)
    } catch (e) {
      setErrorMsg(e instanceof Error ? e.message : String(e))
      setStep('error')
@@ -139,7 +281,7 @@ function OnboardGithub(props: {
  if (step === 'device-busy') {
    return (
      <Box flexDirection="column" gap={1}>
-        <Text>GitHub device login</Text>
+        <Text>GitHub Copilot sign-in</Text>
        {deviceHint ? (
          <>
            <Text>
@@ -147,54 +289,22 @@ function OnboardGithub(props: {
              {deviceHint.verification_uri}
            </Text>
            <Text dimColor>
-              A browser window may have opened. Waiting for authorization…
+              A browser window may have opened. Waiting for authorization...
            </Text>
          </>
        ) : (
-          <Text dimColor>Requesting device code from GitHub…</Text>
+          <Text dimColor>Requesting device code from GitHub...</Text>
        )}
        <Spinner />
      </Box>
    )
  }

-  if (step === 'pat') {
-    return (
-      <Box flexDirection="column" gap={1}>
-        <Text>Paste a GitHub personal access token with access to GitHub Models.</Text>
-        <Text dimColor>Input is masked. Enter to submit; Esc to go back.</Text>
-        <TextInput
-          value={patDraft}
-          mask="*"
-          onChange={setPatDraft}
-          onSubmit={async (value: string) => {
-            const t = value.trim()
-            if (!t) {
-              return
-            }
-            await finalize(t, DEFAULT_MODEL)
-          }}
-          onExit={() => {
-            setStep('menu')
-            setPatDraft('')
-          }}
-          columns={80}
-          cursorOffset={cursorOffset}
-          onChangeCursorOffset={setCursorOffset}
-        />
-      </Box>
-    )
-  }
-
  const menuOptions = [
    {
-      label: 'Sign in with browser (device code)',
+      label: 'Sign in with browser',
      value: 'device' as const,
    },
-    {
-      label: 'Paste personal access token',
-      value: 'pat' as const,
-    },
    {
      label: 'Cancel',
      value: 'cancel' as const,
@@ -203,10 +313,10 @@ function OnboardGithub(props: {

  return (
    <Box flexDirection="column" gap={1}>
-      <Text bold>GitHub Models setup</Text>
+      <Text bold>GitHub Copilot setup</Text>
      <Text dimColor>
        Stores your token in the OS credential store (macOS Keychain when available)
-        and enables CLAUDE_CODE_USE_GITHUB in your user settings — no export
+        and enables CLAUDE_CODE_USE_GITHUB in your user settings - no export
        GITHUB_TOKEN needed for future runs.
      </Text>
      <Select
@@ -216,10 +326,6 @@ function OnboardGithub(props: {
        onDone('GitHub onboard cancelled', { display: 'system' })
        return
      }
-      if (v === 'pat') {
-        setStep('pat')
-        return
-      }
      void runDeviceFlow()
    }}
  />
@@ -227,7 +333,28 @@ function OnboardGithub(props: {
  )
}

-export const call: LocalJSXCommandCall = async (onDone, context) => {
+export const call: LocalJSXCommandCall = async (onDone, context, args) => {
+  const forceRelogin = shouldForceGithubRelogin(args)
+  if (hasExistingGithubModelsLoginToken() && !forceRelogin) {
+    const activated = activateGithubOnboardingMode(DEFAULT_MODEL, {
+      onChangeAPIKey: context.onChangeAPIKey,
+    })
+    if (!activated.ok) {
+      onDone(
+        `GitHub token detected, but settings activation failed: ${activated.detail ?? 'unknown error'}. ` +
+          'Set CLAUDE_CODE_USE_GITHUB=1 and OPENAI_MODEL=github:copilot in user settings manually.',
+        { display: 'system' },
+      )
+      return null
+    }
+
+    onDone(
+      'GitHub Models already authorized. Activated GitHub Models mode using your existing token. Use /onboard-github --force to re-authenticate.',
+      { display: 'user' },
+    )
+    return null
+  }
+
  return (
    <OnboardGithub
      onDone={onDone}

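In outline, the reworked sign-in path performs a token exchange before persisting anything. This sketch simplifies `runDeviceFlow` and `finalize` above (error handling omitted; names match the code):

```ts
// Simplified outline of the new Copilot device flow:
// 1. requestDeviceCode()                   → user_code + verification_uri
// 2. openVerificationUri(uri)              → opens the browser prompt
// 3. pollAccessToken(device_code, ...)     → GitHub OAuth token
// 4. exchangeForCopilotToken(oauth)        → short-lived Copilot API token
// 5. finalize(copilot.token, model, oauth) → saves both tokens, activates GitHub mode
```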
@@ -52,7 +52,11 @@ async function renderFinalFrame(node: React.ReactNode): Promise<string> {
    patchConsole: false,
  })

-  await instance.waitUntilExit()
+  // Timeout guard: if render throws before exit effect fires, don't hang
+  await Promise.race([
+    instance.waitUntilExit(),
+    new Promise<void>(resolve => setTimeout(resolve, 3000)),
+  ])
  return stripAnsi(extractLastFrame(getOutput()))
}

@@ -197,6 +201,21 @@ test('buildProfileSaveMessage maps provider fields without echoing secrets', ()
  expect(message).not.toContain('sk-secret-12345678')
})

+test('buildProfileSaveMessage labels local openai-compatible profiles consistently', () => {
+  const message = buildProfileSaveMessage(
+    'openai',
+    {
+      OPENAI_MODEL: 'gpt-5.4',
+      OPENAI_BASE_URL: 'http://127.0.0.1:8080/v1',
+    },
+    'D:/codings/Opensource/openclaude/.openclaude-profile.json',
+  )
+
+  expect(message).toContain('Saved Local OpenAI-compatible profile.')
+  expect(message).toContain('Model: gpt-5.4')
+  expect(message).toContain('Endpoint: http://127.0.0.1:8080/v1')
+})
+
test('buildProfileSaveMessage describes Gemini access token / ADC mode clearly', () => {
  const message = buildProfileSaveMessage(
    'gemini',
@@ -230,6 +249,51 @@ test('buildCurrentProviderSummary redacts poisoned model and endpoint values', (
  expect(summary.endpointLabel).toBe('sk-...5678')
})

+test('buildCurrentProviderSummary labels generic local openai-compatible providers', () => {
+  const summary = buildCurrentProviderSummary({
+    processEnv: {
+      CLAUDE_CODE_USE_OPENAI: '1',
+      OPENAI_MODEL: 'qwen2.5-coder-7b-instruct',
+      OPENAI_BASE_URL: 'http://127.0.0.1:8080/v1',
+    },
+    persisted: null,
+  })
+
+  expect(summary.providerLabel).toBe('Local OpenAI-compatible')
+  expect(summary.modelLabel).toBe('qwen2.5-coder-7b-instruct')
+  expect(summary.endpointLabel).toBe('http://127.0.0.1:8080/v1')
+})
+
+test('buildCurrentProviderSummary does not relabel local gpt-5.4 providers as Codex', () => {
+  const summary = buildCurrentProviderSummary({
+    processEnv: {
+      CLAUDE_CODE_USE_OPENAI: '1',
+      OPENAI_MODEL: 'gpt-5.4',
+      OPENAI_BASE_URL: 'http://127.0.0.1:8080/v1',
+    },
+    persisted: null,
+  })
+
+  expect(summary.providerLabel).toBe('Local OpenAI-compatible')
+  expect(summary.modelLabel).toBe('gpt-5.4')
+  expect(summary.endpointLabel).toBe('http://127.0.0.1:8080/v1')
+})
+
+test('buildCurrentProviderSummary recognizes GitHub Models mode', () => {
+  const summary = buildCurrentProviderSummary({
+    processEnv: {
+      CLAUDE_CODE_USE_GITHUB: '1',
+      OPENAI_MODEL: 'github:copilot',
+      OPENAI_BASE_URL: 'https://models.github.ai/inference',
+    },
+    persisted: null,
+  })
+
+  expect(summary.providerLabel).toBe('GitHub Models')
+  expect(summary.modelLabel).toBe('github:copilot')
+  expect(summary.endpointLabel).toBe('https://models.github.ai/inference')
+})
+
test('getProviderWizardDefaults ignores poisoned current provider values', () => {
  const defaults = getProviderWizardDefaults({
    OPENAI_API_KEY: 'sk-secret-12345678',

@@ -15,17 +15,21 @@ import { Box, Text } from '../../ink.js'
import {
  DEFAULT_CODEX_BASE_URL,
  DEFAULT_OPENAI_BASE_URL,
+  isLocalProviderUrl,
  resolveCodexApiCredentials,
  resolveProviderRequest,
} from '../../services/api/providerConfig.js'
import {
  buildCodexProfileEnv,
  buildGeminiProfileEnv,
+  buildMistralProfileEnv,
  buildOllamaProfileEnv,
  buildOpenAIProfileEnv,
  createProfileFile,
  DEFAULT_GEMINI_BASE_URL,
  DEFAULT_GEMINI_MODEL,
+  DEFAULT_MISTRAL_BASE_URL,
+  DEFAULT_MISTRAL_MODEL,
  deleteProfileFile,
  loadProfileFile,
  maskSecretForDisplay,
@@ -52,7 +56,11 @@ import {
  recommendOllamaModel,
  type RecommendationGoal,
} from '../../utils/providerRecommendation.js'
-import { hasLocalOllama, listOllamaModels } from '../../utils/providerDiscovery.js'
+import {
+  getLocalOpenAICompatibleProviderLabel,
+  hasLocalOllama,
+  listOllamaModels,
+} from '../../utils/providerDiscovery.js'

type ProviderChoice = 'auto' | ProviderProfile | 'clear'

@@ -69,6 +77,14 @@ type Step =
      baseUrl: string | null
      defaultModel: string
    }
+  | { name: 'mistral-key'; defaultModel: string }
+  | { name: 'mistral-base'; apiKey: string; defaultModel: string }
+  | {
+      name: 'mistral-model'
+      apiKey: string
+      baseUrl: string | null
+      defaultModel: string
+    }
  | { name: 'gemini-auth-method' }
  | { name: 'gemini-key' }
  | { name: 'gemini-access-token' }
@@ -111,6 +127,8 @@ type ProviderWizardDefaults = {
  openAIModel: string
  openAIBaseUrl: string
  geminiModel: string
+  mistralModel: string
+  mistralBaseUrl: string
}

function isEnvTruthy(value: string | undefined): boolean {
@@ -142,11 +160,19 @@ export function getProviderWizardDefaults(
|
||||
const safeGeminiModel =
|
||||
sanitizeProviderConfigValue(processEnv.GEMINI_MODEL, processEnv) ||
|
||||
DEFAULT_GEMINI_MODEL
|
||||
const safeMistralModel =
|
||||
sanitizeProviderConfigValue(processEnv.MISTRAL_MODEL, processEnv) ||
|
||||
DEFAULT_MISTRAL_MODEL
|
||||
const safeMistralBaseUrl =
|
||||
sanitizeProviderConfigValue(processEnv.MISTRAL_BASE_URL, processEnv) ||
|
||||
DEFAULT_MISTRAL_BASE_URL
|
||||
|
||||
return {
|
||||
openAIModel: safeOpenAIModel,
|
||||
openAIBaseUrl: safeOpenAIBaseUrl,
|
||||
geminiModel: safeGeminiModel,
|
||||
mistralModel: safeMistralModel,
|
||||
mistralBaseUrl: safeMistralBaseUrl,
|
||||
}
|
||||
}
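
A minimal usage sketch of the fallback behavior above, consistent with the "poisoned values" test earlier in this diff. It assumes sanitizeProviderConfigValue returns undefined when a configured value echoes a secret from the same environment; its exact rejection rules are not shown here:

// Hypothetical usage sketch — not part of the diff.
const defaults = getProviderWizardDefaults({
  OPENAI_API_KEY: 'sk-secret-12345678',
  MISTRAL_MODEL: 'sk-secret-12345678', // "poisoned": echoes the API key
})
// The sanitizer rejects the poisoned value, so the built-in default wins:
// defaults.mistralModel === DEFAULT_MISTRAL_MODEL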
@@ -173,6 +199,38 @@ export function buildCurrentProviderSummary(options?: {
    }
  }

  if (isEnvTruthy(processEnv.CLAUDE_CODE_USE_MISTRAL)) {
    return {
      providerLabel: 'Mistral',
      modelLabel: getSafeDisplayValue(
        processEnv.MISTRAL_MODEL ?? DEFAULT_MISTRAL_MODEL,
        processEnv
      ),
      endpointLabel: getSafeDisplayValue(
        processEnv.MISTRAL_BASE_URL ?? DEFAULT_MISTRAL_BASE_URL,
        processEnv
      ),
      savedProfileLabel,
    }
  }

  if (isEnvTruthy(processEnv.CLAUDE_CODE_USE_GITHUB)) {
    return {
      providerLabel: 'GitHub Models',
      modelLabel: getSafeDisplayValue(
        processEnv.OPENAI_MODEL ?? 'github:copilot',
        processEnv,
      ),
      endpointLabel: getSafeDisplayValue(
        processEnv.OPENAI_BASE_URL ??
          processEnv.OPENAI_API_BASE ??
          'https://models.github.ai/inference',
        processEnv,
      ),
      savedProfileLabel,
    }
  }

  if (isEnvTruthy(processEnv.CLAUDE_CODE_USE_OPENAI)) {
    const request = resolveProviderRequest({
      model: processEnv.OPENAI_MODEL,
@@ -182,10 +240,8 @@ export function buildCurrentProviderSummary(options?: {
    let providerLabel = 'OpenAI-compatible'
    if (request.transport === 'codex_responses') {
      providerLabel = 'Codex'
    } else if (request.baseUrl.includes('localhost:11434')) {
      providerLabel = 'Ollama'
    } else if (request.baseUrl.includes('localhost:1234')) {
      providerLabel = 'LM Studio'
    } else if (isLocalProviderUrl(request.baseUrl)) {
      providerLabel = getLocalOpenAICompatibleProviderLabel(request.baseUrl)
    }

    return {
@@ -239,6 +295,24 @@ function buildSavedProfileSummary(
          ? 'configured'
          : undefined,
      }
    case 'mistral':
      return {
        providerLabel: 'Mistral',
        modelLabel: getSafeDisplayValue(
          env.MISTRAL_MODEL ?? DEFAULT_MISTRAL_MODEL,
          process.env,
          env,
        ),
        endpointLabel: getSafeDisplayValue(
          env.MISTRAL_BASE_URL ?? DEFAULT_MISTRAL_BASE_URL,
          process.env,
          env,
        ),
        credentialLabel:
          maskSecretForDisplay(env.MISTRAL_API_KEY) !== undefined
            ? 'configured'
            : undefined,
      }
    case 'codex':
      return {
        providerLabel: 'Codex',
@@ -272,16 +346,20 @@ function buildSavedProfileSummary(
        ),
      }
    case 'openai':
    default:
    default: {
      const baseUrl = env.OPENAI_BASE_URL ?? DEFAULT_OPENAI_BASE_URL

      return {
        providerLabel: 'OpenAI-compatible',
        providerLabel: isLocalProviderUrl(baseUrl)
          ? getLocalOpenAICompatibleProviderLabel(baseUrl)
          : 'OpenAI-compatible',
        modelLabel: getSafeDisplayValue(
          env.OPENAI_MODEL ?? 'gpt-4o',
          process.env,
          env,
        ),
        endpointLabel: getSafeDisplayValue(
          env.OPENAI_BASE_URL ?? DEFAULT_OPENAI_BASE_URL,
          baseUrl,
          process.env,
          env,
        ),
@@ -290,6 +368,7 @@ function buildSavedProfileSummary(
            ? 'configured'
            : undefined,
      }
    }
  }
}
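
Both the live-environment summary and the saved-profile summary now route local base URLs through the same helper. A rough sketch of the assumed mapping — the helper's internals are not shown in this diff, though the old hard-coded branches and the 127.0.0.1 test at the top of this diff pin down three cases:

// Assumed behavior of getLocalOpenAICompatibleProviderLabel (illustrative):
getLocalOpenAICompatibleProviderLabel('http://localhost:11434/v1') // 'Ollama'
getLocalOpenAICompatibleProviderLabel('http://localhost:1234/v1')  // 'LM Studio'
getLocalOpenAICompatibleProviderLabel('http://127.0.0.1:8080/v1')  // 'Local OpenAI-compatible'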
@@ -448,6 +527,11 @@ function ProviderChooser({
      value: 'gemini',
      description: 'Use Google Gemini with API key, access token, or local ADC',
    },
    {
      label: 'Mistral',
      value: 'mistral',
      description: 'Use Mistral with API key'
    },
    {
      label: 'Codex',
      value: 'codex',
@@ -946,6 +1030,11 @@ export function ProviderWizard({
        })
      } else if (value === 'gemini') {
        setStep({ name: 'gemini-auth-method' })
      } else if (value === 'mistral') {
        setStep({
          name: 'mistral-key',
          defaultModel: defaults.mistralModel,
        })
      } else if (value === 'clear') {
        const filePath = deleteProfileFile()
        onDone(`Removed saved provider profile at ${filePath}. Restart OpenClaude to go back to normal startup.`, {
@@ -1085,6 +1174,101 @@ export function ProviderWizard({
        />
      )

    case 'mistral-key':
      return (
        <TextEntryDialog
          resetStateKey={step.name}
          title="Mistral setup"
          subtitle="Step 1 of 3"
          description={
            process.env.MISTRAL_API_KEY
              ? 'Enter an API key, or leave this blank to reuse the current MISTRAL_API_KEY from this session.'
              : 'Enter the API key for your Mistral provider.'
          }
          initialValue=""
          placeholder="..."
          mask="*"
          allowEmpty={Boolean(process.env.MISTRAL_API_KEY)}
          validate={value => {
            const candidate = value.trim() || process.env.MISTRAL_API_KEY || ''
            return sanitizeApiKey(candidate)
              ? null
              : 'Enter a real API key. Placeholder values like SUA_CHAVE are not valid.'
          }}
          onSubmit={value => {
            const apiKey = value.trim() || process.env.MISTRAL_API_KEY || ''
            setStep({
              name: 'mistral-base',
              apiKey,
              defaultModel: step.defaultModel,
            })
          }}
          onCancel={() => setStep({ name: 'choose' })}
        />
      )

    case 'mistral-base':
      return (
        <TextEntryDialog
          resetStateKey={step.name}
          title="Mistral setup"
          subtitle="Step 2 of 3"
          description={`Optionally enter a base URL. Leave blank for ${DEFAULT_MISTRAL_BASE_URL}.`}
          initialValue={
            defaults.mistralBaseUrl === DEFAULT_MISTRAL_BASE_URL
              ? ''
              : defaults.mistralBaseUrl
          }
          placeholder={DEFAULT_MISTRAL_BASE_URL}
          allowEmpty
          onSubmit={value => {
            setStep({
              name: 'mistral-model',
              apiKey: step.apiKey,
              baseUrl: value.trim() || null,
              defaultModel: step.defaultModel,
            })
          }}
          onCancel={() =>
            setStep({
              name: 'mistral-key',
              defaultModel: step.defaultModel,
            })
          }
        />
      )

    case 'mistral-model':
      return (
        <TextEntryDialog
          resetStateKey={step.name}
          title="Mistral setup"
          subtitle="Step 3 of 3"
          description={`Enter a model name. Leave blank for ${step.defaultModel}.`}
          initialValue={defaults.mistralModel ?? step.defaultModel}
          placeholder={step.defaultModel}
          allowEmpty
          onSubmit={value => {
            const env = buildMistralProfileEnv({
              model: value.trim() || step.defaultModel,
              baseUrl: step.baseUrl,
              apiKey: step.apiKey,
              processEnv: process.env,
            })
            if (env) {
              finishProfileSave(onDone, 'mistral', env)
            }
          }}
          onCancel={() =>
            setStep({
              name: 'mistral-base',
              apiKey: step.apiKey,
              defaultModel: step.defaultModel,
            })
          }
        />
      )

    case 'gemini-auth-method': {
      const hasShellGeminiKey = Boolean(
        process.env.GEMINI_API_KEY || process.env.GOOGLE_API_KEY,

@@ -65,7 +65,7 @@ export async function call(onDone: (result?: string) => void, _context: unknown,

  // Get the local settings path and make it relative to cwd
  const localSettingsPath = getSettingsFilePathForSource('localSettings');
  const relativePath = localSettingsPath ? relative(getCwdState(), localSettingsPath) : '.claude/settings.local.json';
  const relativePath = localSettingsPath ? relative(getCwdState(), localSettingsPath) : '.openclaude/settings.local.json';
  const message = color('success', themeName)(`Added "${cleanPattern}" to excluded commands in ${relativePath}`);
  onDone(message);
  return null;
12
src/commands/wiki/index.ts
Normal file
@@ -0,0 +1,12 @@
import type { Command } from '../../commands.js'

const wiki = {
  type: 'local-jsx',
  name: 'wiki',
  description: 'Initialize and inspect the OpenClaude project wiki',
  argumentHint: '[init|status]',
  immediate: true,
  load: () => import('./wiki.js'),
} satisfies Command

export default wiki
123
src/commands/wiki/wiki.tsx
Normal file
@@ -0,0 +1,123 @@
import React from 'react'
import { COMMON_HELP_ARGS, COMMON_INFO_ARGS } from '../../constants/xml.js'
import { ingestLocalWikiSource } from '../../services/wiki/ingest.js'
import { initializeWiki } from '../../services/wiki/init.js'
import { getWikiStatus } from '../../services/wiki/status.js'
import type {
  LocalJSXCommandCall,
  LocalJSXCommandOnDone,
} from '../../types/command.js'
import { getCwd } from '../../utils/cwd.js'

function renderHelp(): string {
  return `Usage: /wiki [init|status|ingest <path>]

Manage the OpenClaude project wiki stored in .openclaude/wiki.

Commands:
  /wiki init     Initialize the wiki structure in the current project
  /wiki status   Show wiki status and page/source counts
  /wiki ingest   Ingest a local file into wiki sources

Examples:
  /wiki init
  /wiki status
  /wiki ingest README.md`
}

function formatInitResult(result: Awaited<ReturnType<typeof initializeWiki>>): string {
  const lines = [`Initialized OpenClaude wiki at ${result.root}`]

  if (result.alreadyExisted) {
    lines.push('', 'Wiki already existed. No new files were created.')
    return lines.join('\n')
  }

  if (result.createdFiles.length > 0) {
    lines.push('', 'Created files:')
    for (const file of result.createdFiles) {
      lines.push(`- ${file}`)
    }
  }

  return lines.join('\n')
}

function formatStatus(status: Awaited<ReturnType<typeof getWikiStatus>>): string {
  if (!status.initialized) {
    return `OpenClaude wiki is not initialized in this project.\n\nRun /wiki init to create ${status.root}.`
  }

  return [
    'OpenClaude wiki status',
    '',
    `Root: ${status.root}`,
    `Pages: ${status.pageCount}`,
    `Sources: ${status.sourceCount}`,
    `Schema: ${status.hasSchema ? 'present' : 'missing'}`,
    `Index: ${status.hasIndex ? 'present' : 'missing'}`,
    `Log: ${status.hasLog ? 'present' : 'missing'}`,
    `Last updated: ${status.lastUpdatedAt ?? 'unknown'}`,
  ].join('\n')
}

function formatIngestResult(
  result: Awaited<ReturnType<typeof ingestLocalWikiSource>>,
): string {
  return [
    `Ingested ${result.sourceFile} into the OpenClaude wiki.`,
    '',
    `Title: ${result.title}`,
    `Source note: ${result.sourceNote}`,
    `Summary: ${result.summary}`,
  ].join('\n')
}

async function runWikiCommand(
  onDone: LocalJSXCommandOnDone,
  args: string,
): Promise<void> {
  const cwd = getCwd()
  const normalized = args.trim().toLowerCase()

  if (COMMON_HELP_ARGS.includes(normalized) || COMMON_INFO_ARGS.includes(normalized)) {
    onDone(renderHelp(), { display: 'system' })
    return
  }

  if (!normalized || normalized === 'status') {
    onDone(formatStatus(await getWikiStatus(cwd)), { display: 'system' })
    return
  }

  if (normalized === 'init') {
    onDone(formatInitResult(await initializeWiki(cwd)), { display: 'system' })
    return
  }

  if (normalized.startsWith('ingest')) {
    const pathArg = args.trim().slice('ingest'.length).trim()
    if (!pathArg) {
      onDone('Usage: /wiki ingest <local-file-path>', { display: 'system' })
      return
    }

    onDone(formatIngestResult(await ingestLocalWikiSource(cwd, pathArg)), {
      display: 'system',
    })
    return
  }

  onDone(`Unknown wiki subcommand: ${args.trim()}\n\n${renderHelp()}`, {
    display: 'system',
  })
}

export const call: LocalJSXCommandCall = async (
  onDone,
  _context,
  args,
): Promise<React.ReactNode> => {
  await runWikiCommand(onDone, args ?? '')
  return null
}
@@ -188,9 +188,9 @@ export function AutoUpdater({
        ✓ Update installed · Restart to apply
      </Text>}
      {(autoUpdaterResult?.status === 'install_failed' || autoUpdaterResult?.status === 'no_permissions') && <Text color="error" wrap="truncate">
        ✗ Auto-update failed · Try <Text bold>claude doctor</Text> or{' '}
        ✗ Auto-update failed · Try <Text bold>openclaude doctor</Text> or{' '}
        <Text bold>
          {hasLocalInstall ? `cd ~/.claude/local && npm update ${MACRO.PACKAGE_URL}` : `npm i -g ${MACRO.PACKAGE_URL}`}
          {hasLocalInstall ? `cd ~/.openclaude/local && npm update ${MACRO.PACKAGE_URL}` : `npm i -g ${MACRO.PACKAGE_URL}`}
        </Text>
      </Text>}
    </Box>;
@@ -31,9 +31,11 @@ export function BaseTextInput(t0) {
  } = t0;
  const {
    onInput,
    value,
    renderedValue,
    cursorLine,
    cursorColumn
    cursorColumn,
    offset,
  } = inputState;
  const t1 = Boolean(props.focus && props.showCursor && terminalFocus);
  let t2;
@@ -78,7 +80,7 @@ export function BaseTextInput(t0) {
    renderedPlaceholder
  } = renderPlaceholder({
    placeholder: props.placeholder,
    value: props.value,
    value,
    showCursor: props.showCursor,
    focus: props.focus,
    terminalFocus,
@@ -88,9 +90,9 @@ export function BaseTextInput(t0) {
  useInput(wrappedOnInput, {
    isActive: props.focus
  });
  const commandWithoutArgs = props.value && props.value.trim().indexOf(" ") === -1 || props.value && props.value.endsWith(" ");
  const showArgumentHint = Boolean(props.argumentHint && props.value && commandWithoutArgs && props.value.startsWith("/"));
  const cursorFiltered = props.showCursor && props.highlights ? props.highlights.filter(h => h.dimColor || props.cursorOffset < h.start || props.cursorOffset >= h.end) : props.highlights;
  const commandWithoutArgs = value && value.trim().indexOf(" ") === -1 || value && value.endsWith(" ");
  const showArgumentHint = Boolean(props.argumentHint && value && commandWithoutArgs && value.startsWith("/"));
  const cursorFiltered = props.showCursor && props.highlights ? props.highlights.filter(h => h.dimColor || offset < h.start || offset >= h.end) : props.highlights;
  const {
    viewportCharOffset,
    viewportCharEnd
@@ -102,13 +104,13 @@ export function BaseTextInput(t0) {
  })) : cursorFiltered;
  const hasHighlights = filteredHighlights && filteredHighlights.length > 0;
  if (hasHighlights) {
    return <Box ref={cursorRef}><HighlightedInput text={renderedValue} highlights={filteredHighlights} />{showArgumentHint && <Text dimColor={true}>{props.value?.endsWith(" ") ? "" : " "}{props.argumentHint}</Text>}{children}</Box>;
    return <Box ref={cursorRef}><HighlightedInput text={renderedValue} highlights={filteredHighlights} />{showArgumentHint && <Text dimColor={true}>{value.endsWith(" ") ? "" : " "}{props.argumentHint}</Text>}{children}</Box>;
  }
  const T0 = Box;
  const T1 = Text;
  const t4 = "truncate-end";
  const t5 = showPlaceholder && props.placeholderElement ? props.placeholderElement : showPlaceholder && renderedPlaceholder ? <Ansi>{renderedPlaceholder}</Ansi> : <Ansi>{renderedValue}</Ansi>;
  const t6 = showArgumentHint && <Text dimColor={true}>{props.value?.endsWith(" ") ? "" : " "}{props.argumentHint}</Text>;
  const t6 = showArgumentHint && <Text dimColor={true}>{value.endsWith(" ") ? "" : " "}{props.argumentHint}</Text>;
  let t7;
  if ($[4] !== T1 || $[5] !== children || $[6] !== props || $[7] !== t5 || $[8] !== t6) {
    t7 = <T1 wrap={t4} dimColor={props.dimColor}>{t5}{t6}{children}</T1>;
@@ -103,7 +103,7 @@ test('login picker shows the third-party platform option', async () => {
  expect(output).toContain('3rd-party platform')
})

test('third-party provider branch opens the provider wizard', async () => {
test('third-party provider branch opens the first-run provider manager', async () => {
  const output = await renderFrame(
    <ConsoleOAuthFlow
      initialStatus={{ state: 'platform_setup' }}
@@ -111,7 +111,9 @@ test('third-party provider branch opens the provider wizard', async () => {
    />,
  )

  expect(output).toContain('Set up a provider profile')
  expect(output).toContain('OpenAI-compatible')
  expect(output).toContain('Set up provider')
  expect(output).toContain('Anthropic')
  expect(output).toContain('OpenAI')
  expect(output).toContain('Ollama')
  expect(output).toContain('LM Studio')
})
@@ -12,7 +12,7 @@ import { OAuthService } from '../services/oauth/index.js';
import { getOauthAccountInfo, validateForceLoginOrg } from '../utils/auth.js';
import { logError } from '../utils/log.js';
import { getSettings_DEPRECATED } from '../utils/settings/settings.js';
import { ProviderWizard } from '../commands/provider/provider.js';
import { ProviderManager } from './ProviderManager.js';
import { Select } from './CustomSelect/select.js';
import { KeyboardShortcutHint } from './design-system/KeyboardShortcutHint.js';
import { Spinner } from './Spinner.js';
@@ -450,16 +450,17 @@ function OAuthStatusMessage({

    case 'platform_setup':
      return (
        <ProviderWizard
        <ProviderManager
          mode="first-run"
          onDone={result => {
            if (!result) {
            if (!result || result.action !== 'saved' || !result.message) {
              setOAuthStatus({ state: 'idle' })
              return
            }

            setOAuthStatus({
              state: 'platform_setup_complete',
              message: result,
              message: result.message,
            })
          }}
        />
@@ -285,7 +285,7 @@ export function Select(t0) {
      onChange,
      onCancel,
      onFocus,
      focusValue: defaultFocusValue
      defaultFocusValue,
    };
    $[7] = defaultFocusValue;
    $[8] = defaultValue;
@@ -1,5 +1,4 @@
import { useCallback, useState } from 'react'
import { isDeepStrictEqual } from 'util'
import { useRegisterOverlay } from '../../context/overlayContext.js'
import type { InputEvent } from '../../ink/events/input-event.js'
// eslint-disable-next-line custom-rules/prefer-use-keybindings -- raw space/arrow multiselect input
@@ -9,6 +8,7 @@ import {
  normalizeFullWidthSpace,
} from '../../utils/stringUtils.js'
import type { OptionWithDescription } from './select.js'
import { optionsNavigateEqual } from './use-select-navigation.js'
import { useSelectNavigation } from './use-select-navigation.js'

export type UseMultiSelectStateProps<T> = {
@@ -174,7 +174,7 @@ export function useMultiSelectState<T>({
  // and the deleted ui/useMultiSelectState.ts — without this, MCPServerDesktopImportDialog
  // keeps colliding servers checked after getAllMcpConfigs() resolves.
  const [lastOptions, setLastOptions] = useState(options)
  if (options !== lastOptions && !isDeepStrictEqual(options, lastOptions)) {
  if (options !== lastOptions && !optionsNavigateEqual(options, lastOptions)) {
    setSelectedValues(defaultValue)
    setLastOptions(options)
  }
@@ -6,10 +6,34 @@ import {
  useRef,
  useState,
} from 'react'
import { isDeepStrictEqual } from 'util'
import OptionMap from './option-map.js'
import type { OptionWithDescription } from './select.js'

/**
 * Compare two option arrays for structural equality on properties that
 * affect navigation behavior. ReactNode `label` and function `onChange`
 * are intentionally excluded — they are identity-unstable (new reference
 * each render) but don't change navigation semantics.
 */
export function optionsNavigateEqual<T>(
  a: OptionWithDescription<T>[],
  b: OptionWithDescription<T>[],
): boolean {
  if (a.length !== b.length) return false
  for (let i = 0; i < a.length; i++) {
    const ao = a[i]!
    const bo = b[i]!
    if (
      ao.value !== bo.value ||
      ao.disabled !== bo.disabled ||
      ao.type !== bo.type
    ) {
      return false
    }
  }
  return true
}
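
// Editor's note (not part of the diff): why this comparator replaces
// isDeepStrictEqual in the two call sites below. Option arrays are
// typically rebuilt on every render, and handlers are fresh function
// references each time, so deep equality always reports a change:
//
//   const a = [{ value: 'x', label: 'X', onChange: () => pick('x') }]
//   const b = [{ value: 'x', label: 'X', onChange: () => pick('x') }]
//   isDeepStrictEqual(a, b)     // false — the two arrow functions differ by reference
//   optionsNavigateEqual(a, b)  // true — only value/disabled/type are compared
//
// The option shape above is illustrative; only the compared fields matter.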

type State<T> = {
  /**
   * Map where key is option's value and value is option's index.
@@ -524,7 +548,7 @@ export function useSelectNavigation<T>({

  const [lastOptions, setLastOptions] = useState(options)

  if (options !== lastOptions && !isDeepStrictEqual(options, lastOptions)) {
  if (options !== lastOptions && !optionsNavigateEqual(options, lastOptions)) {
    dispatch({
      type: 'reset',
      state: createDefaultState({

@@ -35,6 +35,11 @@ export type UseSelectStateProps<T> = {
   */
  onFocus?: (value: T) => void

  /**
   * Initial value to focus when the component mounts.
   */
  defaultFocusValue?: T

  /**
   * Value to focus
   */
@@ -131,6 +136,7 @@ export function useSelectState<T>({
  onChange,
  onCancel,
  onFocus,
  defaultFocusValue,
  focusValue,
}: UseSelectStateProps<T>): SelectState<T> {
  const [value, setValue] = useState<T | undefined>(defaultValue)
@@ -138,7 +144,7 @@ export function useSelectState<T>({
  const navigation = useSelectNavigation<T>({
    visibleOptionCount,
    options,
    initialFocusValue: undefined,
    initialFocusValue: defaultFocusValue,
    onFocus,
    focusValue,
  })
@@ -112,7 +112,7 @@ export function HelpV2(t0) {
  }
  tabs.push(t6);
  if (false && antOnlyCommands.length > 0) {
    let t7;
    let t7;
    if ($[26] !== antOnlyCommands || $[27] !== close || $[28] !== columns || $[29] !== maxHeight) {
      t7 = <Tab key="internal-only" title="[internal-only]"><Commands commands={antOnlyCommands} maxHeight={maxHeight} columns={columns} title="Browse internal-only commands:" onCancel={close} /></Tab>;
      $[26] = antOnlyCommands;
@@ -67,6 +67,7 @@ import { isBilledAsExtraUsage } from '../../utils/extraUsage.js';
import { getFastModeUnavailableReason, isFastModeAvailable, isFastModeCooldown, isFastModeEnabled, isFastModeSupportedByModel } from '../../utils/fastMode.js';
import { isFullscreenEnvEnabled } from '../../utils/fullscreen.js';
import type { PromptInputHelpers } from '../../utils/handlePromptSubmit.js';
import { extractDraggedFilePaths } from '../../utils/dragDropPaths.js';
import { getImageFromClipboard, PASTE_THRESHOLD } from '../../utils/imagePaste.js';
import type { ImageDimensions } from '../../utils/imageResizer.js';
import { cacheImagePath, storeImage } from '../../utils/imageStore.js';
@@ -251,14 +252,24 @@ function PromptInput({
    show: false
  });
  const [cursorOffset, setCursorOffset] = useState<number>(input.length);
  // Track the last input value set via internal handlers so we can detect
  // external input changes (e.g. speech-to-text injection) and move cursor to end.
  // Track the last input value set via internal handlers so external updates
  // (for example speech-to-text injection) can still move the cursor to end
  // without clobbering a pending internal keystroke during render.
  const lastInternalInputRef = React.useRef(input);
  if (input !== lastInternalInputRef.current) {
    // Input changed externally (not through any internal handler) — move cursor to end
    setCursorOffset(input.length);
  const lastPropInputRef = React.useRef(input);
  React.useLayoutEffect(() => {
    if (input === lastPropInputRef.current) {
      return;
    }

    lastPropInputRef.current = input;
    if (input === lastInternalInputRef.current) {
      return;
    }

    lastInternalInputRef.current = input;
  }
    setCursorOffset(prev => prev === input.length ? prev : input.length);
  }, [input]);
  // Wrap onInputChange to track internal changes before they trigger re-render
  const trackAndSetInput = React.useCallback((value: string) => {
    lastInternalInputRef.current = value;
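
As an aside, the cursor-follows-external-input pattern introduced above can be read in isolation. A condensed, hypothetical sketch with simplified names — the speech-to-text trigger is just one example of an external update:

import * as React from 'react'

// Sketch only: mirrors the two-ref layout effect in the hunk above.
function useCursorFollowsExternalInput(input: string) {
  const [cursor, setCursor] = React.useState(input.length)
  const lastInternal = React.useRef(input) // last value set by our own handlers
  const lastProp = React.useRef(input)     // last prop value this effect has seen
  React.useLayoutEffect(() => {
    if (input === lastProp.current) return        // nothing new this render
    lastProp.current = input
    if (input === lastInternal.current) return    // our own keystroke: keep cursor
    lastInternal.current = input
    setCursor(prev => (prev === input.length ? prev : input.length)) // external: jump to end
  }, [input])
  const trackInternal = (value: string) => { lastInternal.current = value }
  return { cursor, setCursor, trackInternal }
}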
@@ -1204,6 +1215,22 @@ function PromptInput({
    // Clean up pasted text - strip ANSI escape codes and normalize line endings and tabs
    let text = stripAnsi(rawText).replace(/\r/g, '\n').replaceAll('\t', ' ');

    // Detect file paths from drag-and-drop and convert to @mentions.
    // When files are dragged into the terminal, the terminal sends their
    // absolute paths via bracketed paste. Image files are handled by the
    // image paste handler upstream; here we handle non-image files by
    // converting them to @mentions so they get attached on submit.
    const draggedPaths = extractDraggedFilePaths(text);
    if (draggedPaths.length > 0) {
      const mentions = draggedPaths
        .map(p => (p.includes(' ') || p.includes(':') ? `@"${p}"` : `@${p}`))
        .join(' ');
      // Ensure spacing around the mention(s) relative to existing input
      const charBefore = input[cursorOffset - 1];
      const prefix = charBefore && !/\s/.test(charBefore) ? ' ' : '';
      text = prefix + mentions + ' ';
    }

    // Match typed/auto-suggest: `!cmd` pasted into empty input enters bash mode.
    if (input.length === 0) {
      const pastedMode = getModeFromInput(text);
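
A small illustration of the quoting rule used for dragged paths above (hypothetical standalone helper; the real code inlines the same map):

const toMentions = (paths: string[]): string =>
  paths.map(p => (p.includes(' ') || p.includes(':') ? `@"${p}"` : `@${p}`)).join(' ');

toMentions(['/home/me/notes.txt', '/home/me/My File.txt']);
// -> '@/home/me/notes.txt @"/home/me/My File.txt"'
// Paths containing spaces or ':' are wrapped so the mention parser does not split them.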
@@ -1245,12 +1272,23 @@ function PromptInput({
    if (isNonSpacePrintable(input, key)) return ' ' + input;
    return input;
  }, []);
  // Ref mirrors cursorOffset for use in synchronous loops (e.g. multi-image
  // paste) where React batches state updates and the closure value is stale.
  const cursorOffsetRef = useRef(cursorOffset);
  cursorOffsetRef.current = cursorOffset;

  function insertTextAtCursor(text: string) {
    // Push current state to buffer before inserting
    pushToBuffer(input, cursorOffset, pastedContents);
    const newInput = input.slice(0, cursorOffset) + text + input.slice(cursorOffset);
    // Use refs for input/cursor so back-to-back calls in the same event
    // (e.g. onImagePaste loop for multiple dragged images) chain correctly
    // instead of each reading the same stale closure values.
    const currentInput = lastInternalInputRef.current;
    const currentOffset = cursorOffsetRef.current;
    pushToBuffer(currentInput, currentOffset, pastedContents);
    const newInput = currentInput.slice(0, currentOffset) + text + currentInput.slice(currentOffset);
    trackAndSetInput(newInput);
    setCursorOffset(cursorOffset + text.length);
    const newOffset = currentOffset + text.length;
    cursorOffsetRef.current = newOffset;
    setCursorOffset(newOffset);
  }
  const doublePressEscFromEmpty = useDoublePress(() => {}, () => onShowMessageSelector());
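
Why the ref indirection in insertTextAtCursor matters: two calls in the same event-loop tick must see each other's effects before React re-renders. A minimal sketch of the chaining, with assumed placeholder text:

// Sketch: both calls run before any re-render, so closure state would be
// stale for the second call; the refs are updated synchronously instead.
insertTextAtCursor('[image 1] '); // refs now hold the new input and offset
insertTextAtCursor('[image 2] '); // reads the updated refs, lands after "[image 1] "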
@@ -2173,7 +2211,7 @@ function PromptInput({
    multiline: true,
    onSubmit,
    onChange,
    value: historyMatch ? getValueFromInput(typeof historyMatch === 'string' ? historyMatch : historyMatch.display) : input,
    value: isSearchingHistory && historyMatch ? getValueFromInput(typeof historyMatch === 'string' ? historyMatch : historyMatch.display) : input,
    // History navigation is handled via TextInput props (onHistoryUp/onHistoryDown),
    // NOT via useKeybindings. This allows useTextInput's upOrHistoryUp/downOrHistoryDown
    // to try cursor movement first and only fall through to history navigation when the

@@ -123,8 +123,6 @@ const SuggestionItemRow = memo(function SuggestionItemRow({
    maxColumnWidth ?? stringWidth(item.displayText) + 5,
    maxNameWidth,
  )
  const displayTextColor = isSelected ? 'inverseText' : item.color
  const shouldDim = !isSelected

  let displayText = item.displayText
  if (stringWidth(displayText) > displayTextWidth - 2) {
@@ -144,21 +142,17 @@ const SuggestionItemRow = memo(function SuggestionItemRow({
  const truncatedDescription = item.description
    ? truncateToWidth(item.description.replace(/\s+/g, ' '), descriptionWidth)
    : ''
  const lineContent = `${paddedDisplayText}${tagText}${truncatedDescription}`

  return (
    <Box width="100%" opaque={true} backgroundColor={rowBackgroundColor}>
      <Text wrap="truncate">
        <Text color={displayTextColor} dimColor={shouldDim} bold={isSelected}>
          {paddedDisplayText}
        </Text>
        {tagText ? (
          <Text color={textColor} dimColor={!isSelected}>
            {tagText}
          </Text>
        ) : null}
        <Text color={textColor} dimColor={!isSelected}>
          {truncatedDescription}
        </Text>
      <Text
        color={textColor}
        dimColor={!isSelected}
        bold={isSelected}
        wrap="truncate"
      >
        {lineContent}
      </Text>
    </Box>
  )
437
src/components/ProviderManager.test.tsx
Normal file
@@ -0,0 +1,437 @@
import { PassThrough } from 'node:stream'

import { afterEach, expect, mock, test } from 'bun:test'
import React from 'react'
import stripAnsi from 'strip-ansi'

import { createRoot } from '../ink.js'
import { AppStateProvider } from '../state/AppState.js'
import { KeybindingSetup } from '../keybindings/KeybindingProviderSetup.js'

const SYNC_START = '\x1B[?2026h'
const SYNC_END = '\x1B[?2026l'

const ORIGINAL_ENV = {
  CLAUDE_CODE_USE_GITHUB: process.env.CLAUDE_CODE_USE_GITHUB,
  GITHUB_TOKEN: process.env.GITHUB_TOKEN,
  GH_TOKEN: process.env.GH_TOKEN,
}

function extractLastFrame(output: string): string {
  let lastFrame: string | null = null
  let cursor = 0

  while (cursor < output.length) {
    const start = output.indexOf(SYNC_START, cursor)
    if (start === -1) {
      break
    }

    const contentStart = start + SYNC_START.length
    const end = output.indexOf(SYNC_END, contentStart)
    if (end === -1) {
      break
    }

    const frame = output.slice(contentStart, end)
    if (frame.trim().length > 0) {
      lastFrame = frame
    }
    cursor = end + SYNC_END.length
  }

  return lastFrame ?? output
}

function createTestStreams(): {
  stdout: PassThrough
  stdin: PassThrough & {
    isTTY: boolean
    setRawMode: (mode: boolean) => void
    ref: () => void
    unref: () => void
  }
  getOutput: () => string
} {
  let output = ''
  const stdout = new PassThrough()
  const stdin = new PassThrough() as PassThrough & {
    isTTY: boolean
    setRawMode: (mode: boolean) => void
    ref: () => void
    unref: () => void
  }

  stdin.isTTY = true
  stdin.setRawMode = () => {}
  stdin.ref = () => {}
  stdin.unref = () => {}
  ;(stdout as unknown as { columns: number }).columns = 120
  stdout.on('data', chunk => {
    output += chunk.toString()
  })

  return {
    stdout,
    stdin,
    getOutput: () => output,
  }
}

async function waitForCondition(
  predicate: () => boolean,
  options?: { timeoutMs?: number; intervalMs?: number },
): Promise<void> {
  const timeoutMs = options?.timeoutMs ?? 2000
  const intervalMs = options?.intervalMs ?? 10
  const startedAt = Date.now()

  while (Date.now() - startedAt < timeoutMs) {
    if (predicate()) {
      return
    }
    await Bun.sleep(intervalMs)
  }

  throw new Error('Timed out waiting for ProviderManager test condition')
}

function createDeferred<T>(): {
  promise: Promise<T>
  resolve: (value: T) => void
} {
  let resolve!: (value: T) => void
  const promise = new Promise<T>(r => {
    resolve = r
  })
  return { promise, resolve }
}

function mockProviderProfilesModule(options?: {
  addProviderProfile?: (...args: unknown[]) => unknown
}): void {
  mock.module('../utils/providerProfiles.js', () => ({
    addProviderProfile: options?.addProviderProfile ?? (() => null),
    applyActiveProviderProfileFromConfig: () => {},
    deleteProviderProfile: () => ({ removed: false, activeProfileId: null }),
    getActiveProviderProfile: () => null,
    getProviderPresetDefaults: (preset: string) =>
      preset === 'ollama'
        ? {
            provider: 'openai',
            name: 'Ollama',
            baseUrl: 'http://localhost:11434/v1',
            model: 'llama3.1:8b',
            apiKey: '',
          }
        : {
            provider: 'openai',
            name: 'Mock provider',
            baseUrl: 'http://localhost:11434/v1',
            model: 'mock-model',
            apiKey: '',
          },
    getProviderProfiles: () => [],
    setActiveProviderProfile: () => null,
    updateProviderProfile: () => null,
  }))
}

function mockProviderManagerDependencies(
  syncRead: () => string | undefined,
  asyncRead: () => Promise<string | undefined>,
  options?: {
    addProviderProfile?: (...args: unknown[]) => unknown
    hasLocalOllama?: () => Promise<boolean>
    listOllamaModels?: () => Promise<
      Array<{
        name: string
        sizeBytes?: number | null
        family?: string | null
        families?: string[]
        parameterSize?: string | null
        quantizationLevel?: string | null
      }>
    >
  },
): void {
  mockProviderProfilesModule({ addProviderProfile: options?.addProviderProfile })

  mock.module('../utils/providerDiscovery.js', () => ({
    hasLocalOllama: options?.hasLocalOllama ?? (async () => false),
    listOllamaModels: options?.listOllamaModels ?? (async () => []),
  }))

  mock.module('../utils/githubModelsCredentials.js', () => ({
    clearGithubModelsToken: () => ({ success: true }),
    GITHUB_MODELS_HYDRATED_ENV_MARKER: 'CLAUDE_CODE_GITHUB_TOKEN_HYDRATED',
    hydrateGithubModelsTokenFromSecureStorage: () => {},
    readGithubModelsToken: syncRead,
    readGithubModelsTokenAsync: asyncRead,
  }))

  mock.module('../utils/settings/settings.js', () => ({
    updateSettingsForSource: () => ({ error: null }),
  }))
}

async function waitForFrameOutput(
  getOutput: () => string,
  predicate: (output: string) => boolean,
  timeoutMs = 2500,
): Promise<string> {
  let output = ''

  await waitForCondition(() => {
    output = stripAnsi(extractLastFrame(getOutput()))
    return predicate(output)
  }, { timeoutMs })

  return output
}

async function mountProviderManager(
  ProviderManager: React.ComponentType<{
    mode: 'first-run' | 'manage'
    onDone: (result?: unknown) => void
  }>,
  options?: {
    mode?: 'first-run' | 'manage'
    onDone?: (result?: unknown) => void
  },
): Promise<{
  stdin: PassThrough
  getOutput: () => string
  dispose: () => Promise<void>
}> {
  const { stdout, stdin, getOutput } = createTestStreams()
  const root = await createRoot({
    stdout: stdout as unknown as NodeJS.WriteStream,
    stdin: stdin as unknown as NodeJS.ReadStream,
    patchConsole: false,
  })

  root.render(
    <AppStateProvider>
      <KeybindingSetup>
        <ProviderManager
          mode={options?.mode ?? 'manage'}
          onDone={options?.onDone ?? (() => {})}
        />
      </KeybindingSetup>
    </AppStateProvider>,
  )

  return {
    stdin,
    getOutput,
    dispose: async () => {
      root.unmount()
      stdin.end()
      stdout.end()
      await Bun.sleep(0)
    },
  }
}

async function renderProviderManagerFrame(
  ProviderManager: React.ComponentType<{
    mode: 'first-run' | 'manage'
    onDone: (result?: unknown) => void
  }>,
  options?: {
    waitForOutput?: (output: string) => boolean
    timeoutMs?: number
    mode?: 'first-run' | 'manage'
  },
): Promise<string> {
  const mounted = await mountProviderManager(ProviderManager, {
    mode: options?.mode,
  })
  const output = await waitForFrameOutput(
    mounted.getOutput,
    frame => {
      if (!options?.waitForOutput) {
        return frame.includes('Provider manager')
      }
      return options.waitForOutput(frame)
    },
    options?.timeoutMs ?? 2500,
  )

  await mounted.dispose()
  return output
}

afterEach(() => {
  mock.restore()

  for (const [key, value] of Object.entries(ORIGINAL_ENV)) {
    if (value === undefined) {
      delete process.env[key as keyof typeof ORIGINAL_ENV]
    } else {
      process.env[key as keyof typeof ORIGINAL_ENV] = value
    }
  }
})

test('ProviderManager resolves GitHub virtual provider from async storage without sync reads in render flow', async () => {
  delete process.env.CLAUDE_CODE_USE_GITHUB
  delete process.env.GITHUB_TOKEN
  delete process.env.GH_TOKEN

  const syncRead = mock(() => {
    throw new Error('sync credential read should not run in ProviderManager render flow')
  })
  const asyncRead = mock(async () => 'stored-token')

  mockProviderManagerDependencies(syncRead, asyncRead)

  const nonce = `${Date.now()}-${Math.random()}`
  const { ProviderManager } = await import(`./ProviderManager.js?ts=${nonce}`)
  const output = await renderProviderManagerFrame(ProviderManager, {
    waitForOutput: frame =>
      frame.includes('Provider manager') &&
      frame.includes('GitHub Models') &&
      frame.includes('token stored'),
  })

  expect(output).toContain('Provider manager')
  expect(output).toContain('GitHub Models')
  expect(output).toContain('token stored')
  expect(output).not.toContain('No provider profiles configured yet.')

  expect(syncRead).not.toHaveBeenCalled()
  expect(asyncRead).toHaveBeenCalled()
})

test('ProviderManager first-run Ollama preset auto-detects installed models', async () => {
  delete process.env.CLAUDE_CODE_USE_GITHUB
  delete process.env.GITHUB_TOKEN
  delete process.env.GH_TOKEN

  const onDone = mock(() => {})
  const addProviderProfile = mock((payload: {
    provider: string
    name: string
    baseUrl: string
    model: string
    apiKey?: string
  }) => ({
    id: 'provider_ollama',
    provider: payload.provider,
    name: payload.name,
    baseUrl: payload.baseUrl,
    model: payload.model,
    apiKey: payload.apiKey,
  }))

  mockProviderManagerDependencies(
    () => undefined,
    async () => undefined,
    {
      addProviderProfile,
      hasLocalOllama: async () => true,
      listOllamaModels: async () => [
        {
          name: 'gemma4:31b-cloud',
          family: 'gemma',
          parameterSize: '31b',
        },
        {
          name: 'kimi-k2.5:cloud',
          family: 'kimi',
          parameterSize: '2.5b',
        },
      ],
    },
  )

  const nonce = `${Date.now()}-${Math.random()}`
  const { ProviderManager } = await import(`./ProviderManager.js?ts=${nonce}`)
  const mounted = await mountProviderManager(ProviderManager, {
    mode: 'first-run',
    onDone,
  })

  await waitForFrameOutput(
    mounted.getOutput,
    frame => frame.includes('Set up provider') && frame.includes('Ollama'),
  )

  mounted.stdin.write('j')
  await Bun.sleep(50)
  mounted.stdin.write('\r')

  const modelFrame = await waitForFrameOutput(
    mounted.getOutput,
    frame =>
      frame.includes('Choose an Ollama model') &&
      frame.includes('gemma4:31b-cloud') &&
      frame.includes('kimi-k2.5:cloud'),
  )

  expect(modelFrame).toContain('Choose an Ollama model')
  expect(modelFrame).toContain('gemma4:31b-cloud')

  await Bun.sleep(25)
  mounted.stdin.write('\r')

  await waitForCondition(() => onDone.mock.calls.length > 0)

  expect(addProviderProfile).toHaveBeenCalled()
  expect(addProviderProfile.mock.calls[0]?.[0]).toMatchObject({
    name: 'Ollama',
    baseUrl: 'http://localhost:11434/v1',
    model: 'gemma4:31b-cloud',
  })
  expect(onDone).toHaveBeenCalledWith(
    expect.objectContaining({
      action: 'saved',
      message: 'Provider configured: Ollama',
    }),
  )

  await mounted.dispose()
})

test('ProviderManager avoids first-frame false negative while stored-token lookup is pending', async () => {
  delete process.env.CLAUDE_CODE_USE_GITHUB
  delete process.env.GITHUB_TOKEN
  delete process.env.GH_TOKEN

  const syncRead = mock(() => {
    throw new Error('sync credential read should not run in ProviderManager render flow')
  })
  const deferredStoredToken = createDeferred<string | undefined>()
  const asyncRead = mock(async () => deferredStoredToken.promise)

  mockProviderManagerDependencies(syncRead, asyncRead)

  const nonce = `${Date.now()}-${Math.random()}`
  const { ProviderManager } = await import(`./ProviderManager.js?ts=${nonce}`)
  const mounted = await mountProviderManager(ProviderManager)

  const firstFrame = await waitForFrameOutput(
    mounted.getOutput,
    frame => frame.includes('Provider manager'),
  )

  expect(firstFrame).toContain('Checking GitHub Models credentials...')
  expect(firstFrame).not.toContain('No provider profiles configured yet.')

  deferredStoredToken.resolve('stored-token')

  const resolvedFrame = await waitForFrameOutput(
    mounted.getOutput,
    frame => frame.includes('GitHub Models') && frame.includes('token stored'),
  )

  expect(resolvedFrame).toContain('GitHub Models')
  expect(resolvedFrame).toContain('token stored')

  await mounted.dispose()

  expect(syncRead).not.toHaveBeenCalled()
  expect(asyncRead).toHaveBeenCalled()
})
@@ -3,8 +3,10 @@ import * as React from 'react'
import { Box, Text } from '../ink.js'
import { useKeybinding } from '../keybindings/useKeybinding.js'
import type { ProviderProfile } from '../utils/config.js'
import { hasLocalOllama, listOllamaModels } from '../utils/providerDiscovery.js'
import {
  addProviderProfile,
  applyActiveProviderProfileFromConfig,
  deleteProviderProfile,
  getActiveProviderProfile,
  getProviderPresetDefaults,
@@ -14,7 +16,20 @@ import {
  type ProviderProfileInput,
  updateProviderProfile,
} from '../utils/providerProfiles.js'
import { Select } from './CustomSelect/index.js'
import {
  rankOllamaModels,
  recommendOllamaModel,
} from '../utils/providerRecommendation.js'
import {
  clearGithubModelsToken,
  GITHUB_MODELS_HYDRATED_ENV_MARKER,
  hydrateGithubModelsTokenFromSecureStorage,
  readGithubModelsToken,
  readGithubModelsTokenAsync,
} from '../utils/githubModelsCredentials.js'
import { isEnvTruthy } from '../utils/envUtils.js'
import { updateSettingsForSource } from '../utils/settings/settings.js'
import { type OptionWithDescription, Select } from './CustomSelect/index.js'
import { Pane } from './design-system/Pane.js'
import TextInput from './TextInput.js'

@@ -32,6 +47,7 @@ type Props = {
type Screen =
  | 'menu'
  | 'select-preset'
  | 'select-ollama-model'
  | 'form'
  | 'select-active'
  | 'select-edit'
@@ -41,6 +57,16 @@ type DraftField = 'name' | 'baseUrl' | 'model' | 'apiKey'

type ProviderDraft = Record<DraftField, string>

type OllamaSelectionState =
  | { state: 'idle' }
  | { state: 'loading' }
  | {
      state: 'ready'
      options: OptionWithDescription<string>[]
      defaultValue?: string
    }
  | { state: 'unavailable'; message: string }

const FORM_STEPS: Array<{
  key: DraftField
  label: string
@@ -75,6 +101,13 @@ const FORM_STEPS: Array<{
  },
]

const GITHUB_PROVIDER_ID = '__github_models__'
const GITHUB_PROVIDER_LABEL = 'GitHub Models'
const GITHUB_PROVIDER_DEFAULT_MODEL = 'github:copilot'
const GITHUB_PROVIDER_DEFAULT_BASE_URL = 'https://models.github.ai/inference'

type GithubCredentialSource = 'stored' | 'env' | 'none'

function toDraft(profile: ProviderProfile): ProviderDraft {
  return {
    name: profile.name,
@@ -102,11 +135,83 @@ function profileSummary(profile: ProviderProfile, isActive: boolean): string {
  return `${providerKind} · ${profile.baseUrl} · ${profile.model} · ${keyInfo}${activeSuffix}`
}

function getGithubCredentialSourceFromEnv(
  processEnv: NodeJS.ProcessEnv = process.env,
): GithubCredentialSource {
  if (processEnv.GITHUB_TOKEN?.trim() || processEnv.GH_TOKEN?.trim()) {
    return 'env'
  }
  return 'none'
}

async function resolveGithubCredentialSource(
  processEnv: NodeJS.ProcessEnv = process.env,
): Promise<GithubCredentialSource> {
  const envSource = getGithubCredentialSourceFromEnv(processEnv)
  if (envSource !== 'none') {
    return envSource
  }

  if (await readGithubModelsTokenAsync()) {
    return 'stored'
  }

  return 'none'
}

function isGithubProviderAvailable(
  credentialSource: GithubCredentialSource,
  processEnv: NodeJS.ProcessEnv = process.env,
): boolean {
  if (isEnvTruthy(processEnv.CLAUDE_CODE_USE_GITHUB)) {
    return true
  }
  return credentialSource !== 'none'
}

function getGithubProviderModel(
  processEnv: NodeJS.ProcessEnv = process.env,
): string {
  if (isEnvTruthy(processEnv.CLAUDE_CODE_USE_GITHUB)) {
    return processEnv.OPENAI_MODEL?.trim() || GITHUB_PROVIDER_DEFAULT_MODEL
  }
  return GITHUB_PROVIDER_DEFAULT_MODEL
}

function getGithubProviderSummary(
  isActive: boolean,
  credentialSource: GithubCredentialSource,
  processEnv: NodeJS.ProcessEnv = process.env,
): string {
  const credentialSummary =
    credentialSource === 'stored'
      ? 'token stored'
      : credentialSource === 'env'
        ? 'token via env'
        : 'no token found'
  const activeSuffix = isActive ? ' (active)' : ''
  return `github-models · ${GITHUB_PROVIDER_DEFAULT_BASE_URL} · ${getGithubProviderModel(processEnv)} · ${credentialSummary}${activeSuffix}`
}

export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
  const initialGithubCredentialSource = getGithubCredentialSourceFromEnv()
  const initialIsGithubActive = isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB)
  const initialHasGithubCredential = initialGithubCredentialSource !== 'none'

  const [profiles, setProfiles] = React.useState(() => getProviderProfiles())
  const [activeProfileId, setActiveProfileId] = React.useState(
    () => getActiveProviderProfile()?.id,
  )
  const [githubProviderAvailable, setGithubProviderAvailable] = React.useState(
    () => isGithubProviderAvailable(initialGithubCredentialSource),
  )
  const [githubCredentialSource, setGithubCredentialSource] = React.useState<GithubCredentialSource>(
    () => initialGithubCredentialSource,
  )
  const [isGithubActive, setIsGithubActive] = React.useState(() => initialIsGithubActive)
  const [isGithubCredentialSourceResolved, setIsGithubCredentialSourceResolved] =
    React.useState(() => initialHasGithubCredential || initialIsGithubActive)
  const githubRefreshEpochRef = React.useRef(0)
  const [screen, setScreen] = React.useState<Screen>(
    mode === 'first-run' ? 'select-preset' : 'menu',
  )
@@ -121,21 +226,216 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
  const [cursorOffset, setCursorOffset] = React.useState(0)
  const [statusMessage, setStatusMessage] = React.useState<string | undefined>()
  const [errorMessage, setErrorMessage] = React.useState<string | undefined>()
  const [ollamaSelection, setOllamaSelection] = React.useState<OllamaSelectionState>({
    state: 'idle',
  })

  const currentStep = FORM_STEPS[formStepIndex] ?? FORM_STEPS[0]
  const currentStepKey = currentStep.key
  const currentValue = draft[currentStepKey]

  const refreshGithubProviderState = React.useCallback((): void => {
    const envCredentialSource = getGithubCredentialSourceFromEnv()
    const githubActive = isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB)
    const canResolveFromEnv = githubActive || envCredentialSource !== 'none'

    if (canResolveFromEnv) {
      githubRefreshEpochRef.current += 1
      setGithubCredentialSource(envCredentialSource)
      setGithubProviderAvailable(isGithubProviderAvailable(envCredentialSource))
      setIsGithubActive(githubActive)
      setIsGithubCredentialSourceResolved(true)
      return
    }

    setIsGithubCredentialSourceResolved(false)
    const refreshEpoch = ++githubRefreshEpochRef.current
    void (async () => {
      const credentialSource = await resolveGithubCredentialSource()
      if (refreshEpoch !== githubRefreshEpochRef.current) {
        return
      }

      setGithubCredentialSource(credentialSource)
      setGithubProviderAvailable(isGithubProviderAvailable(credentialSource))
      setIsGithubActive(isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB))
      setIsGithubCredentialSourceResolved(true)
    })()
  }, [])

  React.useEffect(() => {
    refreshGithubProviderState()

    return () => {
      githubRefreshEpochRef.current += 1
    }
  }, [refreshGithubProviderState])
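
  // Editor's note (not part of the diff): the epoch guard above, reduced to
  // a generic skeleton. Each refresh bumps the epoch; a resolved async
  // lookup only applies if no newer refresh (or the unmount cleanup) has
  // bumped it since. applyState stands in for the four setState calls above.
  //
  //   let epoch = 0
  //   function refresh() {
  //     const current = ++epoch
  //     void resolveGithubCredentialSource().then(source => {
  //       if (current === epoch) applyState(source) // drop stale results
  //     })
  //   }
  //   // cleanup: epoch += 1 — invalidates any in-flight lookup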
|
||||
|
||||
function refreshProfiles(): void {
|
||||
const nextProfiles = getProviderProfiles()
|
||||
setProfiles(nextProfiles)
|
||||
setActiveProfileId(getActiveProviderProfile()?.id)
|
||||
refreshGithubProviderState()
|
||||
}
|
||||
|
||||
function clearStartupProviderOverrideFromUserSettings(): string | null {
|
||||
const { error } = updateSettingsForSource('userSettings', {
|
||||
env: {
|
||||
CLAUDE_CODE_USE_OPENAI: undefined as any,
|
||||
CLAUDE_CODE_USE_GEMINI: undefined as any,
|
||||
CLAUDE_CODE_USE_GITHUB: undefined as any,
|
||||
CLAUDE_CODE_USE_BEDROCK: undefined as any,
|
||||
CLAUDE_CODE_USE_VERTEX: undefined as any,
|
||||
CLAUDE_CODE_USE_FOUNDRY: undefined as any,
|
||||
},
|
||||
})
|
||||
return error ? error.message : null
|
||||
}
|
||||
|
||||
function closeWithCancelled(message: string): void {
|
||||
onDone({ action: 'cancelled', message })
|
||||
}
|
||||
|
||||
function activateGithubProvider(): string | null {
|
||||
const { error } = updateSettingsForSource('userSettings', {
|
||||
env: {
|
||||
CLAUDE_CODE_USE_GITHUB: '1',
|
||||
OPENAI_MODEL: GITHUB_PROVIDER_DEFAULT_MODEL,
|
||||
OPENAI_API_KEY: undefined as any,
|
||||
OPENAI_ORG: undefined as any,
|
||||
OPENAI_PROJECT: undefined as any,
|
||||
OPENAI_ORGANIZATION: undefined as any,
|
||||
OPENAI_BASE_URL: undefined as any,
|
||||
OPENAI_API_BASE: undefined as any,
|
||||
CLAUDE_CODE_USE_OPENAI: undefined as any,
|
||||
CLAUDE_CODE_USE_GEMINI: undefined as any,
|
||||
CLAUDE_CODE_USE_BEDROCK: undefined as any,
|
||||
CLAUDE_CODE_USE_VERTEX: undefined as any,
|
||||
CLAUDE_CODE_USE_FOUNDRY: undefined as any,
|
||||
},
|
||||
})
|
||||
if (error) {
|
||||
return error.message
|
||||
}
|
||||
|
||||
process.env.CLAUDE_CODE_USE_GITHUB = '1'
|
||||
process.env.OPENAI_MODEL = GITHUB_PROVIDER_DEFAULT_MODEL
|
||||
delete process.env.OPENAI_API_KEY
|
||||
delete process.env.OPENAI_ORG
|
||||
delete process.env.OPENAI_PROJECT
|
||||
delete process.env.OPENAI_ORGANIZATION
|
||||
delete process.env.OPENAI_BASE_URL
|
||||
delete process.env.OPENAI_API_BASE
|
||||
delete process.env.CLAUDE_CODE_USE_OPENAI
|
||||
delete process.env.CLAUDE_CODE_USE_GEMINI
|
||||
delete process.env.CLAUDE_CODE_USE_BEDROCK
|
||||
delete process.env.CLAUDE_CODE_USE_VERTEX
|
||||
delete process.env.CLAUDE_CODE_USE_FOUNDRY
|
||||
delete process.env.CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED
|
||||
delete process.env.CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED_ID
|
||||
delete process.env[GITHUB_MODELS_HYDRATED_ENV_MARKER]
|
||||
|
||||
hydrateGithubModelsTokenFromSecureStorage()
|
||||
return null
|
||||
}
|
||||
|
||||
function deleteGithubProvider(): string | null {
|
||||
const storedTokenBeforeClear = readGithubModelsToken()?.trim()
|
||||
const cleared = clearGithubModelsToken()
|
||||
if (!cleared.success) {
|
||||
return cleared.warning ?? 'Could not clear GitHub credentials.'
|
||||
}
|
||||
|
||||
const { error } = updateSettingsForSource('userSettings', {
|
||||
env: {
|
||||
CLAUDE_CODE_USE_GITHUB: undefined as any,
|
||||
OPENAI_MODEL: undefined as any,
|
||||
OPENAI_BASE_URL: undefined as any,
|
||||
OPENAI_API_BASE: undefined as any,
|
||||
},
|
||||
})
|
||||
if (error) {
|
||||
return error.message
|
||||
}
|
||||
|
||||
const hydratedTokenInSession = process.env.GITHUB_TOKEN?.trim()
|
||||
if (
|
||||
process.env[GITHUB_MODELS_HYDRATED_ENV_MARKER] === '1' &&
|
||||
hydratedTokenInSession &&
|
||||
(!storedTokenBeforeClear || hydratedTokenInSession === storedTokenBeforeClear)
|
||||
) {
|
||||
delete process.env.GITHUB_TOKEN
|
||||
}
|
||||
|
||||
delete process.env.CLAUDE_CODE_USE_GITHUB
|
||||
delete process.env[GITHUB_MODELS_HYDRATED_ENV_MARKER]
|
||||
delete process.env.OPENAI_MODEL
|
||||
delete process.env.OPENAI_API_KEY
|
||||
delete process.env.OPENAI_ORG
|
||||
delete process.env.OPENAI_PROJECT
|
||||
delete process.env.OPENAI_ORGANIZATION
|
||||
delete process.env.OPENAI_BASE_URL
|
||||
delete process.env.OPENAI_API_BASE
|
||||
|
||||
// Restore active provider profile immediately when one exists.
|
||||
applyActiveProviderProfileFromConfig()
|
||||
|
||||
return null
|
||||
}
|
||||
|
||||
React.useEffect(() => {
  if (screen !== 'select-ollama-model') {
    return
  }

  let cancelled = false
  setOllamaSelection({ state: 'loading' })

  void (async () => {
    const available = await hasLocalOllama(draft.baseUrl)
    if (!available) {
      if (!cancelled) {
        setOllamaSelection({
          state: 'unavailable',
          message:
            'Could not reach Ollama. Start Ollama first, or enter the endpoint manually.',
        })
      }
      return
    }

    const models = await listOllamaModels(draft.baseUrl)
    if (models.length === 0) {
      if (!cancelled) {
        setOllamaSelection({
          state: 'unavailable',
          message:
            'Ollama is running, but no installed models were found. Pull a chat model such as qwen2.5-coder:7b or llama3.1:8b first, or enter details manually.',
        })
      }
      return
    }

    const ranked = rankOllamaModels(models, 'balanced')
    const recommended = recommendOllamaModel(models, 'balanced')
    if (!cancelled) {
      setOllamaSelection({
        state: 'ready',
        defaultValue: recommended?.name ?? ranked[0]?.name,
        options: ranked.map(model => ({
          label: model.name,
          value: model.name,
          description: model.summary,
        })),
      })
    }
  })()

  return () => {
    cancelled = true
  }
}, [draft.baseUrl, screen])

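The `cancelled` flag above is the standard guard against a stale async effect committing state after the screen changes or the component unmounts. A stripped-down sketch of the same pattern (the names here are illustrative, not from this diff):

React.useEffect(() => {
  let cancelled = false
  void (async () => {
    const data = await fetchSomething() // may resolve after cleanup already ran
    if (!cancelled) {
      setState(data) // only commit if this effect is still the current one
    }
  })()
  return () => {
    cancelled = true // cleanup flips the flag before the next run/unmount
  }
}, [deps])
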
function startCreateFromPreset(preset: ProviderPreset): void {
  const defaults = getProviderPresetDefaults(preset)
  const nextDraft = {
@@ -150,6 +450,13 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
    setFormStepIndex(0)
    setCursorOffset(nextDraft.name.length)
    setErrorMessage(undefined)

+   if (preset === 'ollama') {
+     setOllamaSelection({ state: 'loading' })
+     setScreen('select-ollama-model')
+     return
+   }
+
    setScreen('form')
  }

@@ -169,13 +476,13 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
    setScreen('form')
  }

- function persistDraft(): void {
+ function persistDraft(nextDraft: ProviderDraft = draft): void {
    const payload: ProviderProfileInput = {
      provider: draftProvider,
-     name: draft.name,
-     baseUrl: draft.baseUrl,
-     model: draft.model,
-     apiKey: draft.apiKey,
+     name: nextDraft.name,
+     baseUrl: nextDraft.baseUrl,
+     model: nextDraft.model,
+     apiKey: nextDraft.apiKey,
    }

    const saved = editingProfileId
@@ -187,11 +494,20 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
      return
    }

+   const isActiveSavedProfile = getActiveProviderProfile()?.id === saved.id
+   const settingsOverrideError = isActiveSavedProfile
+     ? clearStartupProviderOverrideFromUserSettings()
+     : null
+
    refreshProfiles()
-   setStatusMessage(
+   const successMessage =
      editingProfileId
        ? `Updated provider: ${saved.name}`
-       : `Added provider: ${saved.name} (now active)`,
+       : `Added provider: ${saved.name} (now active)`
+   setStatusMessage(
+     settingsOverrideError
+       ? `${successMessage}. Warning: could not clear startup provider override (${settingsOverrideError}).`
+       : successMessage,
+   )

    if (mode === 'first-run') {
@@ -209,6 +525,83 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
    setScreen('menu')
  }

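Threading `nextDraft` through `persistDraft` matters because React state updates are not synchronous: after `setDraft(nextDraft)`, the `draft` closure variable still holds the previous render's value, so a zero-argument `persistDraft()` would save stale fields. A minimal illustration of the hazard the default parameter avoids (illustrative reduction, not code from this diff):

const nextDraft = { ...draft, model: value }
setDraft(nextDraft)     // a re-render is scheduled; `draft` is unchanged here
persistDraft()          // before this change: still persists the old draft.model
persistDraft(nextDraft) // after: persists the model the user just picked
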
function renderOllamaSelection(): React.ReactNode {
  if (ollamaSelection.state === 'loading' || ollamaSelection.state === 'idle') {
    return (
      <Box flexDirection="column" gap={1}>
        <Text color="remember" bold>
          Checking Ollama
        </Text>
        <Text dimColor>Looking for installed Ollama models...</Text>
      </Box>
    )
  }

  if (ollamaSelection.state === 'unavailable') {
    return (
      <Box flexDirection="column" gap={1}>
        <Text color="remember" bold>
          Ollama setup
        </Text>
        <Text dimColor>{ollamaSelection.message}</Text>
        <Select
          options={[
            {
              value: 'manual',
              label: 'Enter manually',
              description: 'Fill in the base URL and model yourself',
            },
            {
              value: 'back',
              label: 'Back',
              description: 'Choose another provider preset',
            },
          ]}
          onChange={value => {
            if (value === 'manual') {
              setFormStepIndex(0)
              setCursorOffset(draft.name.length)
              setScreen('form')
              return
            }
            setScreen('select-preset')
          }}
          onCancel={() => setScreen('select-preset')}
          visibleOptionCount={2}
        />
      </Box>
    )
  }

  return (
    <Box flexDirection="column" gap={1}>
      <Text color="remember" bold>
        Choose an Ollama model
      </Text>
      <Text dimColor>
        Pick one of the installed Ollama models to save into a local provider
        profile.
      </Text>
      <Select
        options={ollamaSelection.options}
        defaultValue={ollamaSelection.defaultValue}
        defaultFocusValue={ollamaSelection.defaultValue}
        inlineDescriptions
        visibleOptionCount={Math.min(8, ollamaSelection.options.length)}
        onChange={value => {
          const nextDraft = {
            ...draft,
            model: value,
          }
          setDraft(nextDraft)
          persistDraft(nextDraft)
        }}
        onCancel={() => setScreen('select-preset')}
      />
    </Box>
  )
}

function handleFormSubmit(value: string): void {
  const trimmed = value.trim()

@@ -233,7 +626,7 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
      return
    }

-   persistDraft()
+   persistDraft(nextDraft)
  }

  function handleBackFromForm(): void {
@@ -413,6 +806,7 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {

  function renderMenu(): React.ReactNode {
    const hasProfiles = profiles.length > 0
+   const hasSelectableProviders = hasProfiles || githubProviderAvailable

    const options = [
      {
@@ -424,7 +818,7 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
      value: 'activate',
      label: 'Set active provider',
      description: 'Switch the active provider profile',
-     disabled: !hasProfiles,
+     disabled: !hasSelectableProviders,
    },
    {
      value: 'edit',
@@ -436,7 +830,7 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
      value: 'delete',
      label: 'Delete provider',
      description: 'Remove a provider profile',
-     disabled: !hasProfiles,
+     disabled: !hasSelectableProviders,
    },
    {
      value: 'done',
@@ -455,14 +849,29 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
        </Text>
        {statusMessage && <Text>{statusMessage}</Text>}
        <Box flexDirection="column">
-         {profiles.length === 0 ? (
-           <Text dimColor>No provider profiles configured yet.</Text>
+         {profiles.length === 0 && !githubProviderAvailable ? (
+           isGithubCredentialSourceResolved ? (
+             <Text dimColor>No provider profiles configured yet.</Text>
+           ) : (
+             <Text dimColor>Checking GitHub Models credentials...</Text>
+           )
          ) : (
-           profiles.map(profile => (
-             <Text key={profile.id} dimColor>
-               - {profile.name}: {profileSummary(profile, profile.id === activeProfileId)}
-             </Text>
-           ))
+           <>
+             {profiles.map(profile => (
+               <Text key={profile.id} dimColor>
+                 - {profile.name}: {profileSummary(profile, profile.id === activeProfileId)}
+               </Text>
+             ))}
+             {githubProviderAvailable ? (
+               <Text dimColor>
+                 - {GITHUB_PROVIDER_LABEL}:{' '}
+                 {getGithubProviderSummary(
+                   isGithubActive,
+                   githubCredentialSource,
+                 )}
+               </Text>
+             ) : null}
+           </>
          )}
        </Box>
        <Select
@@ -474,7 +883,7 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
            setScreen('select-preset')
            break
          case 'activate':
-           if (profiles.length > 0) {
+           if (hasSelectableProviders) {
              setScreen('select-active')
            }
            break
@@ -484,7 +893,7 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
          }
          break
        case 'delete':
-         if (profiles.length > 0) {
+         if (hasSelectableProviders) {
            setScreen('select-delete')
          }
          break
@@ -504,8 +913,29 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
    title: string,
    emptyMessage: string,
    onSelect: (profileId: string) => void,
+   options?: { includeGithub?: boolean },
  ): React.ReactNode {
-   if (profiles.length === 0) {
+   const includeGithub = options?.includeGithub ?? false
+   const selectOptions = profiles.map(profile => ({
+     value: profile.id,
+     label:
+       profile.id === activeProfileId
+         ? `${profile.name} (active)`
+         : profile.name,
+     description: `${profile.provider === 'anthropic' ? 'anthropic' : 'openai-compatible'} · ${profile.baseUrl} · ${profile.model}`,
+   }))
+
+   if (includeGithub && githubProviderAvailable) {
+     selectOptions.push({
+       value: GITHUB_PROVIDER_ID,
+       label: isGithubActive
+         ? `${GITHUB_PROVIDER_LABEL} (active)`
+         : GITHUB_PROVIDER_LABEL,
+       description: `github-models · ${GITHUB_PROVIDER_DEFAULT_BASE_URL} · ${getGithubProviderModel()}`,
+     })
+   }
+
+   if (selectOptions.length === 0) {
    return (
      <Box flexDirection="column" gap={1}>
        <Text color="remember" bold>
@@ -528,25 +958,16 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
      )
    }

-   const options = profiles.map(profile => ({
-     value: profile.id,
-     label:
-       profile.id === activeProfileId
-         ? `${profile.name} (active)`
-         : profile.name,
-     description: `${profile.provider === 'anthropic' ? 'anthropic' : 'openai-compatible'} · ${profile.baseUrl} · ${profile.model}`,
-   }))
-
    return (
      <Box flexDirection="column" gap={1}>
        <Text color="remember" bold>
          {title}
        </Text>
        <Select
-         options={options}
+         options={selectOptions}
          onChange={onSelect}
          onCancel={() => setScreen('menu')}
-         visibleOptionCount={Math.min(10, Math.max(2, options.length))}
+         visibleOptionCount={Math.min(10, Math.max(2, selectOptions.length))}
        />
      </Box>
    )
@@ -554,28 +975,51 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {

  let content: React.ReactNode

- switch (screen) {
-   case 'select-preset':
-     content = renderPresetSelection()
-     break
-   case 'form':
-     content = renderForm()
-     break
+ switch (screen) {
+   case 'select-preset':
+     content = renderPresetSelection()
+     break
+   case 'select-ollama-model':
+     content = renderOllamaSelection()
+     break
+   case 'form':
+     content = renderForm()
+     break
    case 'select-active':
      content = renderProfileSelection(
        'Set active provider',
        'No providers available. Add one first.',
        profileId => {
+         if (profileId === GITHUB_PROVIDER_ID) {
+           const githubError = activateGithubProvider()
+           if (githubError) {
+             setErrorMessage(`Could not activate GitHub provider: ${githubError}`)
+             setScreen('menu')
+             return
+           }
+           refreshProfiles()
+           setStatusMessage(`Active provider: ${GITHUB_PROVIDER_LABEL}`)
+           setScreen('menu')
+           return
+         }
+
          const active = setActiveProviderProfile(profileId)
          if (!active) {
            setErrorMessage('Could not change active provider.')
            setScreen('menu')
            return
          }
+         const settingsOverrideError =
+           clearStartupProviderOverrideFromUserSettings()
          refreshProfiles()
-         setStatusMessage(`Active provider: ${active.name}`)
+         setStatusMessage(
+           settingsOverrideError
+             ? `Active provider: ${active.name}. Warning: could not clear startup provider override (${settingsOverrideError}).`
+             : `Active provider: ${active.name}`,
+         )
          setScreen('menu')
        },
+       { includeGithub: true },
      )
      break
    case 'select-edit':
@@ -592,15 +1036,35 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
        'Delete provider',
        'No providers available. Add one first.',
        profileId => {
+         if (profileId === GITHUB_PROVIDER_ID) {
+           const githubDeleteError = deleteGithubProvider()
+           if (githubDeleteError) {
+             setErrorMessage(`Could not delete GitHub provider: ${githubDeleteError}`)
+           } else {
+             refreshProfiles()
+             setStatusMessage('GitHub provider deleted')
+           }
+           setScreen('menu')
+           return
+         }
+
          const result = deleteProviderProfile(profileId)
          if (!result.removed) {
            setErrorMessage('Could not delete provider.')
          } else {
+           const settingsOverrideError = result.activeProfileId
+             ? clearStartupProviderOverrideFromUserSettings()
+             : null
            refreshProfiles()
-           setStatusMessage('Provider deleted')
+           setStatusMessage(
+             settingsOverrideError
+               ? `Provider deleted. Warning: could not clear startup provider override (${settingsOverrideError}).`
+               : 'Provider deleted',
+           )
          }
          setScreen('menu')
        },
+       { includeGithub: true },
      )
      break
    case 'menu':

@@ -5,6 +5,11 @@
 * Addresses: https://github.com/Gitlawb/openclaude/issues/55
 */

+import { isLocalProviderUrl } from '../services/api/providerConfig.js'
+import { getLocalOpenAICompatibleProviderLabel } from '../utils/providerDiscovery.js'
+import { getSettings_DEPRECATED } from '../utils/settings/settings.js'
+import { parseUserSpecifiedModel } from '../utils/model/model.js'
+
declare const MACRO: { VERSION: string; DISPLAY_VERSION?: string }

const ESC = '\x1b['
@@ -82,6 +87,7 @@ function detectProvider(): { name: string; model: string; baseUrl: string; isLoc
  const useGemini = process.env.CLAUDE_CODE_USE_GEMINI === '1' || process.env.CLAUDE_CODE_USE_GEMINI === 'true'
  const useGithub = process.env.CLAUDE_CODE_USE_GITHUB === '1' || process.env.CLAUDE_CODE_USE_GITHUB === 'true'
  const useOpenAI = process.env.CLAUDE_CODE_USE_OPENAI === '1' || process.env.CLAUDE_CODE_USE_OPENAI === 'true'
+ const useMistral = process.env.CLAUDE_CODE_USE_MISTRAL === '1' || process.env.CLAUDE_CODE_USE_MISTRAL === 'true'

  if (useGemini) {
    const model = process.env.GEMINI_MODEL || 'gemini-2.0-flash'
@@ -89,17 +95,23 @@ function detectProvider(): { name: string; model: string; baseUrl: string; isLoc
    return { name: 'Google Gemini', model, baseUrl, isLocal: false }
  }

+ if (useMistral) {
+   const model = process.env.MISTRAL_MODEL || 'devstral-latest'
+   const baseUrl = process.env.MISTRAL_BASE_URL || 'https://api.mistral.ai/v1'
+   return { name: 'Mistral', model, baseUrl, isLocal: false }
+ }
+
  if (useGithub) {
    const model = process.env.OPENAI_MODEL || 'github:copilot'
    const baseUrl =
-     process.env.OPENAI_BASE_URL || 'https://models.github.ai/inference'
-   return { name: 'GitHub Models', model, baseUrl, isLocal: false }
+     process.env.OPENAI_BASE_URL || 'https://api.githubcopilot.com'
+   return { name: 'GitHub Copilot', model, baseUrl, isLocal: false }
  }

  if (useOpenAI) {
    const rawModel = process.env.OPENAI_MODEL || 'gpt-4o'
    const baseUrl = process.env.OPENAI_BASE_URL || 'https://api.openai.com/v1'
-   const isLocal = /localhost|127\.0\.0\.1|0\.0\.0\.0/.test(baseUrl)
+   const isLocal = isLocalProviderUrl(baseUrl)
    let name = 'OpenAI'
    if (/deepseek/i.test(baseUrl) || /deepseek/i.test(rawModel)) name = 'DeepSeek'
    else if (/openrouter/i.test(baseUrl)) name = 'OpenRouter'
@@ -107,10 +119,8 @@ function detectProvider(): { name: string; model: string; baseUrl: string; isLoc
    else if (/groq/i.test(baseUrl)) name = 'Groq'
    else if (/mistral/i.test(baseUrl) || /mistral/i.test(rawModel)) name = 'Mistral'
    else if (/azure/i.test(baseUrl)) name = 'Azure OpenAI'
-   else if (/localhost:11434/i.test(baseUrl)) name = 'Ollama'
-   else if (/localhost:1234/i.test(baseUrl)) name = 'LM Studio'
    else if (/llama/i.test(rawModel)) name = 'Meta Llama'
-   else if (isLocal) name = 'Local'
+   else if (isLocal) name = getLocalOpenAICompatibleProviderLabel(baseUrl)

    // Resolve model alias to actual model name + reasoning effort
    let displayModel = rawModel
@@ -138,9 +148,11 @@ function detectProvider(): { name: string; model: string; baseUrl: string; isLoc
    return { name, model: displayModel, baseUrl, isLocal }
  }

- // Default: Anthropic
- const model = process.env.ANTHROPIC_MODEL || process.env.CLAUDE_MODEL || 'claude-sonnet-4-6'
- return { name: 'Anthropic', model, baseUrl: 'https://api.anthropic.com', isLocal: false }
+ // Default: Anthropic - check settings.model first, then env vars
+ const settings = getSettings_DEPRECATED() || {}
+ const modelSetting = settings.model || process.env.ANTHROPIC_MODEL || process.env.CLAUDE_MODEL || 'claude-sonnet-4-6'
+ const resolvedModel = parseUserSpecifiedModel(modelSetting)
+ return { name: 'Anthropic', model: resolvedModel, baseUrl: 'https://api.anthropic.com', isLocal: false }
}

// ─── Box drawing ──────────────────────────────────────────────────────────────

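With the new `useMistral` branch in place, detection short-circuits before the GitHub and OpenAI checks. A quick sketch of the expected behavior (not a test from this diff):

process.env.CLAUDE_CODE_USE_MISTRAL = '1'
detectProvider()
// → { name: 'Mistral', model: 'devstral-latest',
//     baseUrl: 'https://api.mistral.ai/v1', isLocal: false }
// MISTRAL_MODEL and MISTRAL_BASE_URL override the defaults when set.
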
231  src/components/TextInput.test.tsx  (new file)
@@ -0,0 +1,231 @@
import { PassThrough } from 'node:stream'

import { expect, test } from 'bun:test'
import React from 'react'
import stripAnsi from 'strip-ansi'

import { createRoot } from '../ink.js'
import { AppStateProvider } from '../state/AppState.js'
import TextInput from './TextInput.js'
import VimTextInput from './VimTextInput.js'

const SYNC_START = '\x1B[?2026h'
const SYNC_END = '\x1B[?2026l'

function extractLastFrame(output: string): string {
  let lastFrame: string | null = null
  let cursor = 0

  while (cursor < output.length) {
    const start = output.indexOf(SYNC_START, cursor)
    if (start === -1) {
      break
    }

    const contentStart = start + SYNC_START.length
    const end = output.indexOf(SYNC_END, contentStart)
    if (end === -1) {
      break
    }

    const frame = output.slice(contentStart, end)
    if (frame.trim().length > 0) {
      lastFrame = frame
    }
    cursor = end + SYNC_END.length
  }

  return lastFrame ?? output
}

function createTestStreams(): {
  stdout: PassThrough
  stdin: PassThrough & {
    isTTY: boolean
    setRawMode: (mode: boolean) => void
    ref: () => void
    unref: () => void
  }
  getOutput: () => string
} {
  let output = ''
  const stdout = new PassThrough()
  const stdin = new PassThrough() as PassThrough & {
    isTTY: boolean
    setRawMode: (mode: boolean) => void
    ref: () => void
    unref: () => void
  }

  stdin.isTTY = true
  stdin.setRawMode = () => {}
  stdin.ref = () => {}
  stdin.unref = () => {}
  ;(stdout as unknown as { columns: number }).columns = 120
  stdout.on('data', chunk => {
    output += chunk.toString()
  })

  return {
    stdout,
    stdin,
    getOutput: () => output,
  }
}

function DelayedControlledTextInput(): React.ReactNode {
  const [value, setValue] = React.useState('')
  const [cursorOffset, setCursorOffset] = React.useState(0)
  const valueTimerRef = React.useRef<ReturnType<typeof setTimeout> | null>(null)
  const offsetTimerRef = React.useRef<ReturnType<typeof setTimeout> | null>(null)

  React.useEffect(() => {
    return () => {
      if (valueTimerRef.current) {
        clearTimeout(valueTimerRef.current)
      }
      if (offsetTimerRef.current) {
        clearTimeout(offsetTimerRef.current)
      }
    }
  }, [])

  return (
    <AppStateProvider>
      <TextInput
        value={value}
        onChange={nextValue => {
          if (valueTimerRef.current) {
            clearTimeout(valueTimerRef.current)
          }
          valueTimerRef.current = setTimeout(() => {
            setValue(nextValue)
          }, 200)
        }}
        onSubmit={() => {}}
        placeholder="Type here..."
        columns={60}
        cursorOffset={cursorOffset}
        onChangeCursorOffset={nextOffset => {
          if (offsetTimerRef.current) {
            clearTimeout(offsetTimerRef.current)
          }
          offsetTimerRef.current = setTimeout(() => {
            setCursorOffset(nextOffset)
          }, 200)
        }}
        focus
        showCursor
        multiline
      />
    </AppStateProvider>
  )
}

function DelayedControlledVimTextInput(): React.ReactNode {
  const [value, setValue] = React.useState('')
  const [cursorOffset, setCursorOffset] = React.useState(0)
  const valueTimerRef = React.useRef<ReturnType<typeof setTimeout> | null>(null)
  const offsetTimerRef = React.useRef<ReturnType<typeof setTimeout> | null>(null)

  React.useEffect(() => {
    return () => {
      if (valueTimerRef.current) {
        clearTimeout(valueTimerRef.current)
      }
      if (offsetTimerRef.current) {
        clearTimeout(offsetTimerRef.current)
      }
    }
  }, [])

  return (
    <AppStateProvider>
      <VimTextInput
        value={value}
        onChange={nextValue => {
          if (valueTimerRef.current) {
            clearTimeout(valueTimerRef.current)
          }
          valueTimerRef.current = setTimeout(() => {
            setValue(nextValue)
          }, 200)
        }}
        onSubmit={() => {}}
        placeholder="Type here..."
        columns={60}
        cursorOffset={cursorOffset}
        onChangeCursorOffset={nextOffset => {
          if (offsetTimerRef.current) {
            clearTimeout(offsetTimerRef.current)
          }
          offsetTimerRef.current = setTimeout(() => {
            setCursorOffset(nextOffset)
          }, 200)
        }}
        initialMode="INSERT"
        focus
        showCursor
        multiline
      />
    </AppStateProvider>
  )
}

test('TextInput renders typed characters before delayed parent value commits', async () => {
  const { stdout, stdin, getOutput } = createTestStreams()
  const root = await createRoot({
    stdout: stdout as unknown as NodeJS.WriteStream,
    stdin: stdin as unknown as NodeJS.ReadStream,
    patchConsole: false,
  })

  root.render(<DelayedControlledTextInput />)

  await Bun.sleep(50)
  stdin.write('a')
  await Bun.sleep(25)
  stdin.write('b')
  await Bun.sleep(25)

  const output = stripAnsi(extractLastFrame(getOutput()))

  root.unmount()
  stdin.end()
  stdout.end()
  await Bun.sleep(25)

  expect(output).toContain('ab')
  expect(output).not.toContain('Type here...')
})

test('VimTextInput preserves rapid typed characters before delayed parent value commits', async () => {
  const { stdout, stdin, getOutput } = createTestStreams()
  const root = await createRoot({
    stdout: stdout as unknown as NodeJS.WriteStream,
    stdin: stdin as unknown as NodeJS.ReadStream,
    patchConsole: false,
  })

  root.render(<DelayedControlledVimTextInput />)

  await Bun.sleep(50)
  stdin.write('a')
  await Bun.sleep(25)
  stdin.write('s')
  await Bun.sleep(25)
  stdin.write('d')
  await Bun.sleep(25)
  stdin.write('f')
  await Bun.sleep(25)

  const output = stripAnsi(extractLastFrame(getOutput()))

  root.unmount()
  stdin.end()
  stdout.end()
  await Bun.sleep(25)

  expect(output).toContain('asdf')
  expect(output).not.toContain('Type here...')
})
161  src/components/ThemePicker.test.tsx  (new file)
@@ -0,0 +1,161 @@
import { PassThrough } from 'node:stream'

import { afterEach, expect, mock, test } from 'bun:test'
import React from 'react'
import stripAnsi from 'strip-ansi'

import { createRoot, Text, useTheme } from '../ink.js'
import { KeybindingSetup } from '../keybindings/KeybindingProviderSetup.js'
import { AppStateProvider } from '../state/AppState.js'
import { ThemeProvider } from './design-system/ThemeProvider.js'

mock.module('./StructuredDiff.js', () => ({
  StructuredDiff: function StructuredDiffPreview(): React.ReactNode {
    const [theme] = useTheme()
    return <Text>{`Preview theme: ${theme}`}</Text>
  },
}))

mock.module('./StructuredDiff/colorDiff.js', () => ({
  getColorModuleUnavailableReason: () => 'env',
  getSyntaxTheme: () => null,
}))

const SYNC_START = '\x1B[?2026h'
const SYNC_END = '\x1B[?2026l'

function extractLastFrame(output: string): string {
  let lastFrame: string | null = null
  let cursor = 0

  while (cursor < output.length) {
    const start = output.indexOf(SYNC_START, cursor)
    if (start === -1) {
      break
    }

    const contentStart = start + SYNC_START.length
    const end = output.indexOf(SYNC_END, contentStart)
    if (end === -1) {
      break
    }

    const frame = output.slice(contentStart, end)
    if (frame.trim().length > 0) {
      lastFrame = frame
    }
    cursor = end + SYNC_END.length
  }

  return lastFrame ?? output
}

function createTestStreams(): {
  stdout: PassThrough
  stdin: PassThrough & {
    isTTY: boolean
    setRawMode: (mode: boolean) => void
    ref: () => void
    unref: () => void
  }
  getOutput: () => string
} {
  let output = ''
  const stdout = new PassThrough()
  const stdin = new PassThrough() as PassThrough & {
    isTTY: boolean
    setRawMode: (mode: boolean) => void
    ref: () => void
    unref: () => void
  }

  stdin.isTTY = true
  stdin.setRawMode = () => {}
  stdin.ref = () => {}
  stdin.unref = () => {}
  ;(stdout as unknown as { columns: number }).columns = 120
  stdout.on('data', chunk => {
    output += chunk.toString()
  })

  return {
    stdout,
    stdin,
    getOutput: () => output,
  }
}

async function waitForCondition(
  predicate: () => boolean,
  timeoutMs = 2000,
): Promise<void> {
  const startedAt = Date.now()

  while (Date.now() - startedAt < timeoutMs) {
    if (predicate()) {
      return
    }
    await Bun.sleep(10)
  }

  throw new Error('Timed out waiting for ThemePicker test condition')
}

async function waitForFrame(
  getOutput: () => string,
  predicate: (frame: string) => boolean,
): Promise<string> {
  let frame = ''

  await waitForCondition(() => {
    frame = stripAnsi(extractLastFrame(getOutput()))
    return predicate(frame)
  })

  return frame
}

afterEach(() => {
  mock.restore()
})

test('updates the preview when keyboard focus moves to another theme', async () => {
  const { ThemePicker } = await import('./ThemePicker.js')
  const { stdout, stdin, getOutput } = createTestStreams()
  const root = await createRoot({
    stdout: stdout as unknown as NodeJS.WriteStream,
    stdin: stdin as unknown as NodeJS.ReadStream,
    patchConsole: false,
  })

  root.render(
    <AppStateProvider>
      <KeybindingSetup>
        <ThemeProvider initialState="dark">
          <ThemePicker onThemeSelect={() => {}} />
        </ThemeProvider>
      </KeybindingSetup>
    </AppStateProvider>,
  )

  try {
    const initialFrame = await waitForFrame(
      getOutput,
      frame => frame.includes('Preview theme: dark'),
    )
    expect(initialFrame).toContain('Preview theme: dark')

    stdin.write('j')

    const updatedFrame = await waitForFrame(
      getOutput,
      frame => frame.includes('Preview theme: light'),
    )
    expect(updatedFrame).toContain('Preview theme: light')
  } finally {
    root.unmount()
    stdin.end()
    stdout.end()
    await Bun.sleep(0)
  }
})
@@ -1,13 +1,14 @@
-import { c as _c } from "react-compiler-runtime";
 import { feature } from 'bun:bundle';
 import type { StructuredPatchHunk } from 'diff';
 import * as React from 'react';
-import { useExitOnCtrlCDWithKeybindings } from '../hooks/useExitOnCtrlCDWithKeybindings.js';
+import { useExitOnCtrlCDWithKeybindings } from '../hooks/useExitOnCtrlCDWithKeybindings.js'
 import { useTerminalSize } from '../hooks/useTerminalSize.js';
 import { Box, Text, usePreviewTheme, useTheme, useThemeSetting } from '../ink.js';
 import { useRegisterKeybindingContext } from '../keybindings/KeybindingContext.js';
 import { useKeybinding } from '../keybindings/useKeybinding.js';
 import { useShortcutDisplay } from '../keybindings/useShortcutDisplay.js';
 import { useAppState, useSetAppState } from '../state/AppState.js';
 import type { AppState } from '../state/AppStateStore.js';
 import { gracefulShutdown } from '../utils/gracefulShutdown.js';
 import { updateSettingsForSource } from '../utils/settings/settings.js';
 import type { ThemeSetting } from '../utils/theme.js';
@@ -16,6 +17,17 @@ import { Byline } from './design-system/Byline.js';
 import { KeyboardShortcutHint } from './design-system/KeyboardShortcutHint.js';
 import { getColorModuleUnavailableReason, getSyntaxTheme } from './StructuredDiff/colorDiff.js';
 import { StructuredDiff } from './StructuredDiff.js';

+type StructuredDiffComponent = React.ComponentType<{
+  patch: StructuredPatchHunk
+  dim: boolean
+  filePath: string
+  firstLine: string | null
+  width: number
+  skipHighlighting?: boolean
+}>
+const StructuredDiffView = StructuredDiff as StructuredDiffComponent
+
 export type ThemePickerProps = {
   onThemeSelect: (setting: ThemeSetting) => void;
   showIntroText?: boolean;
@@ -26,307 +38,224 @@ export type ThemePickerProps = {
   skipExitHandling?: boolean;
   /** Called when the user cancels (presses Escape). If skipExitHandling is true and this is provided, it will be called instead of just saving the preview. */
   onCancel?: () => void;
 };
-export function ThemePicker(t0) {
-  const $ = _c(59);
-  const {
-    onThemeSelect,
-    showIntroText: t1,
-    helpText: t2,
-    showHelpTextBelow: t3,
-    hideEscToCancel: t4,
-    skipExitHandling: t5,
-    onCancel: onCancelProp
-  } = t0;
-  const showIntroText = t1 === undefined ? false : t1;
-  const helpText = t2 === undefined ? "" : t2;
-  const showHelpTextBelow = t3 === undefined ? false : t3;
-  const hideEscToCancel = t4 === undefined ? false : t4;
-  const skipExitHandling = t5 === undefined ? false : t5;
-  const [theme] = useTheme();
-  const themeSetting = useThemeSetting();
-  const {
-    columns
-  } = useTerminalSize();
-  let t6;
-  if ($[0] === Symbol.for("react.memo_cache_sentinel")) {
-    t6 = getColorModuleUnavailableReason();
-    $[0] = t6;
-  } else {
-    t6 = $[0];
-  }
-  const colorModuleUnavailableReason = t6;
-  let t7;
-  if ($[1] !== theme) {
-    t7 = colorModuleUnavailableReason === null ? getSyntaxTheme(theme) : null;
-    $[1] = theme;
-    $[2] = t7;
-  } else {
-    t7 = $[2];
-  }
-  const syntaxTheme = t7;
-  const {
-    setPreviewTheme,
-    savePreview,
-    cancelPreview
-  } = usePreviewTheme();
-  const syntaxHighlightingDisabled = useAppState(_temp) ?? false;
-  const setAppState = useSetAppState();
-  useRegisterKeybindingContext("ThemePicker");
-  const syntaxToggleShortcut = useShortcutDisplay("theme:toggleSyntaxHighlighting", "ThemePicker", "ctrl+t");
-  let t8;
-  if ($[3] !== setAppState || $[4] !== syntaxHighlightingDisabled) {
-    t8 = () => {
-      if (colorModuleUnavailableReason === null) {
-        const newValue = !syntaxHighlightingDisabled;
-        updateSettingsForSource("userSettings", {
-          syntaxHighlightingDisabled: newValue
-        });
-        setAppState(prev => ({
-          ...prev,
-          settings: {
-            ...prev.settings,
-            syntaxHighlightingDisabled: newValue
-          }
-        }));
-      }
-    };
-    $[3] = setAppState;
-    $[4] = syntaxHighlightingDisabled;
-    $[5] = t8;
-  } else {
-    t8 = $[5];
-  }
-  let t9;
-  if ($[6] === Symbol.for("react.memo_cache_sentinel")) {
-    t9 = {
-      context: "ThemePicker"
-    };
-    $[6] = t9;
-  } else {
-    t9 = $[6];
-  }
-  useKeybinding("theme:toggleSyntaxHighlighting", t8, t9);
-  const exitState = useExitOnCtrlCDWithKeybindings(skipExitHandling ? _temp2 : undefined);
-  let t10;
-  if ($[7] === Symbol.for("react.memo_cache_sentinel")) {
-    t10 = [...(feature("AUTO_THEME") ? [{
-      label: "Auto (match terminal)",
-      value: "auto" as const
-    }] : []), {
-      label: "Dark mode",
-      value: "dark"
-    }, {
-      label: "Light mode",
-      value: "light"
-    }, {
-      label: "Dark mode (colorblind-friendly)",
-      value: "dark-daltonized"
-    }, {
-      label: "Light mode (colorblind-friendly)",
-      value: "light-daltonized"
-    }, {
-      label: "Dark mode (ANSI colors only)",
-      value: "dark-ansi"
-    }, {
-      label: "Light mode (ANSI colors only)",
-      value: "light-ansi"
-    }];
-    $[7] = t10;
-  } else {
-    t10 = $[7];
-  }
-  const themeOptions = t10;
-  let t11;
-  if ($[8] !== showIntroText) {
-    t11 = showIntroText ? <Text>Let's get started.</Text> : <Text bold={true} color="permission">Theme</Text>;
-    $[8] = showIntroText;
-    $[9] = t11;
-  } else {
-    t11 = $[9];
-  }
-  let t12;
-  if ($[10] === Symbol.for("react.memo_cache_sentinel")) {
-    t12 = <Text bold={true}>Choose the text style that looks best with your terminal</Text>;
-    $[10] = t12;
-  } else {
-    t12 = $[10];
-  }
-  let t13;
-  if ($[11] !== helpText || $[12] !== showHelpTextBelow) {
-    t13 = helpText && !showHelpTextBelow && <Text dimColor={true}>{helpText}</Text>;
-    $[11] = helpText;
-    $[12] = showHelpTextBelow;
-    $[13] = t13;
-  } else {
-    t13 = $[13];
-  }
-  let t14;
-  if ($[14] !== t13) {
-    t14 = <Box flexDirection="column">{t12}{t13}</Box>;
-    $[14] = t13;
-    $[15] = t14;
-  } else {
-    t14 = $[15];
-  }
-  let t15;
-  if ($[16] !== setPreviewTheme) {
-    t15 = setting => {
-      setPreviewTheme(setting as ThemeSetting);
-    };
-    $[16] = setPreviewTheme;
-    $[17] = t15;
-  } else {
-    t15 = $[17];
-  }
-  let t16;
-  if ($[18] !== onThemeSelect || $[19] !== savePreview) {
-    t16 = setting_0 => {
-      savePreview();
-      onThemeSelect(setting_0 as ThemeSetting);
-    };
-    $[18] = onThemeSelect;
-    $[19] = savePreview;
-    $[20] = t16;
-  } else {
-    t16 = $[20];
-  }
-  let t17;
-  if ($[21] !== cancelPreview || $[22] !== onCancelProp || $[23] !== skipExitHandling) {
-    t17 = skipExitHandling ? () => {
-      cancelPreview();
-      onCancelProp?.();
-    } : async () => {
-      cancelPreview();
-      await gracefulShutdown(0);
-    };
-    $[21] = cancelPreview;
-    $[22] = onCancelProp;
-    $[23] = skipExitHandling;
-    $[24] = t17;
-  } else {
-    t17 = $[24];
-  }
-  let t18;
-  if ($[25] !== t15 || $[26] !== t16 || $[27] !== t17 || $[28] !== themeSetting) {
-    t18 = <Select options={themeOptions} onFocus={t15} onChange={t16} onCancel={t17} visibleOptionCount={themeOptions.length} defaultValue={themeSetting} defaultFocusValue={themeSetting} />;
-    $[25] = t15;
-    $[26] = t16;
-    $[27] = t17;
-    $[28] = themeSetting;
-    $[29] = t18;
-  } else {
-    t18 = $[29];
-  }
-  let t19;
-  if ($[30] !== t11 || $[31] !== t14 || $[32] !== t18) {
-    t19 = <Box flexDirection="column" gap={1}>{t11}{t14}{t18}</Box>;
-    $[30] = t11;
-    $[31] = t14;
-    $[32] = t18;
-    $[33] = t19;
-  } else {
-    t19 = $[33];
-  }
-  let t20;
-  if ($[34] === Symbol.for("react.memo_cache_sentinel")) {
-    t20 = {
-      oldStart: 1,
-      newStart: 1,
-      oldLines: 3,
-      newLines: 3,
-      lines: [" function greet() {", "- console.log(\"Hello, World!\");", "+ console.log(\"Hello, Claude!\");", " }"]
-    };
-    $[34] = t20;
-  } else {
-    t20 = $[34];
-  }
-  let t21;
-  if ($[35] !== columns) {
-    t21 = <Box flexDirection="column" borderTop={true} borderBottom={true} borderLeft={false} borderRight={false} borderStyle="dashed" borderColor="subtle"><StructuredDiff patch={t20} dim={false} filePath="demo.js" firstLine={null} width={columns} /></Box>;
-    $[35] = columns;
-    $[36] = t21;
-  } else {
-    t21 = $[36];
-  }
-  const t22 = colorModuleUnavailableReason === "env" ? `Syntax highlighting disabled (via CLAUDE_CODE_SYNTAX_HIGHLIGHT=${process.env.CLAUDE_CODE_SYNTAX_HIGHLIGHT})` : syntaxHighlightingDisabled ? `Syntax highlighting disabled (${syntaxToggleShortcut} to enable)` : syntaxTheme ? `Syntax theme: ${syntaxTheme.theme}${syntaxTheme.source ? ` (from ${syntaxTheme.source})` : ""} (${syntaxToggleShortcut} to disable)` : `Syntax highlighting enabled (${syntaxToggleShortcut} to disable)`;
-  let t23;
-  if ($[37] !== t22) {
-    t23 = <Text dimColor={true}>{" "}{t22}</Text>;
-    $[37] = t22;
-    $[38] = t23;
-  } else {
-    t23 = $[38];
-  }
-  let t24;
-  if ($[39] !== t21 || $[40] !== t23) {
-    t24 = <Box flexDirection="column" width="100%">{t21}{t23}</Box>;
-    $[39] = t21;
-    $[40] = t23;
-    $[41] = t24;
-  } else {
-    t24 = $[41];
-  }
-  let t25;
-  if ($[42] !== t19 || $[43] !== t24) {
-    t25 = <Box flexDirection="column" gap={1}>{t19}{t24}</Box>;
-    $[42] = t19;
-    $[43] = t24;
-    $[44] = t25;
-  } else {
-    t25 = $[44];
-  }
-  const content = t25;
-  if (!showIntroText) {
-    let t26;
-    if ($[45] !== content) {
-      t26 = <Box flexDirection="column">{content}</Box>;
-      $[45] = content;
-      $[46] = t26;
-    } else {
-      t26 = $[46];
-    }
-    let t27;
-    if ($[47] !== helpText || $[48] !== showHelpTextBelow) {
-      t27 = showHelpTextBelow && helpText && <Box marginLeft={3}><Text dimColor={true}>{helpText}</Text></Box>;
-      $[47] = helpText;
-      $[48] = showHelpTextBelow;
-      $[49] = t27;
-    } else {
-      t27 = $[49];
-    }
-    let t28;
-    if ($[50] !== exitState || $[51] !== hideEscToCancel) {
-      t28 = !hideEscToCancel && <Box><Text dimColor={true} italic={true}>{exitState.pending ? <>Press {exitState.keyName} again to exit</> : <Byline><KeyboardShortcutHint shortcut="Enter" action="select" /><KeyboardShortcutHint shortcut="Esc" action="cancel" /></Byline>}</Text></Box>;
-      $[50] = exitState;
-      $[51] = hideEscToCancel;
-      $[52] = t28;
-    } else {
-      t28 = $[52];
-    }
-    let t29;
-    if ($[53] !== t27 || $[54] !== t28) {
-      t29 = <Box marginTop={1}>{t27}{t28}</Box>;
-      $[53] = t27;
-      $[54] = t28;
-      $[55] = t29;
-    } else {
-      t29 = $[55];
-    }
-    let t30;
-    if ($[56] !== t26 || $[57] !== t29) {
-      t30 = <>{t26}{t29}</>;
-      $[56] = t26;
-      $[57] = t29;
-      $[58] = t30;
-    } else {
-      t30 = $[58];
-    }
-    return t30;
-  }
-  return content;
-}
-function _temp2() {}
-function _temp(s) {
-  return s.settings.syntaxHighlightingDisabled;
-}
+const DEMO_PATCH: StructuredPatchHunk = {
+  oldStart: 1,
+  newStart: 1,
+  oldLines: 3,
+  newLines: 3,
+  lines: [
+    ' function greet() {',
+    '- console.log("Hello, World!");',
+    '+ console.log("Hello, Claude!");',
+    ' }',
+  ],
+}
+
+/**
+ * Theme chooser with live preview. Implemented without react-compiler `_c` memo
+ * caches so preview/subtree reconciliation cannot stick on stale element refs when
+ * `setPreviewTheme` updates the resolved palette.
+ */
+export function ThemePicker({
+  onThemeSelect,
+  showIntroText = false,
+  helpText = '',
+  showHelpTextBelow = false,
+  hideEscToCancel = false,
+  skipExitHandling = false,
+  onCancel: onCancelProp,
+}: ThemePickerProps) {
+  const [theme] = useTheme();
+  const themeSetting = useThemeSetting();
+  const { columns } = useTerminalSize();
+  const colorModuleUnavailableReason = React.useMemo(
+    () => getColorModuleUnavailableReason(),
+    [],
+  )
+  const syntaxTheme =
+    colorModuleUnavailableReason === null ? getSyntaxTheme(theme) : null
+  const { setPreviewTheme, savePreview, cancelPreview } = usePreviewTheme()
+  const syntaxHighlightingDisabled = useAppState(
+    (s: AppState) => s.settings.syntaxHighlightingDisabled ?? false
+  );
+  const setAppState = useSetAppState();
+  useRegisterKeybindingContext("ThemePicker", true);
+  const syntaxToggleShortcut = useShortcutDisplay("theme:toggleSyntaxHighlighting", "ThemePicker", "ctrl+t");
+
+  const toggleSyntax = React.useCallback(() => {
+    if (colorModuleUnavailableReason === null) {
+      const newValue = !syntaxHighlightingDisabled
+      updateSettingsForSource("userSettings", {
+        syntaxHighlightingDisabled: newValue
+      });
+      setAppState(prev => ({
+        ...prev,
+        settings: {
+          ...prev.settings,
+          syntaxHighlightingDisabled: newValue
+        }
+      }));
+    }
+  }, [
+    colorModuleUnavailableReason,
+    syntaxHighlightingDisabled,
+    setAppState,
+  ])
+
+  useKeybinding("theme:toggleSyntaxHighlighting", toggleSyntax, {
+    context: "ThemePicker",
+  })
+
+  const exitState = useExitOnCtrlCDWithKeybindings(
+    skipExitHandling ? () => {} : undefined,
+  )
+
+  const themeOptions = React.useMemo(
+    () => [
+      ...(feature("AUTO_THEME")
+        ? [{ label: "Auto (match terminal)", value: "auto" as const }]
+        : []), {
+      label: "Dark mode",
+      value: "dark" as const
+    }, {
+      label: "Light mode",
+      value: "light" as const
+    }, {
+      label: "Dark mode (colorblind-friendly)",
+      value: "dark-daltonized" as const,
+    }, {
+      label: "Light mode (colorblind-friendly)",
+      value: "light-daltonized" as const,
+    }, {
+      label: "Dark mode (ANSI colors only)",
+      value: "dark-ansi" as const
+    }, {
+      label: "Light mode (ANSI colors only)",
+      value: "light-ansi" as const
+    },],
+    [],
+  )
+
+  const handleRowFocus = React.useCallback(
+    (setting: ThemeSetting) => {
+      setPreviewTheme(setting)
+    },
+    [setPreviewTheme],
+  )
+
+  const handleSelect = React.useCallback(
+    (setting: ThemeSetting) => {
+      savePreview()
+      onThemeSelect(setting)
+    },
+    [savePreview, onThemeSelect],
+  )
+
+  const handleCancel = React.useCallback(() => {
+    cancelPreview()
+    if (skipExitHandling) {
+      onCancelProp?.()
+    } else {
+      void gracefulShutdown(0)
+    }
+  }, [cancelPreview, onCancelProp, skipExitHandling])
+
+  const syntaxHint =
+    colorModuleUnavailableReason === 'env'
+      ? `Syntax highlighting disabled (via CLAUDE_CODE_SYNTAX_HIGHLIGHT=${process.env.CLAUDE_CODE_SYNTAX_HIGHLIGHT})`
+      : syntaxHighlightingDisabled
+        ? `Syntax highlighting disabled (${syntaxToggleShortcut} to enable)`
+        : syntaxTheme
+          ? `Syntax theme: ${syntaxTheme.theme}${syntaxTheme.source ? ` (from ${syntaxTheme.source})` : ''} (${syntaxToggleShortcut} to disable)`
+          : `Syntax highlighting enabled (${syntaxToggleShortcut} to disable)`
+
+  const header = showIntroText ? (
+    <Text>{"Let's get started."}</Text>
+  ) : (
+    <Text bold color="permission">
+      Theme
+    </Text>
+  )
+
+  const introBlock = (
+    <Box flexDirection="column">
+      <Text bold>Choose the text style that looks best with your terminal</Text>
+      {helpText && !showHelpTextBelow ? (
+        <Text dimColor>{helpText}</Text>
+      ) : null}
+    </Box>
+  )
+
+  const content = (
+    <Box flexDirection="column" gap={1}>
+      <Box flexDirection="column" gap={1}>
+        {header}
+        {introBlock}
+        <Select
+          options={themeOptions}
+          onFocus={handleRowFocus}
+          onChange={handleSelect}
+          onCancel={handleCancel}
+          visibleOptionCount={themeOptions.length}
+          defaultValue={themeSetting}
+          defaultFocusValue={themeSetting}
+        />
+      </Box>
+      <Box flexDirection="column" width="100%">
+        <Box
+          key={theme}
+          flexDirection="column"
+          borderTop
+          borderBottom
+          borderLeft={false}
+          borderRight={false}
+          borderStyle="dashed"
+          borderColor="subtle"
+        >
+          <StructuredDiffView
+            patch={DEMO_PATCH}
+            dim={false}
+            filePath="demo.js"
+            firstLine={null}
+            width={columns}
+          />
+        </Box>
+        <Text dimColor>
+          {' '}
+          {syntaxHint}
+        </Text>
+      </Box>
+    </Box>
+  )
+
+  if (!showIntroText) {
+    return (
+      <>
+        <Box flexDirection="column">{content}</Box>
+        {showHelpTextBelow && helpText ? (
+          <Box marginLeft={3}>
+            <Text dimColor>{helpText}</Text>
+          </Box>
+        ) : null}
+        {!hideEscToCancel ? (
+          <Box marginTop={1}>
+            <Text dimColor italic>
+              {exitState.pending ? (
+                <>Press {exitState.keyName} again to exit</>
+              ) : (
+                <Byline>
+                  <KeyboardShortcutHint shortcut="Enter" action="select" />
+                  <KeyboardShortcutHint shortcut="Esc" action="cancel" />
+                </Byline>
+              )}
+            </Text>
+          </Box>
+        ) : null}
+      </>
+    )
+  }
+
+  return content
+}

||||
@@ -68,11 +68,11 @@ When a user describes what they want an agent to do, you will:
    assistant: "Now let me use the test-runner agent to run the tests"
    </example>
  - <example>
-   Context: User is creating an agent to respond to the word "hello" with a friendly joke.
-   user: "Hello"
-   assistant: "I'm going to use the ${AGENT_TOOL_NAME} tool to launch the greeting-responder agent to respond with a friendly joke"
+   Context: User is creating an agent for Claude Code product questions.
+   user: "How do I configure Claude Code hooks?"
+   assistant: "I'm going to use the ${AGENT_TOOL_NAME} tool to launch the claude-code-guide agent to answer the question"
    <commentary>
-   Since the user is greeting, use the greeting-responder agent to respond with a friendly joke.
+   Since the user is asking how to use Claude Code, use the claude-code-guide agent.
    </commentary>
    </example>
  - If the user mentioned or implied that the agent should be used proactively, you should include examples of this.

@@ -1,4 +1,3 @@
-import { c as _c } from "react-compiler-runtime";
 import { feature } from 'bun:bundle';
 import React, { createContext, useContext, useEffect, useMemo, useState } from 'react';
 import useStdin from '../../ink/hooks/use-stdin.js';
@@ -120,21 +119,8 @@ export function ThemeProvider({
 * accepts any ThemeSetting (including 'auto').
 */
export function useTheme() {
-  const $ = _c(3);
-  const {
-    currentTheme,
-    setThemeSetting
-  } = useContext(ThemeContext);
-  let t0;
-  if ($[0] !== currentTheme || $[1] !== setThemeSetting) {
-    t0 = [currentTheme, setThemeSetting];
-    $[0] = currentTheme;
-    $[1] = setThemeSetting;
-    $[2] = t0;
-  } else {
-    t0 = $[2];
-  }
-  return t0;
+  const { currentTheme, setThemeSetting } = useContext(ThemeContext);
+  return [currentTheme, setThemeSetting] as const;
}

/**
@@ -145,25 +131,10 @@ export function useThemeSetting() {
  return useContext(ThemeContext).themeSetting;
}
export function usePreviewTheme() {
-  const $ = _c(4);
-  const {
-    setPreviewTheme,
-    savePreview,
-    cancelPreview
-  } = useContext(ThemeContext);
-  let t0;
-  if ($[0] !== cancelPreview || $[1] !== savePreview || $[2] !== setPreviewTheme) {
-    t0 = {
-      setPreviewTheme,
-      savePreview,
-      cancelPreview
-    };
-    $[0] = cancelPreview;
-    $[1] = savePreview;
-    $[2] = setPreviewTheme;
-    $[3] = t0;
-  } else {
-    t0 = $[3];
-  }
-  return t0;
+  const { setPreviewTheme, savePreview, cancelPreview } = useContext(ThemeContext);
+  return {
+    setPreviewTheme,
+    savePreview,
+    cancelPreview,
+  };
}

@@ -32,7 +32,7 @@ export function optionForPermissionSaveDestination(saveDestination: EditableSett
    case 'userSettings':
      return {
        label: 'User settings',
-       description: `Saved in at ~/.claude/settings.json`,
+       description: `Saved in ~/.openclaude/settings.json`,
        value: saveDestination
      };
  }

@@ -33,14 +33,14 @@ export const IMAGE_TARGET_RAW_SIZE = (API_IMAGE_MAX_BASE64_SIZE * 3) / 4 // 3.75
 *
 * Note: The API internally resizes images larger than 1568px (source:
 * encoding/full_encoding.py), but this is handled server-side and doesn't
-* cause errors. These client-side limits (2000px) are slightly larger to
+* cause errors. These client-side limits (1568px) are slightly larger to
 * preserve quality when beneficial.
 *
 * The API_IMAGE_MAX_BASE64_SIZE (5MB) is the actual hard limit that causes
 * API errors if exceeded.
 */
-export const IMAGE_MAX_WIDTH = 2000
-export const IMAGE_MAX_HEIGHT = 2000
+export const IMAGE_MAX_WIDTH = 1568
+export const IMAGE_MAX_HEIGHT = 1568

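A minimal sketch of how these constants might combine client-side (the `fitWithinLimits` helper is illustrative, not part of this diff):

// Clamp dimensions to IMAGE_MAX_WIDTH/HEIGHT while preserving aspect ratio.
// The API_IMAGE_MAX_BASE64_SIZE (5MB) ceiling still governs the encoded payload.
function fitWithinLimits(width: number, height: number): { width: number; height: number } {
  const scale = Math.min(1, IMAGE_MAX_WIDTH / width, IMAGE_MAX_HEIGHT / height)
  return { width: Math.floor(width * scale), height: Math.floor(height * scale) }
}
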
// =============================================================================
// PDF LIMITS

@@ -2,8 +2,11 @@ import { afterEach, expect, test } from 'bun:test'

 import { getSystemPrompt, DEFAULT_AGENT_PROMPT } from './prompts.js'
 import { CLI_SYSPROMPT_PREFIXES, getCLISyspromptPrefix } from './system.js'
+import { CLAUDE_CODE_GUIDE_AGENT } from '../tools/AgentTool/built-in/claudeCodeGuideAgent.js'
 import { GENERAL_PURPOSE_AGENT } from '../tools/AgentTool/built-in/generalPurposeAgent.js'
+import { EXPLORE_AGENT } from '../tools/AgentTool/built-in/exploreAgent.js'
+import { PLAN_AGENT } from '../tools/AgentTool/built-in/planAgent.js'
 import { STATUSLINE_SETUP_AGENT } from '../tools/AgentTool/built-in/statuslineSetup.js'

 const originalSimpleEnv = process.env.CLAUDE_CODE_SIMPLE

@@ -13,10 +16,12 @@ afterEach(() => {

test('CLI identity prefixes describe OpenClaude instead of Claude Code', () => {
  expect(getCLISyspromptPrefix()).toContain('OpenClaude')
  expect(getCLISyspromptPrefix()).not.toContain('Claude Code')
+ expect(getCLISyspromptPrefix()).not.toContain("Anthropic's official CLI for Claude")

  for (const prefix of CLI_SYSPROMPT_PREFIXES) {
    expect(prefix).toContain('OpenClaude')
    expect(prefix).not.toContain('Claude Code')
+   expect(prefix).not.toContain("Anthropic's official CLI for Claude")
  }
})
@@ -27,22 +32,53 @@ test('simple mode identity describes OpenClaude instead of Claude Code', async (
  const prompt = await getSystemPrompt([], 'gpt-4o')

  expect(prompt[0]).toContain('OpenClaude')
  expect(prompt[0]).not.toContain('Claude Code')
+ expect(prompt[0]).not.toContain("Anthropic's official CLI for Claude")
})

test('built-in agent prompts describe OpenClaude instead of Claude Code', () => {
  expect(DEFAULT_AGENT_PROMPT).toContain('OpenClaude')
  expect(DEFAULT_AGENT_PROMPT).not.toContain('Claude Code')
+ expect(DEFAULT_AGENT_PROMPT).not.toContain("Anthropic's official CLI for Claude")

  const generalPrompt = GENERAL_PURPOSE_AGENT.getSystemPrompt({
    toolUseContext: { options: {} as never },
  })
  expect(generalPrompt).toContain('OpenClaude')
  expect(generalPrompt).not.toContain('Claude Code')
+ expect(generalPrompt).not.toContain("Anthropic's official CLI for Claude")

+ const explorePrompt = EXPLORE_AGENT.getSystemPrompt({
+   toolUseContext: { options: {} as never },
+ })
+ expect(explorePrompt).toContain('OpenClaude')
+ expect(explorePrompt).not.toContain('Claude Code')
+ expect(explorePrompt).not.toContain("Anthropic's official CLI for Claude")
+
+ const planPrompt = PLAN_AGENT.getSystemPrompt({
+   toolUseContext: { options: {} as never },
+ })
+ expect(planPrompt).toContain('OpenClaude')
+ expect(planPrompt).not.toContain('Claude Code')

  const statuslinePrompt = STATUSLINE_SETUP_AGENT.getSystemPrompt({
    toolUseContext: { options: {} as never },
  })
  expect(statuslinePrompt).toContain('OpenClaude')
  expect(statuslinePrompt).not.toContain('Claude Code')

+ const guidePrompt = CLAUDE_CODE_GUIDE_AGENT.getSystemPrompt({
+   toolUseContext: {
+     options: {
+       commands: [],
+       agentDefinitions: { activeAgents: [] },
+       mcpClients: [],
+     } as never,
+   },
+ })
+ expect(guidePrompt).toContain('OpenClaude')
+ expect(guidePrompt).toContain('You are the OpenClaude guide agent.')
+ expect(guidePrompt).toContain('**OpenClaude** (the CLI tool)')
+ expect(guidePrompt).not.toContain('You are the Claude guide agent.')
+ expect(guidePrompt).not.toContain('**Claude Code** (the CLI tool)')
})

@@ -214,7 +214,7 @@ function getSimpleDoingTasksSection(): string {
|
||||
]
|
||||
|
||||
const userHelpSubitems = [
|
||||
`/help: Get help with using Claude Code`,
|
||||
`/help: Get help with using OpenClaude`,
|
||||
`To give feedback, users should ${MACRO.ISSUES_EXPLAINER}`,
|
||||
]
|
||||
|
||||
@@ -242,7 +242,7 @@ function getSimpleDoingTasksSection(): string {
|
||||
: []),
|
||||
...(process.env.USER_TYPE === 'ant'
|
||||
? [
|
||||
`If the user reports a bug, slowness, or unexpected behavior with Claude Code itself (as opposed to asking you to fix their own code), recommend the appropriate slash command: /issue for model-related problems (odd outputs, wrong tool choices, hallucinations, refusals), or /share to upload the full session transcript for product bugs, crashes, slowness, or general issues. Only recommend these when the user is describing a problem with Claude Code.`,
|
||||
`If the user reports a bug, slowness, or unexpected behavior with OpenClaude itself (as opposed to asking you to fix their own code), recommend the appropriate slash command: /issue for model-related problems (odd outputs, wrong tool choices, hallucinations, refusals), or /share to upload the full session transcript for product bugs, crashes, slowness, or general issues. Only recommend these when the user is describing a problem with OpenClaude.`,
|
||||
]
|
||||
: []),
|
||||
`If the user asks for help or wants to give feedback inform them of the following:`,
|
||||
@@ -449,7 +449,7 @@ export async function getSystemPrompt(
|
||||
): Promise<string[]> {
|
||||
if (isEnvTruthy(process.env.CLAUDE_CODE_SIMPLE)) {
|
||||
return [
|
||||
`You are OpenClaude, an open-source fork of Claude Code.\n\nCWD: ${getCwd()}\nDate: ${getSessionStartDate()}`,
|
||||
`You are OpenClaude, an open-source coding agent and CLI.\n\nCWD: ${getCwd()}\nDate: ${getSessionStartDate()}`,
|
||||
]
|
||||
}
|
||||
|
||||
@@ -696,10 +696,10 @@ export async function computeSimpleEnvInfo(
      : `The most recent Claude model family is Claude 4.5/4.6. Model IDs — Opus 4.6: '${CLAUDE_4_5_OR_4_6_MODEL_IDS.opus}', Sonnet 4.6: '${CLAUDE_4_5_OR_4_6_MODEL_IDS.sonnet}', Haiku 4.5: '${CLAUDE_4_5_OR_4_6_MODEL_IDS.haiku}'. When building AI applications, default to the latest and most capable Claude models.`,
    process.env.USER_TYPE === 'ant' && isUndercover()
      ? null
-     : `Claude Code is available as a CLI in the terminal, desktop app (Mac/Windows), web app (claude.ai/code), and IDE extensions (VS Code, JetBrains).`,
+     : `OpenClaude is available as a CLI in the terminal and can be used across local development environments and IDE workflows.`,
    process.env.USER_TYPE === 'ant' && isUndercover()
      ? null
-     : `Fast mode for Claude Code uses the same ${FRONTIER_MODEL_NAME} model with faster output. It does NOT switch to a different model. It can be toggled with /fast.`,
+     : `Fast mode for OpenClaude uses the same ${FRONTIER_MODEL_NAME} model with faster output. It does NOT switch to a different model. It can be toggled with /fast.`,
  ].filter(item => item !== null)

  return [
@@ -755,7 +755,7 @@ export function getUnameSR(): string {
  return `${osType()} ${osRelease()}`
}

- export const DEFAULT_AGENT_PROMPT = `You are an agent for OpenClaude, an open-source fork of Claude Code. Given the user's message, you should use the tools available to complete the task. Complete the task fully—don't gold-plate, but don't leave it half-done. When you complete the task, respond with a concise report covering what was done and any key findings — the caller will relay this to the user, so it only needs the essentials.`
+ export const DEFAULT_AGENT_PROMPT = `You are an agent for OpenClaude, an open-source coding agent and CLI. Given the user's message, you should use the tools available to complete the task. Complete the task fully—don't gold-plate, but don't leave it half-done. When you complete the task, respond with a concise report covering what was done and any key findings — the caller will relay this to the user, so it only needs the essentials.`

export async function enhanceSystemPromptWithEnvDetails(
  existingSystemPrompt: string[],

@@ -8,11 +8,11 @@ import { getAPIProvider } from '../utils/model/providers.js'
import { getWorkload } from '../utils/workloadContext.js'

const DEFAULT_PREFIX =
- `You are OpenClaude, an open-source fork of Claude Code.`
+ `You are OpenClaude, an open-source coding agent and CLI.`
const AGENT_SDK_CLAUDE_CODE_PRESET_PREFIX =
- `You are OpenClaude, an open-source fork of Claude Code, running within the Claude Agent SDK.`
+ `You are OpenClaude, an open-source coding agent and CLI running within the Claude Agent SDK.`
const AGENT_SDK_PREFIX =
- `You are a Claude agent running in OpenClaude, built on the Claude Agent SDK.`
+ `You are OpenClaude, built on the Claude Agent SDK.`

const CLI_SYSPROMPT_PREFIX_VALUES = [
  DEFAULT_PREFIX,

@@ -181,7 +181,7 @@ function formatCost(cost: number, maxDecimalPlaces: number = 4): string {
function formatModelUsage(): string {
  const modelUsageMap = getModelUsage()
  if (Object.keys(modelUsageMap).length === 0) {
-   return 'Usage: 0 input, 0 output, 0 cache read, 0 cache write'
+   return 'Usage: 0 input, 0 output'
  }

  // Accumulate usage by short name
@@ -211,15 +211,19 @@ function formatModelUsage(): string {

  let result = 'Usage by model:'
  for (const [shortName, usage] of Object.entries(usageByShortName)) {
-   const usageString =
+   let usageString =
      ` ${formatNumber(usage.inputTokens)} input, ` +
-     `${formatNumber(usage.outputTokens)} output, ` +
-     `${formatNumber(usage.cacheReadInputTokens)} cache read, ` +
-     `${formatNumber(usage.cacheCreationInputTokens)} cache write` +
-     (usage.webSearchRequests > 0
-       ? `, ${formatNumber(usage.webSearchRequests)} web search`
-       : '') +
-     ` (${formatCost(usage.costUSD)})`
+     `${formatNumber(usage.outputTokens)} output`
+   if (usage.cacheReadInputTokens > 0) {
+     usageString += `, ${formatNumber(usage.cacheReadInputTokens)} cache read`
+   }
+   if (usage.cacheCreationInputTokens > 0) {
+     usageString += `, ${formatNumber(usage.cacheCreationInputTokens)} cache write`
+   }
+   if (usage.webSearchRequests > 0) {
+     usageString += `, ${formatNumber(usage.webSearchRequests)} web search`
+   }
+   usageString += ` (${formatCost(usage.costUSD)})`
    result += `\n` + `${shortName}:`.padStart(21) + usageString
  }
  return result

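The net effect of this hunk: zero-valued cache and web-search segments no longer clutter the per-model lines. A rough illustration of the resulting output — model names and token counts here are invented for the example:

Usage by model:
             sonnet:  12.3k input, 1.2k output, 8.4k cache read ($0.0412)
              haiku:  512 input, 96 output ($0.0009)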
@@ -8,6 +8,34 @@ import {
  validateProviderEnvOrExit,
} from '../utils/providerValidation.js'

// OpenClaude: polyfill globalThis.File for Node < 20.
// undici v7 references `File` at module evaluation time (webidl type
// assertions). Node 18 lacks the global, causing a ReferenceError inside
// the bundled __commonJS require chain which deadlocks the process when a
// proxy is configured (configureGlobalAgents → require_undici).
// eslint-disable-next-line custom-rules/no-top-level-side-effects
if (typeof globalThis.File === 'undefined') {
  try {
    // Node 18.13+ exposes File in node:buffer but not as a global.
    // eslint-disable-next-line @typescript-eslint/no-require-imports
    const { File: NodeFile } = require('node:buffer')
    // @ts-expect-error -- polyfilling missing global
    globalThis.File = NodeFile
  } catch {
    // Absolute fallback: stub so `MakeTypeAssertion(File)` doesn't throw.
    // @ts-expect-error -- minimal polyfill
    globalThis.File = class File extends Blob {
      name: string
      lastModified: number
      constructor(parts: BlobPart[], name: string, opts?: FilePropertyBag) {
        super(parts, opts)
        this.name = name
        this.lastModified = opts?.lastModified ?? Date.now()
      }
    }
  }
}

// OpenClaude: disable experimental API betas by default.
// Tool search (defer_loading), global cache scope, and context management
// require internal API support not available to external accounts → 500.
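A quick way to sanity-check the File polyfill above on an older Node runtime — a minimal sketch, not part of the diff; the constructor arguments follow the standard File API:

// Hypothetical smoke test: construct a File and read the fields undici touches.
const f = new File(['hello'], 'greeting.txt', { lastModified: 0 })
console.log(f instanceof Blob, f.name, f.lastModified) // true greeting.txt 0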
@@ -68,15 +96,16 @@ async function main(): Promise<void> {
    }
  }

  // Enable configs first so we can read settings
  {
    const { enableConfigs } = await import('../utils/config.js')
    enableConfigs()
  }

  // Apply settings.env from user settings (includes GitHub provider settings from /onboard-github)
  {
    const { applySafeConfigEnvironmentVariables } = await import('../utils/managedEnv.js')
    applySafeConfigEnvironmentVariables()
    const { hydrateGeminiAccessTokenFromSecureStorage } = await import('../utils/geminiCredentials.js')
    hydrateGeminiAccessTokenFromSecureStorage()
    const { hydrateGithubModelsTokenFromSecureStorage } = await import('../utils/githubModelsCredentials.js')
    hydrateGithubModelsTokenFromSecureStorage()
  }

  const startupEnv = await buildStartupEnvFromProfile({
@@ -93,6 +122,16 @@ async function main(): Promise<void> {
    }
  }

  // Hydrate GitHub credentials after profile is applied so CLAUDE_CODE_USE_GITHUB from profile is available
  {
    const {
      hydrateGithubModelsTokenFromSecureStorage,
      refreshGithubModelsTokenIfNeeded,
    } = await import('../utils/githubModelsCredentials.js')
    await refreshGithubModelsTokenIfNeeded()
    hydrateGithubModelsTokenFromSecureStorage()
  }

  await validateProviderEnvOrExit()

  // Print the gradient startup screen before the Ink UI loads

252 src/grpc/server.ts (Normal file)
@@ -0,0 +1,252 @@
import * as grpc from '@grpc/grpc-js'
import * as protoLoader from '@grpc/proto-loader'
import path from 'path'
import { randomUUID } from 'crypto'
import { QueryEngine } from '../QueryEngine.js'
import { getTools } from '../tools.js'
import { getDefaultAppState } from '../state/AppStateStore.js'
import { AppState } from '../state/AppState.js'
import { FileStateCache, READ_FILE_STATE_CACHE_SIZE } from '../utils/fileStateCache.js'

const PROTO_PATH = path.resolve(import.meta.dirname, '../proto/openclaude.proto')

const packageDefinition = protoLoader.loadSync(PROTO_PATH, {
  keepCase: true,
  longs: String,
  enums: String,
  defaults: true,
  oneofs: true,
})

const protoDescriptor = grpc.loadPackageDefinition(packageDefinition) as any
const openclaudeProto = protoDescriptor.openclaude.v1

const MAX_SESSIONS = 1000

export class GrpcServer {
  private server: grpc.Server
  private sessions: Map<string, any[]> = new Map()

  constructor() {
    this.server = new grpc.Server()
    this.server.addService(openclaudeProto.AgentService.service, {
      Chat: this.handleChat.bind(this),
    })
  }

  start(port: number = 50051, host: string = 'localhost') {
    this.server.bindAsync(
      `${host}:${port}`,
      grpc.ServerCredentials.createInsecure(),
      (error, boundPort) => {
        if (error) {
          console.error('Failed to start gRPC server')
          return
        }
        console.log(`gRPC Server running at ${host}:${boundPort}`)
      }
    )
  }

  private handleChat(call: grpc.ServerDuplexStream<any, any>) {
    let engine: QueryEngine | null = null
    let appState: AppState = getDefaultAppState()
    const fileCache: FileStateCache = new FileStateCache(READ_FILE_STATE_CACHE_SIZE, 25 * 1024 * 1024)

    // To handle ActionRequired (ask user for permission)
    const pendingRequests = new Map<string, (reply: string) => void>()

    // Accumulated messages from previous turns for multi-turn context
    let previousMessages: any[] = []
    let sessionId = ''
    let interrupted = false

    call.on('data', async (clientMessage) => {
      try {
        if (clientMessage.request) {
          if (engine) {
            call.write({
              error: {
                message: 'A request is already in progress on this stream',
                code: 'ALREADY_EXISTS'
              }
            })
            return
          }
          interrupted = false
          const req = clientMessage.request
          sessionId = req.session_id || ''
          previousMessages = []

          // Load previous messages from session store (cross-stream persistence)
          if (sessionId && this.sessions.has(sessionId)) {
            previousMessages = [...this.sessions.get(sessionId)!]
          }

          const toolNameById = new Map<string, string>()

          engine = new QueryEngine({
            cwd: req.working_directory || process.cwd(),
            tools: getTools(appState.toolPermissionContext), // Gets all available tools
            commands: [], // Slash commands
            mcpClients: [],
            agents: [],
            ...(previousMessages.length > 0 ? { initialMessages: previousMessages } : {}),
            includePartialMessages: true,
            canUseTool: async (tool, input, context, assistantMsg, toolUseID) => {
              if (toolUseID) {
                toolNameById.set(toolUseID, tool.name)
              }
              // Notify client of the tool call first
              call.write({
                tool_start: {
                  tool_name: tool.name,
                  arguments_json: JSON.stringify(input),
                  tool_use_id: toolUseID
                }
              })

              // Ask user for permission
              const promptId = randomUUID()
              const question = `Approve ${tool.name}?`
              call.write({
                action_required: {
                  prompt_id: promptId,
                  question,
                  type: 'CONFIRM_COMMAND'
                }
              })

              return new Promise((resolve) => {
                pendingRequests.set(promptId, (reply) => {
                  if (reply.toLowerCase() === 'yes' || reply.toLowerCase() === 'y') {
                    resolve({ behavior: 'allow' })
                  } else {
                    resolve({ behavior: 'deny', reason: 'User denied via gRPC' })
                  }
                })
              })
            },
            getAppState: () => appState,
            setAppState: (updater) => { appState = updater(appState) },
            readFileCache: fileCache,
            userSpecifiedModel: req.model,
            fallbackModel: req.model,
          })

          // Track accumulated response data for FinalResponse
          let fullText = ''
          let promptTokens = 0
          let completionTokens = 0

          const generator = engine.submitMessage(req.message)

          for await (const msg of generator) {
            if (msg.type === 'stream_event') {
              if (msg.event.type === 'content_block_delta' && msg.event.delta.type === 'text_delta') {
                call.write({
                  text_chunk: {
                    text: msg.event.delta.text
                  }
                })
                fullText += msg.event.delta.text
              }
            } else if (msg.type === 'user') {
              // Extract tool results
              const content = msg.message.content
              if (Array.isArray(content)) {
                for (const block of content) {
                  if (block.type === 'tool_result') {
                    let outputStr = ''
                    if (typeof block.content === 'string') {
                      outputStr = block.content
                    } else if (Array.isArray(block.content)) {
                      outputStr = block.content.map(c => c.type === 'text' ? c.text : '').join('\n')
                    }
                    call.write({
                      tool_result: {
                        tool_name: toolNameById.get(block.tool_use_id) ?? block.tool_use_id,
                        tool_use_id: block.tool_use_id,
                        output: outputStr,
                        is_error: block.is_error || false
                      }
                    })
                  }
                }
              }
            } else if (msg.type === 'result') {
              // Extract real token counts and final text from the result
              if (msg.subtype === 'success') {
                if (msg.result) {
                  fullText = msg.result
                }
                promptTokens = msg.usage?.input_tokens ?? 0
                completionTokens = msg.usage?.output_tokens ?? 0
              }
            }
          }

          if (!interrupted) {
            // Save messages for multi-turn context in subsequent requests
            previousMessages = [...engine.getMessages()]

            // Persist to session store for cross-stream resumption
            if (sessionId) {
              if (!this.sessions.has(sessionId) && this.sessions.size >= MAX_SESSIONS) {
                // Evict oldest session (Map preserves insertion order)
                this.sessions.delete(this.sessions.keys().next().value)
              }
              this.sessions.set(sessionId, previousMessages)
            }

            call.write({
              done: {
                full_text: fullText,
                prompt_tokens: promptTokens,
                completion_tokens: completionTokens
              }
            })
          }

          engine = null

        } else if (clientMessage.input) {
          const promptId = clientMessage.input.prompt_id
          const reply = clientMessage.input.reply
          if (pendingRequests.has(promptId)) {
            pendingRequests.get(promptId)!(reply)
            pendingRequests.delete(promptId)
          }
        } else if (clientMessage.cancel) {
          interrupted = true
          if (engine) {
            engine.interrupt()
          }
          call.end()
        }
      } catch (err: any) {
        console.error('Error processing stream')
        call.write({
          error: {
            message: err.message || "Internal server error",
            code: "INTERNAL"
          }
        })
        call.end()
      }
    })

    call.on('end', () => {
      interrupted = true
      // Unblock any pending permission prompts so canUseTool can return
      for (const resolve of pendingRequests.values()) {
        resolve('no')
      }
      if (engine) {
        engine.interrupt()
      }
      engine = null
      pendingRequests.clear()
    })
  }
}
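For orientation, here is a hedged sketch of a companion client for the Chat stream above. The proto path, service name, and message/field names (request, text_chunk, action_required, input, done, error) are taken from the server code; everything else — including the auto-approve policy — is an assumption for illustration, not part of the diff:

import * as grpc from '@grpc/grpc-js'
import * as protoLoader from '@grpc/proto-loader'

// Hypothetical client: load the same proto the server uses.
const def = protoLoader.loadSync('src/proto/openclaude.proto', {
  keepCase: true, longs: String, enums: String, defaults: true, oneofs: true,
})
const proto = (grpc.loadPackageDefinition(def) as any).openclaude.v1

const client = new proto.AgentService('localhost:50051', grpc.credentials.createInsecure())
const call = client.Chat()

call.on('data', (msg: any) => {
  if (msg.text_chunk) process.stdout.write(msg.text_chunk.text)
  // Answer permission prompts; a real client would ask the user instead.
  if (msg.action_required) {
    call.write({ input: { prompt_id: msg.action_required.prompt_id, reply: 'yes' } })
  }
  if (msg.done) {
    console.log(`\n[${msg.done.prompt_tokens} in / ${msg.done.completion_tokens} out]`)
    call.end()
  }
  if (msg.error) {
    console.error(msg.error.message)
    call.end()
  }
})

call.write({ request: { session_id: 'demo', message: 'hello', working_directory: process.cwd() } })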
@@ -1,4 +1,4 @@
- import { useCallback, useEffect } from 'react'
+ import { useCallback, useEffect, useSyncExternalStore } from 'react'
import type { Command } from '../commands.js'
import { useNotifications } from '../context/notifications.js'
import {
@@ -7,6 +7,11 @@ import {
} from '../services/analytics/index.js'
import { reinitializeLspServerManager } from '../services/lsp/manager.js'
import { useAppState, useSetAppState } from '../state/AppState.js'
+ import {
+   getPluginCommandsState,
+   setPluginCommandsState,
+   subscribePluginCommands,
+ } from '../state/pluginCommandsStore.js'
import type { AgentDefinition } from '../tools/AgentTool/loadAgentsDir.js'
import { count } from '../utils/array.js'
import { logForDebugging } from '../utils/debug.js'
@@ -39,6 +44,11 @@ export function useManagePlugins({
}: {
  enabled?: boolean
} = {}) {
+ const pluginCommands = useSyncExternalStore(
+   subscribePluginCommands,
+   getPluginCommandsState,
+   getPluginCommandsState,
+ )
  const setAppState = useSetAppState()
  const needsRefresh = useAppState(s => s.plugins.needsRefresh)
  const { addNotification } = useNotifications()
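The pluginCommandsStore module itself is not shown in this diff. For context, a minimal sketch of the contract useSyncExternalStore needs from it — names match the import above, but the implementation is assumed:

// Hypothetical src/state/pluginCommandsStore.ts: a stable snapshot getter
// plus a subscribe that returns an unsubscribe function.
import type { Command } from '../commands.js'

let state: Command[] = []
const listeners = new Set<() => void>()

export function getPluginCommandsState(): Command[] {
  return state
}

export function setPluginCommandsState(next: Command[]): void {
  state = next
  for (const listener of listeners) listener()
}

export function subscribePluginCommands(listener: () => void): () => void {
  listeners.add(listener)
  return () => listeners.delete(listener)
}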
@@ -74,6 +84,7 @@ export function useManagePlugins({

      try {
        commands = await getPluginCommands()
+       setPluginCommandsState(commands)
      } catch (error) {
        const errorMessage =
          error instanceof Error ? error.message : String(error)
@@ -82,6 +93,7 @@ export function useManagePlugins({
          source: 'plugin-commands',
          error: `Failed to load plugin commands: ${errorMessage}`,
        })
+       setPluginCommandsState([])
      }

      try {
@@ -173,7 +185,7 @@ export function useManagePlugins({
          ...prevState.plugins,
          enabled,
          disabled,
-         commands,
+         commands: [],
          errors: mergedErrors,
        },
      }
@@ -226,6 +238,7 @@ export function useManagePlugins({
      logError(errorObj)
      logForDebugging(`Error loading plugins: ${error}`)
      // Set empty state on error, but preserve LSP errors and add the new error
+     setPluginCommandsState([])
      setAppState(prevState => {
        // Keep existing LSP/non-plugin-loading errors
        const existingLspErrors = prevState.plugins.errors.filter(
@@ -284,6 +297,11 @@ export function useManagePlugins({
    })
  }, [initialPluginLoad, enabled])

+ useEffect(() => {
+   if (enabled) return
+   setPluginCommandsState([])
+ }, [enabled])

  // Plugin state changed on disk (background reconcile, /plugin menu,
  // external settings edit). Show a notification; user runs /reload-plugins
  // to apply. The previous auto-refresh here had a stale-cache bug (only
@@ -301,4 +319,6 @@ export function useManagePlugins({
    // Do NOT auto-refresh. Do NOT reset needsRefresh — /reload-plugins
    // consumes it via refreshActivePlugins().
  }, [enabled, needsRefresh, addNotification])

+ return enabled ? pluginCommands : []
}

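With these hunks applied, the hook's contract changes shape: commands flow through the external store and back out of the hook's return value instead of being written into app state. A hypothetical call site, for illustration only:

// Consumers read commands from the return value; while plugins are
// disabled the hook yields an empty list.
const pluginCommands = useManagePlugins({ enabled: pluginsEnabled })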
@@ -1,3 +1,4 @@
+ import { useLayoutEffect, useRef, useState } from 'react'
import { isInputModeCharacter } from 'src/components/PromptInput/inputModes.js'
import { useNotifications } from 'src/context/notifications.js'
import stripAnsi from 'strip-ansi'
@@ -100,9 +101,74 @@ export function useTextInput({
    prewarmModifiers()
  }

- const offset = externalOffset
- const setOffset = onOffsetChange
- const cursor = Cursor.fromText(originalValue, columns, offset)
+ // Keep a local text/cursor mirror so consecutive keystrokes can advance
+ // immediately even if the controlled parent value hasn't committed yet.
+ const [renderState, setRenderState] = useState(() => ({
+   value: originalValue,
+   offset: externalOffset,
+ }))
+ const liveValueRef = useRef(originalValue)
+ const liveOffsetRef = useRef(externalOffset)
+ const lastSeenPropsRef = useRef({
+   value: originalValue,
+   offset: externalOffset,
+ })
+ const updateRenderedInput = (nextValue: string, nextOffset: number): void => {
+   liveValueRef.current = nextValue
+   liveOffsetRef.current = nextOffset
+   setRenderState(prev =>
+     prev.value === nextValue && prev.offset === nextOffset
+       ? prev
+       : { value: nextValue, offset: nextOffset },
+   )
+ }
+ useLayoutEffect(() => {
+   if (
+     lastSeenPropsRef.current.value === originalValue &&
+     lastSeenPropsRef.current.offset === externalOffset
+   ) {
+     return
+   }
+
+   lastSeenPropsRef.current = {
+     value: originalValue,
+     offset: externalOffset,
+   }
+   updateRenderedInput(originalValue, externalOffset)
+ }, [originalValue, externalOffset])
+
+ const value = renderState.value
+ const offset = renderState.offset
+ const getLiveValue = (): string => liveValueRef.current
+ const getLiveCursor = (): Cursor =>
+   Cursor.fromText(liveValueRef.current, columns, liveOffsetRef.current)
+ const setValue = (nextValue: string, nextOffset = liveOffsetRef.current): void => {
+   const previousValue = liveValueRef.current
+   const previousOffset = liveOffsetRef.current
+
+   if (previousValue === nextValue && previousOffset === nextOffset) {
+     return
+   }
+
+   updateRenderedInput(nextValue, nextOffset)
+
+   if (previousValue !== nextValue) {
+     onChange(nextValue)
+   }
+
+   if (previousOffset !== nextOffset) {
+     onOffsetChange(nextOffset)
+   }
+ }
+ const setOffset = (nextOffset: number): void => {
+   if (nextOffset === liveOffsetRef.current) {
+     return
+   }
+
+   updateRenderedInput(liveValueRef.current, nextOffset)
+   onOffsetChange(nextOffset)
+ }
+ const cursor = Cursor.fromText(value, columns, offset)
  const { addNotification, removeNotification } = useNotifications()

  const handleCtrlC = useDoublePress(
@@ -111,9 +177,11 @@ export function useTextInput({
    },
    () => onExit?.(),
    () => {
-     if (originalValue) {
+     const currentValue = getLiveValue()
+     if (currentValue) {
+       updateRenderedInput('', 0)
        onChange('')
-       setOffset(0)
+       onOffsetChange(0)
        onHistoryReset?.()
      }
    },
@@ -125,7 +193,8 @@ export function useTextInput({
  // not dialog dismissal, and needs the double-press safety mechanism.
  const handleEscape = useDoublePress(
    (show: boolean) => {
-     if (!originalValue || !show) {
+     const currentValue = getLiveValue()
+     if (!currentValue || !show) {
        return
      }
      addNotification({
@@ -136,17 +205,19 @@ export function useTextInput({
      })
    },
    () => {
+     const currentValue = getLiveValue()
      // Remove the "Esc again to clear" notification immediately
      removeNotification('escape-again-to-clear')
      onClearInput?.()
-     if (originalValue) {
+     if (currentValue) {
        // Track double-escape usage for feature discovery
        // Save to history before clearing
-       if (originalValue.trim() !== '') {
-         addToHistory(originalValue)
+       if (currentValue.trim() !== '') {
+         addToHistory(currentValue)
        }
+       updateRenderedInput('', 0)
        onChange('')
-       setOffset(0)
+       onOffsetChange(0)
        onHistoryReset?.()
      }
    },
@@ -154,13 +225,13 @@ export function useTextInput({

  const handleEmptyCtrlD = useDoublePress(
    show => {
-     if (originalValue !== '') {
+     if (getLiveValue() !== '') {
        return
      }
      onExitMessage?.(show, 'Ctrl-D')
    },
    () => {
-     if (originalValue !== '') {
+     if (getLiveValue() !== '') {
        return
      }
      onExit?.()
@@ -168,6 +239,7 @@ export function useTextInput({
  )

  function handleCtrlD(): MaybeCursor {
+   const cursor = getLiveCursor()
    if (cursor.text === '') {
      // When input is empty, handle double-press
      handleEmptyCtrlD()
@@ -178,24 +250,28 @@ export function useTextInput({
  }

  function killToLineEnd(): Cursor {
+   const cursor = getLiveCursor()
    const { cursor: newCursor, killed } = cursor.deleteToLineEnd()
    pushToKillRing(killed, 'append')
    return newCursor
  }

  function killToLineStart(): Cursor {
+   const cursor = getLiveCursor()
    const { cursor: newCursor, killed } = cursor.deleteToLineStart()
    pushToKillRing(killed, 'prepend')
    return newCursor
  }

  function killWordBefore(): Cursor {
+   const cursor = getLiveCursor()
    const { cursor: newCursor, killed } = cursor.deleteWordBefore()
    pushToKillRing(killed, 'prepend')
    return newCursor
  }

  function yank(): Cursor {
+   const cursor = getLiveCursor()
    const text = getLastKill()
    if (text.length > 0) {
      const startOffset = cursor.offset
@@ -207,6 +283,7 @@ export function useTextInput({
  }

  function handleYankPop(): Cursor {
+   const cursor = getLiveCursor()
    const popResult = yankPop()
    if (!popResult) {
      return cursor
@@ -222,13 +299,16 @@ export function useTextInput({
  }

  const handleCtrl = mapInput([
-   ['a', () => cursor.startOfLine()],
-   ['b', () => cursor.left()],
+   ['a', () => getLiveCursor().startOfLine()],
+   ['b', () => getLiveCursor().left()],
    ['c', handleCtrlC],
    ['d', handleCtrlD],
-   ['e', () => cursor.endOfLine()],
-   ['f', () => cursor.right()],
-   ['h', () => cursor.deleteTokenBefore() ?? cursor.backspace()],
+   ['e', () => getLiveCursor().endOfLine()],
+   ['f', () => getLiveCursor().right()],
+   ['h', () => {
+     const cursor = getLiveCursor()
+     return cursor.deleteTokenBefore() ?? cursor.backspace()
+   }],
    ['k', killToLineEnd],
    ['n', () => downOrHistoryDown()],
    ['p', () => upOrHistoryUp()],
@@ -238,13 +318,15 @@ export function useTextInput({
  ])

  const handleMeta = mapInput([
-   ['b', () => cursor.prevWord()],
-   ['f', () => cursor.nextWord()],
-   ['d', () => cursor.deleteWordAfter()],
+   ['b', () => getLiveCursor().prevWord()],
+   ['f', () => getLiveCursor().nextWord()],
+   ['d', () => getLiveCursor().deleteWordAfter()],
    ['y', handleYankPop],
  ])

  function handleEnter(key: Key) {
+   const cursor = getLiveCursor()
+   const currentValue = getLiveValue()
    if (
      multiline &&
      cursor.offset > 0 &&
@@ -263,10 +345,11 @@ export function useTextInput({
    if (env.terminal === 'Apple_Terminal' && isModifierPressed('shift')) {
      return cursor.insert('\n')
    }
-   onSubmit?.(originalValue)
+   onSubmit?.(currentValue)
  }

  function upOrHistoryUp() {
+   const cursor = getLiveCursor()
    if (disableCursorMovementForUpDownKeys) {
      onHistoryUp?.()
      return cursor
@@ -291,6 +374,7 @@ export function useTextInput({
    return cursor
  }
  function downOrHistoryDown() {
+   const cursor = getLiveCursor()
    if (disableCursorMovementForUpDownKeys) {
      onHistoryDown?.()
      return cursor
@@ -315,7 +399,7 @@ export function useTextInput({
    return cursor
  }

- function mapKey(key: Key): InputMapper {
+ function mapKey(key: Key, cursor: Cursor): InputMapper {
    switch (true) {
      case key.escape:
        return () => {
@@ -429,6 +513,7 @@ export function useTextInput({
  }

  function onInput(input: string, key: Key): void {
+   const currentCursor = getLiveCursor()
    // Note: Image paste shortcut (chat:imagePaste) is handled via useKeybindings in PromptInput

    // Apply filter if provided
@@ -446,18 +531,15 @@ export function useTextInput({

      // Apply all DEL characters as backspace operations synchronously
      // Try to delete tokens first, fall back to character backspace
-     let currentCursor = cursor
+     let nextCursor = currentCursor
      for (let i = 0; i < delCount; i++) {
-       currentCursor =
-         currentCursor.deleteTokenBefore() ?? currentCursor.backspace()
+       nextCursor =
+         nextCursor.deleteTokenBefore() ?? nextCursor.backspace()
      }

      // Update state once with the final result
-     if (!cursor.equals(currentCursor)) {
-       if (cursor.text !== currentCursor.text) {
-         onChange(currentCursor.text)
-       }
-       setOffset(currentCursor.offset)
+     if (!currentCursor.equals(nextCursor)) {
+       setValue(nextCursor.text, nextCursor.offset)
      }
      resetKillAccumulation()
      resetYankState()
@@ -474,13 +556,10 @@ export function useTextInput({
      resetYankState()
    }

-   const nextCursor = mapKey(key)(filteredInput)
+   const nextCursor = mapKey(key, currentCursor)(filteredInput)
    if (nextCursor) {
-     if (!cursor.equals(nextCursor)) {
-       if (cursor.text !== nextCursor.text) {
-         onChange(nextCursor.text)
-       }
-       setOffset(nextCursor.offset)
+     if (!currentCursor.equals(nextCursor)) {
+       setValue(nextCursor.text, nextCursor.offset)
      }
      // SSH-coalesced Enter: on slow links, "o" + Enter can arrive as one
      // chunk "o\r". parseKeypress only matches s === '\r', so it hit the
@@ -512,6 +591,7 @@ export function useTextInput({

  return {
    onInput,
+   value,
    renderedValue: cursor.render(
      cursorChar,
      mask,
@@ -520,6 +600,7 @@ export function useTextInput({
      maxVisibleLines,
    ),
    offset,
+   setValue,
    setOffset,
    cursorLine: cursorPos.line - cursor.getViewportStartLine(maxVisibleLines),
    cursorColumn: cursorPos.column,

@@ -70,14 +70,14 @@ export function useVimInput(props: UseVimInputProps): VimInputState {
    // Vim behavior: move cursor left by 1 when exiting insert mode
    // (unless at beginning of line or at offset 0)
    const offset = textInput.offset
-   if (offset > 0 && props.value[offset - 1] !== '\n') {
+   if (offset > 0 && textInput.value[offset - 1] !== '\n') {
      textInput.setOffset(offset - 1)
    }

    vimStateRef.current = { mode: 'NORMAL', command: { type: 'idle' } }
    setMode('NORMAL')
    onModeChange?.('NORMAL')
- }, [onModeChange, textInput, props.value])
+ }, [onModeChange, textInput])

  function createOperatorContext(
    cursor: Cursor,
@@ -85,8 +85,8 @@ export function useVimInput(props: UseVimInputProps): VimInputState {
  ): OperatorContext {
    return {
      cursor,
-     text: props.value,
-     setText: (newText: string) => props.onChange(newText),
+     text: textInput.value,
+     setText: (newText: string) => textInput.setValue(newText),
      setOffset: (offset: number) => textInput.setOffset(offset),
      enterInsert: (offset: number) => switchToInsertMode(offset),
      getRegister: () => persistentRef.current.register,
@@ -110,15 +110,18 @@ export function useVimInput(props: UseVimInputProps): VimInputState {
    const change = persistentRef.current.lastChange
    if (!change) return

-   const cursor = Cursor.fromText(props.value, props.columns, textInput.offset)
+   const cursor = Cursor.fromText(
+     textInput.value,
+     props.columns,
+     textInput.offset,
+   )
    const ctx = createOperatorContext(cursor, true)

    switch (change.type) {
      case 'insert':
        if (change.text) {
          const newCursor = cursor.insert(change.text)
-         props.onChange(newCursor.text)
-         textInput.setOffset(newCursor.offset)
+         textInput.setValue(newCursor.text, newCursor.offset)
        }
        break

@@ -179,7 +182,11 @@ export function useVimInput(props: UseVimInputProps): VimInputState {
    // lookups expect single chars and a prepended space would break them.
    const filtered = inputFilter ? inputFilter(rawInput, key) : rawInput
    const input = state.mode === 'INSERT' ? filtered : rawInput
-   const cursor = Cursor.fromText(props.value, props.columns, textInput.offset)
+   const cursor = Cursor.fromText(
+     textInput.value,
+     props.columns,
+     textInput.offset,
+   )

    if (key.ctrl) {
      textInput.onInput(input, key)

@@ -115,7 +115,10 @@ export default class App extends PureComponent<Props, State> {
  keyParseState = INITIAL_STATE;
  // Timer for flushing incomplete escape sequences
  incompleteEscapeTimer: NodeJS.Timeout | null = null;
- stdinMode: 'readable' | 'data' = process.env.OPENCLAUDE_USE_READABLE_STDIN === '1' ? 'readable' : 'data';
+ // Default to readable-mode stdin (legacy Ink behavior). The data-mode path
+ // is kept as an explicit opt-in because some terminals can enter a state
+ // where startup input appears frozen when data mode is the default.
+ stdinMode: 'readable' | 'data' = process.env.OPENCLAUDE_USE_DATA_STDIN === '1' || process.env.OPENCLAUDE_USE_READABLE_STDIN === '0' ? 'data' : 'readable';
  // Timeout durations for incomplete sequences (ms)
  readonly NORMAL_TIMEOUT = 50; // Short timeout for regular esc sequences
  readonly PASTE_TIMEOUT = 500; // Longer timeout for paste operations
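For context on what the flag selects, a hedged sketch of the two stdin attachment styles in Node — handleInput is a hypothetical stand-in for the real key-parsing handler:

if (this.stdinMode === 'readable') {
  // Legacy Ink behavior: pull-based reads, drained on each 'readable' event.
  process.stdin.on('readable', () => {
    let chunk: Buffer | null
    while ((chunk = process.stdin.read()) !== null) {
      handleInput(chunk)
    }
  })
} else {
  // Opt-in push-based flowing mode.
  process.stdin.on('data', (chunk: Buffer) => handleInput(chunk))
}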

@@ -33,7 +33,7 @@ import createRenderer, { type Renderer } from './renderer.js';
import { CellWidth, CharPool, cellAt, createScreen, HyperlinkPool, isEmptyCellAt, migrateScreenPools, StylePool } from './screen.js';
import { applySearchHighlight } from './searchHighlight.js';
import { applySelectionOverlay, captureScrolledRows, clearSelection, createSelectionState, extendSelection, type FocusMove, findPlainTextUrlAt, getSelectedText, hasSelection, moveFocus, type SelectionState, selectLineAt, selectWordAt, shiftAnchor, shiftSelection, shiftSelectionForFollow, startSelection, updateSelection } from './selection.js';
- import { SYNC_OUTPUT_SUPPORTED, supportsExtendedKeys, type Terminal, writeDiffToTerminal } from './terminal.js';
+ import { shouldSkipMainScreenSyncMarkers, shouldUseMainScreenRewrite, SYNC_OUTPUT_SUPPORTED, supportsExtendedKeys, type Terminal, writeDiffToTerminal } from './terminal.js';
import { CURSOR_HOME, cursorMove, cursorPosition, DISABLE_KITTY_KEYBOARD, DISABLE_MODIFY_OTHER_KEYS, ENABLE_KITTY_KEYBOARD, ENABLE_MODIFY_OTHER_KEYS, ERASE_SCREEN } from './termio/csi.js';
import { DBP, DFE, DISABLE_MOUSE_TRACKING, ENABLE_MOUSE_TRACKING, ENTER_ALT_SCREEN, EXIT_ALT_SCREEN, SHOW_CURSOR } from './termio/dec.js';
import { CLEAR_ITERM2_PROGRESS, CLEAR_TAB_STATUS, setClipboard, supportsTabStatus, wrapForMultiplexer } from './termio/osc.js';
@@ -609,12 +609,13 @@ export default class Ink {
    };
  }
  const tDiff = performance.now();
+ const rewriteMainScreen = !this.altScreenActive && shouldUseMainScreenRewrite();
  const diff = this.log.render(prevFrame, frame, this.altScreenActive,
    // DECSTBM needs BSU/ESU atomicity — without it the outer terminal
    // renders the scrolled-but-not-yet-repainted intermediate state.
    // tmux is the main case (re-emits DECSTBM with its own timing and
    // doesn't implement DEC 2026, so SYNC_OUTPUT_SUPPORTED is false).
-   SYNC_OUTPUT_SUPPORTED);
+   SYNC_OUTPUT_SUPPORTED, rewriteMainScreen);
  const diffMs = performance.now() - tDiff;
  // Swap buffers
  this.backFrame = this.frontFrame;
@@ -759,7 +760,8 @@ export default class Ink {
    }
  }
  const tWrite = performance.now();
- writeDiffToTerminal(this.terminal, optimized, this.altScreenActive && !SYNC_OUTPUT_SUPPORTED);
+ const skipSyncMarkers = this.altScreenActive ? !SYNC_OUTPUT_SUPPORTED : rewriteMainScreen || shouldSkipMainScreenSyncMarkers();
+ writeDiffToTerminal(this.terminal, optimized, skipSyncMarkers);
  const writeMs = performance.now() - tWrite;

  // Update blit safety for the NEXT frame. The frame just rendered

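The DEC 2026 "BSU/ESU" markers the comments above refer to are plain escape sequences. A minimal sketch of how a diff payload is wrapped (or not, per the skip flag) — writeDiffToTerminal's real implementation is not shown in this diff:

const BSU = '\x1b[?2026h' // begin synchronized update (DEC 2026)
const ESU = '\x1b[?2026l' // end synchronized update

function writeAtomically(out: NodeJS.WriteStream, payload: string, skipMarkers: boolean): void {
  // When markers are skipped (tmux without DEC 2026 support, or ghostty
  // main-screen rewrites), the payload is written bare.
  out.write(skipMarkers ? payload : BSU + payload + ESU)
}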
125 src/ink/log-update.test.ts (Normal file)
@@ -0,0 +1,125 @@
import { expect, test } from 'bun:test'

import type { Frame } from './frame.ts'
import { LogUpdate } from './log-update.ts'
import {
  CellWidth,
  CharPool,
  createScreen,
  HyperlinkPool,
  setCellAt,
  StylePool,
} from './screen.ts'

function collectStdout(diff: ReturnType<LogUpdate['render']>): string {
  return diff
    .filter((patch): patch is Extract<(typeof diff)[number], { type: 'stdout' }> => patch.type === 'stdout')
    .map(patch => patch.content)
    .join('')
}

function createHarness() {
  const stylePool = new StylePool()
  const charPool = new CharPool()
  const hyperlinkPool = new HyperlinkPool()

  return {
    stylePool,
    charPool,
    hyperlinkPool,
    log: new LogUpdate({ isTTY: true, stylePool }),
  }
}

function frameFromLines(
  stylePool: StylePool,
  charPool: CharPool,
  hyperlinkPool: HyperlinkPool,
  lines: string[],
  cursor = { x: 0, y: lines.length, visible: true },
): Frame {
  const width = lines.reduce((max, line) => Math.max(max, line.length), 0)
  const screen = createScreen(width, lines.length, stylePool, charPool, hyperlinkPool)

  for (const [y, line] of lines.entries()) {
    for (const [x, char] of [...line].entries()) {
      setCellAt(screen, x, y, {
        char,
        styleId: stylePool.none,
        width: CellWidth.Narrow,
      })
    }
  }

  return {
    screen,
    viewport: {
      width: Math.max(width, 1),
      height: 10,
    },
    cursor,
  }
}

test('ghostty main-screen rewrite paints prompt content without full terminal reset when width is stable', () => {
  const { stylePool, charPool, hyperlinkPool, log } = createHarness()
  const prev = frameFromLines(stylePool, charPool, hyperlinkPool, [' '])
  const next = frameFromLines(stylePool, charPool, hyperlinkPool, ['prompt'])

  const diff = log.render(prev, next, false, true, true)
  const stdout = collectStdout(diff)

  expect(diff.some(patch => patch.type === 'clearTerminal')).toBe(false)
  expect(diff.some(patch => patch.type === 'clear' && patch.count === 1)).toBe(
    true,
  )
  expect(stdout).toContain('prompt')
})

test('ghostty main-screen rewrite clears only the changed prompt tail before repainting', () => {
  const { stylePool, charPool, hyperlinkPool, log } = createHarness()
  const prev = frameFromLines(
    stylePool,
    charPool,
    hyperlinkPool,
    ['status', '> abc'],
  )
  const next = frameFromLines(
    stylePool,
    charPool,
    hyperlinkPool,
    ['status', '> abcd'],
  )

  const diff = log.render(prev, next, false, true, true)
  const stdout = collectStdout(diff)

  expect(diff.some(patch => patch.type === 'clearTerminal')).toBe(false)
  expect(diff.some(patch => patch.type === 'clear' && patch.count === 1)).toBe(
    true,
  )
  expect(stdout).toContain('abcd')
})

test('ghostty main-screen rewrite falls back to incremental diff for larger changes', () => {
  const { stylePool, charPool, hyperlinkPool, log } = createHarness()
  const prev = frameFromLines(
    stylePool,
    charPool,
    hyperlinkPool,
    ['row 0', 'row 1', 'row 2', 'row 3', 'row 4', '> abc'],
  )
  const next = frameFromLines(
    stylePool,
    charPool,
    hyperlinkPool,
    ['row 0 updated', 'row 1', 'row 2', 'row 3', 'row 4', '> abcd'],
  )

  const diff = log.render(prev, next, false, true, true)
  const stdout = collectStdout(diff)

  expect(diff.some(patch => patch.type === 'clear')).toBe(false)
  expect(stdout).toContain('updated')
  expect(stdout).toContain('abcd')
})
@@ -125,6 +125,7 @@ export class LogUpdate {
    next: Frame,
    altScreen = false,
    decstbmSafe = true,
+   rewriteMainScreen = false,
  ): Diff {
    if (!this.options.isTTY) {
      return this.renderFullFrame(next)
@@ -146,6 +147,13 @@ export class LogUpdate {
      return fullResetSequence_CAUSES_FLICKER(next, 'resize', stylePool)
    }

+   if (!altScreen && rewriteMainScreen) {
+     const rewriteStartY = findMainScreenRewriteStart(prev.screen, next.screen)
+     if (rewriteStartY !== null) {
+       return rewriteMainScreenFrame(prev, next, stylePool, rewriteStartY)
+     }
+   }

    // DECSTBM scroll optimization: when a ScrollBox's scrollTop changed,
    // shift content with a hardware scroll (CSI top;bot r + CSI n S/T)
    // instead of rewriting the whole scroll region. The shiftRows on
@@ -420,34 +428,8 @@ export class LogUpdate {
    // Main screen: if cursor needs to be past the last line of content
    // (typical: cursor.y = screen.height), emit \n to create that line
    // since cursor movement can't create new lines.
-   if (altScreen) {
-     // no-op; next frame's CSI H anchors cursor
-   } else if (next.cursor.y >= next.screen.height) {
-     // Move to column 0 of current line, then emit newlines to reach target row
-     screen.txn(prev => {
-       const rowsToCreate = next.cursor.y - prev.y
-       if (rowsToCreate > 0) {
-         // Use CR to resolve pending wrap (if any) without advancing
-         // to the next line, then LF to create each new row.
-         const patches: Diff = new Array<Diff[number]>(1 + rowsToCreate)
-         patches[0] = CARRIAGE_RETURN
-         for (let i = 0; i < rowsToCreate; i++) {
-           patches[1 + i] = NEWLINE
-         }
-         return [patches, { dx: -prev.x, dy: rowsToCreate }]
-       }
-       // At or past target row - need to move cursor to correct position
-       const dy = next.cursor.y - prev.y
-       if (dy !== 0 || prev.x !== next.cursor.x) {
-         // Use CR to clear pending wrap (if any), then cursor move
-         const patches: Diff = [CARRIAGE_RETURN]
-         patches.push({ type: 'cursorMove', x: next.cursor.x, y: dy })
-         return [patches, { dx: next.cursor.x - prev.x, dy }]
-       }
-       return [[], { dx: 0, dy: 0 }]
-     })
-   } else {
-     moveCursorTo(screen, next.cursor.x, next.cursor.y)
+   if (!altScreen) {
+     restoreMainScreenCursor(screen, next)
    }

    const elapsed = performance.now() - startTime

@@ -467,6 +449,77 @@ export class LogUpdate {
  }
}

function rewriteMainScreenFrame(
  prev: Frame,
  next: Frame,
  stylePool: StylePool,
  startY: number,
): Diff {
  const diff: Diff = []
  const clearCount = prev.screen.height - startY

  if (clearCount > 0) {
    const clearStartY = prev.screen.height - 1
    const clearCursor = new VirtualScreen(prev.cursor, next.viewport.width)
    moveCursorTo(clearCursor, 0, clearStartY)
    diff.push(...clearCursor.diff)
    diff.push({ type: 'clear', count: clearCount })
  }

  const screen = new VirtualScreen(
    clearCount > 0 ? { x: 0, y: startY } : prev.cursor,
    next.viewport.width,
  )
  renderFrameSlice(screen, next, startY, next.screen.height, stylePool)
  restoreMainScreenCursor(screen, next)

  return [...diff, ...screen.diff]
}

const MAX_MAIN_SCREEN_REWRITE_ROWS = 6

function findMainScreenRewriteStart(prev: Screen, next: Screen): number | null {
  const commonHeight = Math.min(prev.height, next.height)
  let firstChangedY = commonHeight

  for (let y = 0; y < commonHeight; y += 1) {
    if (!rowsEqual(prev, next, y)) {
      firstChangedY = y
      break
    }
  }

  const rewriteRows = Math.max(prev.height, next.height) - firstChangedY
  if (rewriteRows <= 0) {
    return null
  }

  return rewriteRows <= MAX_MAIN_SCREEN_REWRITE_ROWS ? firstChangedY : null
}

function rowsEqual(prev: Screen, next: Screen, y: number): boolean {
  if (prev.width !== next.width) {
    return false
  }

  if (prev.softWrap[y] !== next.softWrap[y]) {
    return false
  }

  const rowStart = y * prev.width
  const rowEnd = rowStart + prev.width
  for (let index = rowStart; index < rowEnd; index += 1) {
    if (
      prev.cells64[index] !== next.cells64[index] ||
      prev.noSelect[index] !== next.noSelect[index]
    ) {
      return false
    }
  }

  return true
}

function transitionHyperlink(
  diff: Diff,
  current: Hyperlink,
@@ -622,6 +675,37 @@ function renderFrameSlice(
  return screen
}

function restoreMainScreenCursor(screen: VirtualScreen, next: Frame): void {
  if (next.cursor.y >= next.screen.height) {
    // Move to column 0 of current line, then emit newlines to reach target row
    screen.txn(prev => {
      const rowsToCreate = next.cursor.y - prev.y
      if (rowsToCreate > 0) {
        // Use CR to resolve pending wrap (if any) without advancing
        // to the next line, then LF to create each new row.
        const patches: Diff = new Array<Diff[number]>(1 + rowsToCreate)
        patches[0] = CARRIAGE_RETURN
        for (let i = 0; i < rowsToCreate; i++) {
          patches[1 + i] = NEWLINE
        }
        return [patches, { dx: -prev.x, dy: rowsToCreate }]
      }
      // At or past target row - need to move cursor to correct position
      const dy = next.cursor.y - prev.y
      if (dy !== 0 || prev.x !== next.cursor.x) {
        // Use CR to clear pending wrap (if any), then cursor move
        const patches: Diff = [CARRIAGE_RETURN]
        patches.push({ type: 'cursorMove', x: next.cursor.x, y: dy })
        return [patches, { dx: next.cursor.x - prev.x, dy }]
      }
      return [[], { dx: 0, dy: 0 }]
    })
    return
  }

  moveCursorTo(screen, next.cursor.x, next.cursor.y)
}

type Delta = { dx: number; dy: number }

/**
369 src/ink/reconciler.test.ts (Normal file)
@@ -0,0 +1,369 @@
import { PassThrough } from 'node:stream'

import { expect, test } from 'bun:test'
import React from 'react'

import type { DOMElement, ElementNames } from './dom.ts'
import instances from './instances.ts'
import { LayoutEdge } from './layout/node.ts'
import type { ParsedKey } from './parse-keypress.ts'
import { createRoot } from './root.ts'

type TestStdin = PassThrough & {
  isTTY: boolean
  setRawMode: (mode: boolean) => void
  ref: () => void
  unref: () => void
}

const RAW_TEXT_STYLE = {
  flexDirection: 'row',
  flexGrow: 0,
  flexShrink: 1,
  textWrap: 'wrap',
} as const

function createTestStreams(): {
  stdout: PassThrough
  stdin: TestStdin
} {
  const stdout = new PassThrough()
  const stdin = new PassThrough() as TestStdin

  stdin.isTTY = true
  stdin.setRawMode = () => {}
  stdin.ref = () => {}
  stdin.unref = () => {}

  ;(stdout as unknown as { columns: number }).columns = 120
  ;(stdout as unknown as { rows: number }).rows = 24
  ;(stdout as unknown as { isTTY: boolean }).isTTY = true

  return { stdout, stdin }
}

async function waitForCondition(
  predicate: () => boolean,
  errorMessage: string,
  timeoutMs = 2000,
): Promise<void> {
  const startedAt = Date.now()

  while (Date.now() - startedAt < timeoutMs) {
    if (predicate()) {
      return
    }

    await Bun.sleep(10)
  }

  throw new Error(errorMessage)
}

function getRootNode(stdout: PassThrough): DOMElement {
  const instance = getInkInstance(stdout)

  if (!instance.rootNode) {
    throw new Error('Ink instance root node not found')
  }

  return instance.rootNode
}

function getInkInstance(stdout: PassThrough): {
  rootNode?: DOMElement
  dispatchKeyboardEvent: (parsedKey: ParsedKey) => void
} {
  const instance = instances.get(
    stdout as unknown as NodeJS.WriteStream,
  ) as
    | {
        rootNode?: DOMElement
        dispatchKeyboardEvent: (parsedKey: ParsedKey) => void
      }
    | undefined

  if (!instance) {
    throw new Error('Ink instance not found')
  }

  return instance
}

function findElement(
  node: DOMElement,
  nodeName: ElementNames,
): DOMElement | undefined {
  if (node.nodeName === nodeName) {
    return node
  }

  for (const child of node.childNodes) {
    if (child.nodeName === '#text') {
      continue
    }

    const found = findElement(child, nodeName)
    if (found) {
      return found
    }
  }

  return undefined
}

function requireElement(stdout: PassThrough, nodeName: ElementNames): DOMElement {
  const found = findElement(getRootNode(stdout), nodeName)

  if (!found) {
    throw new Error(`Expected to find ${nodeName} in Ink root tree`)
  }

  return found
}

async function createHarness(): Promise<{
  stdout: PassThrough
  stdin: TestStdin
  root: Awaited<ReturnType<typeof createRoot>>
  dispose: () => Promise<void>
}> {
  const { stdout, stdin } = createTestStreams()
  const root = await createRoot({
    stdout: stdout as unknown as NodeJS.WriteStream,
    stdin: stdin as unknown as NodeJS.ReadStream,
    patchConsole: false,
  })

  return {
    stdout,
    stdin,
    root,
    dispose: async () => {
      root.unmount()
      stdin.end()
      stdout.end()
      await Bun.sleep(25)
    },
  }
}

test('raw ink-box updates keyboard handlers and attributes in place across rerenders', async () => {
  const calls: string[] = []
  const firstHandler = () => calls.push('first')
  const secondHandler = () => calls.push('second')
  const harness = await createHarness()

  try {
    harness.root.render(
      React.createElement(
        'ink-box',
        {
          autoFocus: true,
          onKeyDown: firstHandler,
          tabIndex: 0,
        },
        'first render',
      ),
    )

    await Bun.sleep(25)

    const firstBox = requireElement(harness.stdout, 'ink-box')
    expect(firstBox.attributes.tabIndex).toBe(0)
    expect(firstBox._eventHandlers?.onKeyDown).toBe(firstHandler)

    harness.root.render(
      React.createElement(
        'ink-box',
        {
          autoFocus: true,
          onKeyDown: secondHandler,
          tabIndex: 1,
        },
        'second render',
      ),
    )

    await Bun.sleep(25)

    const secondBox = requireElement(harness.stdout, 'ink-box')
    expect(secondBox).toBe(firstBox)
    expect(secondBox.attributes.tabIndex).toBe(1)
    expect(secondBox._eventHandlers?.onKeyDown).toBe(secondHandler)

    getInkInstance(harness.stdout).dispatchKeyboardEvent({
      kind: 'key',
      name: 'a',
      fn: false,
      ctrl: false,
      meta: false,
      shift: false,
      option: false,
      super: false,
      sequence: 'a',
      raw: 'a',
      isPasted: false,
    })

    await waitForCondition(
      () => calls.length === 1,
      'Timed out waiting for rerendered onKeyDown handler to fire',
    )

    expect(calls).toEqual(['second'])
  } finally {
    await harness.dispose()
  }
})

test('raw ink-text updates textStyles in place across rerenders', async () => {
  const harness = await createHarness()

  try {
    harness.root.render(
      React.createElement(
        'ink-text',
        {
          style: RAW_TEXT_STYLE,
          textStyles: { color: 'ansi:red' },
        },
        'host text',
      ),
    )

    await Bun.sleep(25)

    const firstText = requireElement(harness.stdout, 'ink-text')
    expect(firstText.textStyles).toEqual({ color: 'ansi:red' })

    harness.root.render(
      React.createElement(
        'ink-text',
        {
          style: RAW_TEXT_STYLE,
          textStyles: { color: 'ansi:blue' },
        },
        'host text',
      ),
    )

    await Bun.sleep(25)

    const secondText = requireElement(harness.stdout, 'ink-text')
    expect(secondText).toBe(firstText)
    expect(secondText.textStyles).toEqual({ color: 'ansi:blue' })
  } finally {
    await harness.dispose()
  }
})

test('raw ink-box removes event handler when set to undefined', async () => {
  const calls: string[] = []
  const handler = () => calls.push('fired')
  const harness = await createHarness()

  try {
    harness.root.render(
      React.createElement(
        'ink-box',
        {
          autoFocus: true,
          onKeyDown: handler,
          tabIndex: 0,
        },
        'with handler',
      ),
    )

    await Bun.sleep(25)

    const box = requireElement(harness.stdout, 'ink-box')
    expect(box._eventHandlers?.onKeyDown).toBe(handler)

    // Remove the handler
    harness.root.render(
      React.createElement(
        'ink-box',
        {
          autoFocus: true,
          tabIndex: 0,
        },
        'without handler',
      ),
    )

    await Bun.sleep(25)

    const sameBox = requireElement(harness.stdout, 'ink-box')
    expect(sameBox).toBe(box)
    expect(sameBox._eventHandlers?.onKeyDown).toBeUndefined()

    // Dispatch a key event and verify the removed handler is NOT called
    getInkInstance(harness.stdout).dispatchKeyboardEvent({
      kind: 'key',
      name: 'a',
      fn: false,
      ctrl: false,
      meta: false,
      shift: false,
      option: false,
      super: false,
      sequence: 'a',
      raw: 'a',
      isPasted: false,
    })

    await Bun.sleep(50)
    expect(calls).toEqual([])
  } finally {
    await harness.dispose()
  }
})

test('raw ink-box updates layout style in place across rerenders', async () => {
  const harness = await createHarness()

  try {
    harness.root.render(
      React.createElement(
        'ink-box',
        {
          style: { flexDirection: 'row', paddingLeft: 1 },
        },
        'styled box',
      ),
    )

    await Bun.sleep(25)

    const box = requireElement(harness.stdout, 'ink-box')
    expect(box.style.flexDirection).toBe('row')
    expect(box.style.paddingLeft).toBe(1)

    harness.root.render(
      React.createElement(
        'ink-box',
        {
          style: { flexDirection: 'column', paddingLeft: 2 },
        },
        'styled box',
      ),
    )

    await Bun.sleep(25)

    const sameBox = requireElement(harness.stdout, 'ink-box')
    expect(sameBox).toBe(box)
    expect(sameBox.style.flexDirection).toBe('column')
    expect(sameBox.style.paddingLeft).toBe(2)

    // Verify the update reached the layout engine, not just the style object
    const yogaNode = sameBox.yogaNode!
    expect(yogaNode).toBeDefined()
    yogaNode.calculateLayout(120)
    expect(yogaNode.getComputedPadding(LayoutEdge.Left)).toBe(2)
  } finally {
    await harness.dispose()
  }
})
@@ -366,14 +366,12 @@ const reconciler = createReconciler<
  createTextInstance(
    text: string,
    _root: DOMElement,
    hostContext: HostContext,
    _hostContext: HostContext,
  ): TextNode {
    if (!hostContext.isInsideText) {
      throw new Error(
        `Text string "${text}" must be rendered inside <Text> component`,
      )
    }

    // react-compiler memoization can reuse cached <Text> elements without
    // re-traversing getChildHostContext, so hostContext.isInsideText may be
    // stale. Always create the text node — Ink will render it correctly
    // regardless of the context tracking state.
    return createTextNode(text)
  },
  resetTextContent() {},
@@ -451,17 +449,25 @@ const reconciler = createReconciler<
  },
  commitUpdate(
    node: DOMElement,
    updatePayload: UpdatePayload | null,
    _type: ElementNames,
    _oldProps: Props,
    _newProps: Props,
    oldProps: Props,
    newProps: Props,
  ): void {
    if (!updatePayload) {
    // React 19 mutation mode calls commitUpdate as
    // (instance, type, oldProps, newProps, fiber) and does not pass the
    // prepareUpdate() payload here. This renderer used to treat the second
    // argument as updatePayload, which left mounted ink-* nodes with stale
    // attributes, event handlers, and textStyles until something forced a
    // remount. Recompute the prop/style diff here so host nodes update
    // correctly in place on rerender.
    const props = diff(oldProps, newProps)
    const style = diff(oldProps['style'] as Styles, newProps['style'] as Styles)
    const nextStyle = newProps['style'] as Styles | undefined

    if (!props && !style) {
      return
    }

    const { props, style, nextStyle } = updatePayload

    if (props) {
      for (const [key, value] of Object.entries(props)) {
        if (key === 'style') {

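The hunk above tracks react-reconciler's React 19 mutation-mode contract, where commitUpdate receives (instance, type, oldProps, newProps, fiber) and no prepareUpdate() payload, so the renderer has to diff old and new props itself. A minimal self-contained sketch of that contract — every name below is illustrative, not from this repo:

// Sketch: React 19 mutation-mode commitUpdate. No precomputed update payload
// arrives, so the renderer recomputes the prop diff on every commit.
type SketchProps = Record<string, unknown>
type SketchInstance = { attributes: SketchProps }

function diffProps(oldProps: SketchProps, newProps: SketchProps): SketchProps | null {
  const changed: SketchProps = {}
  let dirty = false
  for (const key of new Set([...Object.keys(oldProps), ...Object.keys(newProps)])) {
    if (oldProps[key] !== newProps[key]) {
      changed[key] = newProps[key]
      dirty = true
    }
  }
  return dirty ? changed : null
}

export const sketchHostConfig = {
  commitUpdate(instance: SketchInstance, _type: string, oldProps: SketchProps, newProps: SketchProps): void {
    const changed = diffProps(oldProps, newProps) // recomputed here, not passed in
    if (!changed) return
    Object.assign(instance.attributes, changed)
  },
}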
@@ -135,6 +135,13 @@ export function setXtversionName(name: string): void {
  if (xtversionName === undefined) xtversionName = name
}

export function isGhosttyTerminal(): boolean {
  if (process.env.NODE_ENV === 'test') return false
  if (process.env.TERM_PROGRAM === 'ghostty') return true
  if (process.env.TERM === 'xterm-ghostty') return true
  return xtversionName?.toLowerCase().startsWith('ghostty') ?? false
}

/** True if running in an xterm.js-based terminal (VS Code, Cursor, Windsurf
 * integrated terminals). Combines TERM_PROGRAM env check (fast, sync, but
 * not forwarded over SSH) with the XTVERSION probe result (async, survives
@@ -145,6 +152,20 @@ export function isXtermJs(): boolean {
  return xtversionName?.startsWith('xterm.js') ?? false
}

/** Ghostty currently repaints main-screen prompt updates more reliably
 * without DEC 2026 synchronized output. Prefer explicit terminal identity
 * (TERM_PROGRAM/TERM or XTVERSION) in real sessions, but keep tests
 * deterministic by disabling the env-based detection under NODE_ENV=test. */
export function shouldSkipMainScreenSyncMarkers(): boolean {
  return isGhosttyTerminal()
}

/** Ghostty's main-screen prompt updates are currently more reliable when we
 * bypass the incremental diff path and rewrite the visible prompt block. */
export function shouldUseMainScreenRewrite(): boolean {
  return isGhosttyTerminal()
}

// Terminals known to correctly implement the Kitty keyboard protocol
// (CSI >1u) and/or xterm modifyOtherKeys (CSI >4;2m) for ctrl+shift+<letter>
// disambiguation. We previously enabled unconditionally (#23350), assuming

148 src/ink/termio/osc.test.ts (new file)
@@ -0,0 +1,148 @@
import { afterEach, beforeEach, describe, expect, mock, test } from 'bun:test'
import { join } from 'node:path'

const originalEnv = { ...process.env }
const originalPlatform = process.platform
const mockedClipboardPath = join(process.cwd(), 'openclaude-clipboard.txt')

const generateTempFilePathMock = mock(() => mockedClipboardPath)

const execFileNoThrowMock = mock(
  async () => ({ code: 0, stdout: '', stderr: '' }),
)

mock.module('../../utils/execFileNoThrow.js', () => ({
  execFileNoThrow: execFileNoThrowMock,
  execFileNoThrowWithCwd: execFileNoThrowMock,
}))

mock.module('../../utils/tempfile.js', () => ({
  generateTempFilePath: generateTempFilePathMock,
}))

async function importFreshOscModule() {
  return import(`./osc.ts?ts=${Date.now()}-${Math.random()}`)
}

async function flushClipboardCopy(): Promise<void> {
  await new Promise(resolve => setTimeout(resolve, 0))
}

async function waitForExecCall(
  command: string,
  attempts = 20,
): Promise<(typeof execFileNoThrowMock.mock.calls)[number] | undefined> {
  for (let attempt = 0; attempt < attempts; attempt++) {
    const call = execFileNoThrowMock.mock.calls.find(([cmd]) => cmd === command)
    if (call) {
      return call
    }
    await flushClipboardCopy()
  }

  return undefined
}

describe('Windows clipboard fallback', () => {
  beforeEach(() => {
    execFileNoThrowMock.mockClear()
    generateTempFilePathMock.mockClear()
    process.env = { ...originalEnv }
    delete process.env['SSH_CONNECTION']
    delete process.env['TMUX']
    Object.defineProperty(process, 'platform', { value: 'win32' })
  })

  afterEach(() => {
    process.env = { ...originalEnv }
    Object.defineProperty(process, 'platform', { value: originalPlatform })
  })

  test('uses PowerShell instead of clip.exe for local Windows copy', async () => {
    const { setClipboard } = await importFreshOscModule()

    await setClipboard('Привет мир')
    await flushClipboardCopy()

    expect(execFileNoThrowMock.mock.calls.some(([cmd]) => cmd === 'clip')).toBe(
      false,
    )
    expect(
      execFileNoThrowMock.mock.calls.some(([cmd]) => cmd === 'powershell'),
    ).toBe(true)
  })

  test('passes Windows clipboard text through a UTF-8 temp file instead of stdin', async () => {
    const { setClipboard } = await importFreshOscModule()

    await setClipboard('Привет мир')
    await flushClipboardCopy()

    const windowsCall = await waitForExecCall('powershell')

    expect(windowsCall?.[2]).toMatchObject({
      stdin: 'ignore',
    })
    expect(windowsCall?.[2]).not.toMatchObject({ input: 'Привет мир' })
    expect(windowsCall?.[2]).not.toMatchObject({
      env: expect.objectContaining({
        OPENCLAUDE_CLIPBOARD_TEXT_B64: expect.any(String),
      }),
    })
    expect(windowsCall?.[1]).toContain(
      `$text = [System.IO.File]::ReadAllText('${mockedClipboardPath.replace(/'/g, "''")}', [System.Text.Encoding]::UTF8); Set-Clipboard -Value $text`,
    )
  })
})

describe('clipboard path behavior remains stable', () => {
  beforeEach(() => {
    execFileNoThrowMock.mockClear()
    process.env = { ...originalEnv }
    delete process.env['SSH_CONNECTION']
    delete process.env['TMUX']
  })

  afterEach(() => {
    process.env = { ...originalEnv }
    Object.defineProperty(process, 'platform', { value: originalPlatform })
  })

  test('getClipboardPath stays native on local macOS', async () => {
    Object.defineProperty(process, 'platform', { value: 'darwin' })
    const { getClipboardPath } = await importFreshOscModule()

    expect(getClipboardPath()).toBe('native')
  })

  test('getClipboardPath stays tmux-buffer when TMUX is set', async () => {
    Object.defineProperty(process, 'platform', { value: 'linux' })
    process.env['TMUX'] = '/tmp/tmux-1000/default,123,0'
    const { getClipboardPath } = await importFreshOscModule()

    expect(getClipboardPath()).toBe('tmux-buffer')
  })

  test('Windows clipboard fallback is skipped over SSH', async () => {
    Object.defineProperty(process, 'platform', { value: 'win32' })
    process.env['SSH_CONNECTION'] = '1 2 3 4'
    const { setClipboard } = await importFreshOscModule()

    await setClipboard('Привет мир')

    expect(execFileNoThrowMock.mock.calls.some(([cmd]) => cmd === 'powershell')).toBe(
      false,
    )
  })

  test('local macOS clipboard fallback still uses pbcopy', async () => {
    Object.defineProperty(process, 'platform', { value: 'darwin' })
    const { setClipboard } = await importFreshOscModule()

    await setClipboard('hello')

    expect(execFileNoThrowMock.mock.calls.some(([cmd]) => cmd === 'pbcopy')).toBe(
      true,
    )
  })
})
@@ -3,8 +3,10 @@
 */

import { Buffer } from 'buffer'
import { unlink, writeFile } from 'node:fs/promises'
import { env } from '../../utils/env.js'
import { execFileNoThrow } from '../../utils/execFileNoThrow.js'
import { generateTempFilePath } from '../../utils/tempfile.js'
import { BEL, ESC, ESC_TYPE, SEP } from './ansi.js'
import type { Action, Color, TabStatusAction } from './types.js'

@@ -129,7 +131,7 @@ export async function tmuxLoadBuffer(text: string): Promise<boolean> {
 * Local (no SSH_CONNECTION): also shell out to a native clipboard utility.
 * OSC 52 and tmux -w both depend on terminal settings — iTerm2 disables
 * OSC 52 by default, VS Code shows a permission prompt on first use. Native
 * utilities (pbcopy/wl-copy/xclip/xsel/clip.exe) always work locally. Over
 * utilities (pbcopy/wl-copy/xclip/xsel/PowerShell Set-Clipboard) always work locally. Over
 * SSH these would write to the remote clipboard — OSC 52 is the right path there.
 *
 * Returns the sequence for the caller to write to stdout (raw OSC 52
@@ -211,9 +213,32 @@ function copyNative(text: string): void {
      return
    }
    case 'win32':
      // clip.exe is always available on Windows. Unicode handling is
      // imperfect (system locale encoding) but good enough for a fallback.
      void execFileNoThrow('clip', [], opts)
      // Avoid piping non-ASCII text through the Windows stdin/codepage
      // boundary. Write UTF-8 text to a temp file and let PowerShell read it
      // directly as UTF-8 before calling Set-Clipboard.
      void (async () => {
        const tempPath = generateTempFilePath('openclaude-clipboard', '.txt')
        const escapedTempPath = tempPath.replace(/'/g, "''")
        try {
          await writeFile(tempPath, text, { encoding: 'utf8' })
          await execFileNoThrow(
            'powershell',
            [
              '-NoProfile',
              '-NonInteractive',
              '-Command',
              `$text = [System.IO.File]::ReadAllText('${escapedTempPath}', [System.Text.Encoding]::UTF8); Set-Clipboard -Value $text`,
            ],
            {
              useCwd: false,
              timeout: opts.timeout,
              stdin: 'ignore',
            },
          )
        } finally {
          await unlink(tempPath).catch(() => {})
        }
      })().catch(() => {})
      return
  }
}

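One detail worth calling out in the hunk above: PowerShell escapes a single quote inside a single-quoted literal by doubling it, which is exactly what tempPath.replace(/'/g, "''") implements. A quick illustration with a hypothetical path (not from this repo):

// Hypothetical Windows temp path containing an embedded quote.
const tempPath = String.raw`C:\Users\O'Brien\AppData\Local\Temp\openclaude-clipboard.txt`
const escaped = tempPath.replace(/'/g, "''")
// escaped === String.raw`C:\Users\O''Brien\AppData\Local\Temp\openclaude-clipboard.txt`
console.log(`[System.IO.File]::ReadAllText('${escaped}', [System.Text.Encoding]::UTF8)`)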
101 src/proto/openclaude.proto (new file)
@@ -0,0 +1,101 @@
syntax = "proto3";

package openclaude.v1;

// Main Agent Service
service AgentService {
  // Bidirectional stream: client sends tasks and answers to agent prompts,
  // server streams text tokens, tool states, and requests permissions.
  rpc Chat(stream ClientMessage) returns (stream ServerMessage);
}

// ---------------------------------------------------------
// MESSAGES FROM CLIENT (Input)
// ---------------------------------------------------------
message ClientMessage {
  oneof payload {
    // 1. Initial request (first message in the stream)
    ChatRequest request = 2;

    // 2. User response to an agent prompt (e.g., command confirmation)
    UserInput input = 3;

    // 3. Interrupt signal (if the user clicks "Stop generation")
    CancelSignal cancel = 4;
  }
}

message ChatRequest {
  string message = 1;
  string working_directory = 2; // Where the agent should execute commands
  reserved 3;                   // Reserved to prevent accidental reuse
  optional string model = 4;
  string session_id = 5;        // Non-empty = cross-stream session persistence
}

message UserInput {
  string reply = 1;     // Text response (e.g., "y", "no", or clarification)
  string prompt_id = 2; // ID of the prompt we are responding to
}

message CancelSignal {
  string reason = 1;
}

// ---------------------------------------------------------
// MESSAGES FROM SERVER (Output / Events)
// ---------------------------------------------------------
message ServerMessage {
  // Using oneof guarantees that only one type of event arrives at a time
  oneof event {
    TextChunk text_chunk = 1;          // Chunk of text from LLM
    ToolCallStart tool_start = 2;      // Agent started using a tool
    ToolCallResult tool_result = 3;    // Tool returned a result
    ActionRequired action_required = 4; // Agent requires human intervention
    FinalResponse done = 5;            // Generation successfully completed
    ErrorResponse error = 6;           // A critical error occurred
  }
}

// Stream text chunk
message TextChunk {
  string text = 1;
}

// Agent decided to use a tool (bash, read_file, etc.)
message ToolCallStart {
  string tool_name = 1;
  string arguments_json = 2; // Arguments in JSON format
  string tool_use_id = 3;    // Correlation ID matching ToolCallResult
}

// Result of tool execution
message ToolCallResult {
  string tool_name = 1;
  string output = 2;      // stdout/stderr or file contents
  bool is_error = 3;      // Did the command itself fail
  string tool_use_id = 4; // Correlation ID matching ToolCallStart
}

// Agent paused work and is waiting for user decision
message ActionRequired {
  string prompt_id = 1; // Client must return this ID in UserInput
  string question = 2;  // Question text (e.g., "Execute 'rm -rf /'?")
  enum ActionType {
    CONFIRM_COMMAND = 0;     // Yes/No
    REQUEST_INFORMATION = 1; // Text input
  }
  ActionType type = 3;
}

// Final statistics
message FinalResponse {
  string full_text = 1; // The entire generated text
  int32 prompt_tokens = 2;
  int32 completion_tokens = 3;
}

message ErrorResponse {
  string message = 1;
  string code = 2;
}
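For orientation, here is a hedged sketch of what a consumer of the Chat stream defined above could look like. It is not part of this changeset; it assumes @grpc/grpc-js and @grpc/proto-loader are available, a server on localhost:50051, and keepCase so field names match the proto exactly:

// Hypothetical bidirectional-stream client for AgentService.Chat.
import * as grpc from '@grpc/grpc-js'
import * as protoLoader from '@grpc/proto-loader'

const definition = protoLoader.loadSync('src/proto/openclaude.proto', { keepCase: true })
const proto = grpc.loadPackageDefinition(definition) as any

const client = new proto.openclaude.v1.AgentService(
  'localhost:50051',
  grpc.credentials.createInsecure(),
)

const call = client.Chat()

call.on('data', (msg: any) => {
  if (msg.text_chunk) process.stdout.write(msg.text_chunk.text)
  // An ActionRequired event must be answered with the same prompt_id.
  if (msg.action_required) {
    call.write({ input: { reply: 'y', prompt_id: msg.action_required.prompt_id } })
  }
  if (msg.done || msg.error) call.end()
})

// The first ClientMessage on the stream carries the initial ChatRequest.
call.write({
  request: { message: 'summarize this repo', working_directory: process.cwd(), session_id: '' },
})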
@@ -237,6 +237,8 @@ import { useOfficialMarketplaceNotification } from 'src/hooks/useOfficialMarketp
import { usePromptsFromClaudeInChrome } from 'src/hooks/usePromptsFromClaudeInChrome.js';
import { getTipToShowOnSpinner, recordShownTip } from 'src/services/tips/tipScheduler.js';
import type { Theme } from 'src/utils/theme.js';
import { isPromptTypingSuppressionActive } from './replInputSuppression.js';
import { shouldRunStartupChecks } from './replStartupGates.js';
import { checkAndDisableBypassPermissionsIfNeeded, checkAndDisableAutoModeIfNeeded, useKickOffCheckAndDisableBypassPermissionsIfNeeded, useKickOffCheckAndDisableAutoModeIfNeeded } from 'src/utils/permissions/bypassPermissionsKillswitch.js';
import { SandboxManager } from 'src/utils/sandbox/sandbox-adapter.js';
import { SANDBOX_NETWORK_ACCESS_TOOL_NAME } from 'src/cli/structuredIO.js';
@@ -615,7 +617,6 @@ export function REPL({
  const toolPermissionContext = useAppState(s => s.toolPermissionContext);
  const verbose = useAppState(s => s.verbose);
  const mcp = useAppState(s => s.mcp);
  const plugins = useAppState(s => s.plugins);
  const agentDefinitions = useAppState(s => s.agentDefinitions);
  const fileHistory = useAppState(s => s.fileHistory);
  const initialMessage = useAppState(s => s.initialMessage);
@@ -778,7 +779,7 @@ export function REPL({
  }, [localTools, initialTools]);

  // Initialize plugin management
  useManagePlugins({
  const pluginCommands = useManagePlugins({
    enabled: !isRemoteSession
  });
  const tasksV2 = useTasksV2WithCollapseEffect();
@@ -791,10 +792,8 @@ export function REPL({
  // accepts, and only then is the REPL component mounted and this effect runs.
  // This ensures that plugin installations from repository and user settings only
  // happen after explicit user consent to trust the current working directory.
  useEffect(() => {
    if (isRemoteSession) return;
    void performStartupChecks(setAppState);
  }, [setAppState, isRemoteSession]);
  // Deferring startup checks is handled below (after promptTypingSuppressionActive
  // is declared) to avoid temporal dead zone issues.

  // Allow Claude in Chrome MCP to send prompts through MCP notifications
  // and sync permission mode changes to the Chrome extension
@@ -826,10 +825,16 @@ export function REPL({
  }, [mainThreadAgentDefinition, mergedTools]);

  // Merge commands from local state, plugins, and MCP
  const commandsWithPlugins = useMergedCommands(localCommands, plugins.commands as Command[]);
  const commandsWithPlugins = useMergedCommands(localCommands, pluginCommands as Command[]);
  const mergedCommands = useMergedCommands(commandsWithPlugins, mcp.commands as Command[]);
  // Keep plugin commands out of render-time command props. Feeding the full
  // execution set into PromptInput/Messages reintroduced the startup repaint
  // freeze, while transcript rendering still round-trips plugin skills via the
  // SkillTool's `skill` payload without needing plugin command objects here.
  const renderMergedCommands = useMergedCommands(localCommands, mcp.commands as Command[]);
  // Filter out all commands if disableSlashCommands is true
  const commands = useMemo(() => disableSlashCommands ? [] : mergedCommands, [disableSlashCommands, mergedCommands]);
  const renderCommands = useMemo(() => disableSlashCommands ? [] : renderMergedCommands, [disableSlashCommands, renderMergedCommands]);
  useIdeLogging(isRemoteSession ? EMPTY_MCP_CLIENTS : mcp.clients);
  useIdeSelection(isRemoteSession ? EMPTY_MCP_CLIENTS : mcp.clients, setIDESelection);
  const [streamMode, setStreamMode] = useState<SpinnerMode>('responding');
@@ -1336,6 +1341,7 @@ export function REPL({
  const [inputValue, setInputValueRaw] = useState(() => consumeEarlyInput());
  const inputValueRef = useRef(inputValue);
  inputValueRef.current = inputValue;
  const promptTypingSuppressionActive = isPromptTypingSuppressionActive(isPromptInputActive, inputValue);
  const insertTextRef = useRef<{
    insert: (text: string) => void;
    setInputWithCursor: (value: string, cursor: number) => void;
@@ -1427,6 +1433,25 @@ export function REPL({
  const activeRemote = sshRemote.isRemoteMode ? sshRemote : directConnect.isRemoteMode ? directConnect : remoteSession;
  const [pastedContents, setPastedContents] = useState<Record<number, PastedContent>>({});
  const [submitCount, setSubmitCount] = useState(0);

  // Defer startup checks until the user has submitted their first message.
  // A timeout or grace period is insufficient (issue #363): if the user pauses
  // before typing, startup checks can still fire and recommendation dialogs
  // steal focus. Only the user's first submission guarantees the prompt was
  // the first thing they interacted with.
  const startupChecksStartedRef = React.useRef(false);
  const hasHadFirstSubmission = (submitCount ?? 0) > 0;
  useEffect(() => {
    if (isRemoteSession) return;
    if (startupChecksStartedRef.current) return;
    if (!shouldRunStartupChecks({
      isRemoteSession,
      hasStarted: startupChecksStartedRef.current,
      hasHadFirstSubmission,
    })) return;
    startupChecksStartedRef.current = true;
    void performStartupChecks(setAppState);
  }, [setAppState, isRemoteSession, hasHadFirstSubmission]);
  // Ref instead of state to avoid triggering React re-renders on every
  // streaming text_delta. The spinner reads this via its animation timer.
  const responseLengthRef = useRef(0);
@@ -2028,7 +2053,7 @@ export function REPL({
    if (isMessageSelectorVisible) return 'message-selector';

    // Suppress interrupt dialogs while user is actively typing
    if (isPromptInputActive) return undefined;
    if (promptTypingSuppressionActive) return undefined;
    if (sandboxPermissionRequestQueue[0]) return 'sandbox-permission';

    // Permission/interactive dialogs (show unless blocked by toolJSX)
@@ -2059,19 +2084,20 @@ export function REPL({
    if (allowDialogsWithAnimation && showRemoteCallout) return 'remote-callout';

    // LSP plugin recommendation (lowest priority - non-blocking suggestion)
    if (allowDialogsWithAnimation && lspRecommendation) return 'lsp-recommendation';
    // Suppress during startup window to prevent stealing focus from the prompt (issue #363)
    if (allowDialogsWithAnimation && lspRecommendation && startupChecksStartedRef.current) return 'lsp-recommendation';

    // Plugin hint from CLI/SDK stderr (same priority band as LSP rec)
    if (allowDialogsWithAnimation && hintRecommendation) return 'plugin-hint';
    if (allowDialogsWithAnimation && hintRecommendation && startupChecksStartedRef.current) return 'plugin-hint';

    // Desktop app upsell (max 3 launches, lowest priority)
    if (allowDialogsWithAnimation && showDesktopUpsellStartup) return 'desktop-upsell';
    if (allowDialogsWithAnimation && showDesktopUpsellStartup && startupChecksStartedRef.current) return 'desktop-upsell';
    return undefined;
  }
  const focusedInputDialog = getFocusedInputDialog();

  // True when permission prompts exist but are hidden because the user is typing
  const hasSuppressedDialogs = isPromptInputActive && (sandboxPermissionRequestQueue[0] || toolUseConfirmQueue[0] || promptQueue[0] || workerSandboxPermissions.queue[0] || elicitation.queue[0] || showingCostDialog);
  const hasSuppressedDialogs = promptTypingSuppressionActive && (sandboxPermissionRequestQueue[0] || toolUseConfirmQueue[0] || promptQueue[0] || workerSandboxPermissions.queue[0] || elicitation.queue[0] || showingCostDialog);

  // Keep ref in sync so timer callbacks can read the current value
  focusedInputDialogRef.current = focusedInputDialog;
@@ -4406,7 +4432,7 @@ export function REPL({
  // and transcript-mode are mutually exclusive (this early return), so
  // only one ScrollBox is ever mounted at a time.
  const transcriptScrollRef = isFullscreenEnvEnabled() && !disableVirtualScroll && !dumpMode ? scrollRef : undefined;
  const transcriptMessagesElement = <Messages messages={transcriptMessages} tools={tools} commands={commands} verbose={true} toolJSX={null} toolUseConfirmQueue={[]} inProgressToolUseIDs={inProgressToolUseIDs} isMessageSelectorVisible={false} conversationId={conversationId} screen={screen} agentDefinitions={agentDefinitions} streamingToolUses={transcriptStreamingToolUses} showAllInTranscript={showAllInTranscript} onOpenRateLimitOptions={handleOpenRateLimitOptions} isLoading={isLoading} hidePastThinking={true} streamingThinking={streamingThinking} scrollRef={transcriptScrollRef} jumpRef={jumpRef} onSearchMatchesChange={onSearchMatchesChange} scanElement={scanElement} setPositions={setPositions} disableRenderCap={dumpMode} />;
  const transcriptMessagesElement = <Messages messages={transcriptMessages} tools={tools} commands={renderCommands} verbose={true} toolJSX={null} toolUseConfirmQueue={[]} inProgressToolUseIDs={inProgressToolUseIDs} isMessageSelectorVisible={false} conversationId={conversationId} screen={screen} agentDefinitions={agentDefinitions} streamingToolUses={transcriptStreamingToolUses} showAllInTranscript={showAllInTranscript} onOpenRateLimitOptions={handleOpenRateLimitOptions} isLoading={isLoading} hidePastThinking={true} streamingThinking={streamingThinking} scrollRef={transcriptScrollRef} jumpRef={jumpRef} onSearchMatchesChange={onSearchMatchesChange} scanElement={scanElement} setPositions={setPositions} disableRenderCap={dumpMode} />;
  const transcriptToolJSX = toolJSX && <Box flexDirection="column" width="100%">
    {toolJSX.jsx}
  </Box>;
@@ -4574,7 +4600,7 @@ export function REPL({
      jumpToNew(scrollRef.current);
    }} scrollable={<>
      <TeammateViewHeader />
      <Messages messages={displayedMessages} tools={tools} commands={commands} verbose={verbose} toolJSX={toolJSX} toolUseConfirmQueue={toolUseConfirmQueue} inProgressToolUseIDs={viewedTeammateTask ? viewedTeammateTask.inProgressToolUseIDs ?? new Set() : inProgressToolUseIDs} isMessageSelectorVisible={isMessageSelectorVisible} conversationId={conversationId} screen={screen} streamingToolUses={streamingToolUses} showAllInTranscript={showAllInTranscript} agentDefinitions={agentDefinitions} onOpenRateLimitOptions={handleOpenRateLimitOptions} isLoading={isLoading} streamingText={isLoading && !viewedAgentTask ? visibleStreamingText : null} isBriefOnly={viewedAgentTask ? false : isBriefOnly} unseenDivider={viewedAgentTask ? undefined : unseenDivider} scrollRef={isFullscreenEnvEnabled() ? scrollRef : undefined} trackStickyPrompt={isFullscreenEnvEnabled() ? true : undefined} cursor={cursor} setCursor={setCursor} cursorNavRef={cursorNavRef} />
      <Messages messages={displayedMessages} tools={tools} commands={renderCommands} verbose={verbose} toolJSX={toolJSX} toolUseConfirmQueue={toolUseConfirmQueue} inProgressToolUseIDs={viewedTeammateTask ? viewedTeammateTask.inProgressToolUseIDs ?? new Set() : inProgressToolUseIDs} isMessageSelectorVisible={isMessageSelectorVisible} conversationId={conversationId} screen={screen} streamingToolUses={streamingToolUses} showAllInTranscript={showAllInTranscript} agentDefinitions={agentDefinitions} onOpenRateLimitOptions={handleOpenRateLimitOptions} isLoading={isLoading} streamingText={isLoading && !viewedAgentTask ? visibleStreamingText : null} isBriefOnly={viewedAgentTask ? false : isBriefOnly} unseenDivider={viewedAgentTask ? undefined : unseenDivider} scrollRef={isFullscreenEnvEnabled() ? scrollRef : undefined} trackStickyPrompt={isFullscreenEnvEnabled() ? true : undefined} cursor={cursor} setCursor={setCursor} cursorNavRef={cursorNavRef} />
      <AwsAuthStatusBox />
      {/* Hide the processing placeholder while a modal is showing —
          it would sit at the last visible transcript row right above
@@ -4907,7 +4933,7 @@ export function REPL({
      {"external" === 'ant' && skillImprovementSurvey.suggestion && <SkillImprovementSurvey isOpen={skillImprovementSurvey.isOpen} skillName={skillImprovementSurvey.suggestion.skillName} updates={skillImprovementSurvey.suggestion.updates} handleSelect={skillImprovementSurvey.handleSelect} inputValue={inputValue} setInputValue={setInputValue} />}
      {showIssueFlagBanner && <IssueFlagBanner />}
      { }
      <PromptInput debug={debug} ideSelection={ideSelection} hasSuppressedDialogs={!!hasSuppressedDialogs} isLocalJSXCommandActive={isShowingLocalJSXCommand} getToolUseContext={getToolUseContext} toolPermissionContext={toolPermissionContext} setToolPermissionContext={setToolPermissionContext} apiKeyStatus={apiKeyStatus} commands={commands} agents={agentDefinitions.activeAgents} isLoading={isLoading} onExit={handleExit} verbose={verbose} messages={messages} onAutoUpdaterResult={setAutoUpdaterResult} autoUpdaterResult={autoUpdaterResult} input={inputValue} onInputChange={setInputValue} mode={inputMode} onModeChange={setInputMode} stashedPrompt={stashedPrompt} setStashedPrompt={setStashedPrompt} submitCount={submitCount} onShowMessageSelector={handleShowMessageSelector} onMessageActionsEnter={
      <PromptInput debug={debug} ideSelection={ideSelection} hasSuppressedDialogs={!!hasSuppressedDialogs} isLocalJSXCommandActive={isShowingLocalJSXCommand} getToolUseContext={getToolUseContext} toolPermissionContext={toolPermissionContext} setToolPermissionContext={setToolPermissionContext} apiKeyStatus={apiKeyStatus} commands={renderCommands} agents={agentDefinitions.activeAgents} isLoading={isLoading} onExit={handleExit} verbose={verbose} messages={messages} onAutoUpdaterResult={setAutoUpdaterResult} autoUpdaterResult={autoUpdaterResult} input={inputValue} onInputChange={setInputValue} mode={inputMode} onModeChange={setInputMode} stashedPrompt={stashedPrompt} setStashedPrompt={setStashedPrompt} submitCount={submitCount} onShowMessageSelector={handleShowMessageSelector} onMessageActionsEnter={
        // Works during isLoading — edit cancels first; uuid selection survives appends.
        feature('MESSAGE_ACTIONS') && isFullscreenEnvEnabled() && !disableMessageActions ? enterMessageActions : undefined} mcpClients={mcpClients} pastedContents={pastedContents} setPastedContents={setPastedContents} vimMode={vimMode} setVimMode={setVimMode} showBashesDialog={showBashesDialog} setShowBashesDialog={setShowBashesDialog} onSubmit={onSubmit} onAgentSubmit={onAgentSubmit} isSearchingHistory={isSearchingHistory} setIsSearchingHistory={setIsSearchingHistory} helpOpen={isHelpOpen} setHelpOpen={setIsHelpOpen} insertTextRef={feature('VOICE_MODE') ? insertTextRef : undefined} voiceInterimRange={voice.interimRange} />
      <SessionBackgroundHint onBackgroundSession={handleBackgroundSession} isLoading={isLoading} />

18 src/screens/replInputSuppression.test.ts (new file)
@@ -0,0 +1,18 @@
import { describe, expect, it } from 'bun:test'

import { isPromptTypingSuppressionActive } from './replInputSuppression.js'

describe('isPromptTypingSuppressionActive', () => {
  it('suppresses dialogs when early input already exists', () => {
    expect(isPromptTypingSuppressionActive(false, 'hello')).toBe(true)
  })

  it('does not suppress dialogs for empty or whitespace-only input', () => {
    expect(isPromptTypingSuppressionActive(false, '')).toBe(false)
    expect(isPromptTypingSuppressionActive(false, '   ')).toBe(false)
  })

  it('keeps suppression active while the typing flag is set', () => {
    expect(isPromptTypingSuppressionActive(true, '')).toBe(true)
  })
})
6 src/screens/replInputSuppression.ts (new file)
@@ -0,0 +1,6 @@
export function isPromptTypingSuppressionActive(
  isPromptInputActive: boolean,
  inputValue: string,
): boolean {
  return isPromptInputActive || inputValue.trim().length > 0
}
53 src/screens/replStartupGates.test.ts (new file)
@@ -0,0 +1,53 @@
import { describe, expect, test } from 'bun:test'

import { shouldRunStartupChecks } from './replStartupGates.js'

describe('shouldRunStartupChecks', () => {
  test('runs checks after first message submission', () => {
    expect(shouldRunStartupChecks({
      isRemoteSession: false,
      hasStarted: false,
      hasHadFirstSubmission: true,
    })).toBe(true)
  })

  test('skips checks in remote sessions even after submission', () => {
    expect(shouldRunStartupChecks({
      isRemoteSession: true,
      hasStarted: false,
      hasHadFirstSubmission: true,
    })).toBe(false)
  })

  test('skips checks if already started', () => {
    expect(shouldRunStartupChecks({
      isRemoteSession: false,
      hasStarted: true,
      hasHadFirstSubmission: true,
    })).toBe(false)
  })

  test('does not run checks before first submission', () => {
    expect(shouldRunStartupChecks({
      isRemoteSession: false,
      hasStarted: false,
      hasHadFirstSubmission: false,
    })).toBe(false)
  })

  test('does not run checks when idle before first submission', () => {
    expect(shouldRunStartupChecks({
      isRemoteSession: false,
      hasStarted: false,
      hasHadFirstSubmission: false,
    })).toBe(false)
  })

  test('skips checks in remote session regardless of other conditions', () => {
    expect(shouldRunStartupChecks({
      isRemoteSession: true,
      hasStarted: false,
      hasHadFirstSubmission: false,
    })).toBe(false)
  })
})
35 src/screens/replStartupGates.ts (new file)
@@ -0,0 +1,35 @@
/**
 * Startup gates for the REPL.
 *
 * Prevents startup plugin checks and recommendation dialogs from stealing
 * focus before the user has interacted with the prompt.
 *
 * This addresses the root cause of issue #363: on mount, performStartupChecks
 * triggers plugin loading, which populates trackedFiles, which triggers
 * useLspPluginRecommendation to surface an LSP recommendation dialog. Since
 * promptTypingSuppressionActive is false before the user has typed anything,
 * getFocusedInputDialog() returns the dialog, unmounting PromptInput entirely.
 *
 * The fix gates startup checks on actual prompt interaction. A pure timeout
 * or grace period is insufficient because pausing before typing would still
 * allow dialogs to steal focus. Only the user's first submission guarantees
 * the prompt is no longer in the vulnerable pre-interaction window.
 */

/**
 * Determines whether startup checks should run.
 *
 * Startup checks are deferred until the user has submitted their first
 * message. This guarantees the prompt was the first thing the user interacted
 * with, so no recommendation dialog can steal focus before the first keystroke.
 */
export function shouldRunStartupChecks(options: {
  isRemoteSession: boolean;
  hasStarted: boolean;
  hasHadFirstSubmission: boolean;
}): boolean {
  if (options.isRemoteSession) return false;
  if (options.hasStarted) return false;
  if (!options.hasHadFirstSubmission) return false;
  return true;
}
@@ -14,7 +14,16 @@ import { lazySchema } from '../../utils/lazySchema.js'
import { logError } from '../../utils/log.js'
import { getAPIProvider } from '../../utils/model/providers.js'
import { isEssentialTrafficOnly } from '../../utils/privacyLevel.js'
import type { ModelOption } from '../../utils/model/modelOptions.js'
import {
  getLocalOpenAICompatibleProviderLabel,
  listOpenAICompatibleModels,
} from '../../utils/providerDiscovery.js'
import { getClaudeCodeUserAgent } from '../../utils/userAgent.js'
import {
  getAdditionalModelOptionsCacheScope,
  resolveProviderRequest,
} from './providerConfig.js'

const bootstrapResponseSchema = lazySchema(() =>
  z.object({
@@ -39,6 +48,12 @@ const bootstrapResponseSchema = lazySchema(() =>

type BootstrapResponse = z.infer<ReturnType<typeof bootstrapResponseSchema>>

type BootstrapCachePayload = {
  clientData: Record<string, unknown> | null
  additionalModelOptions: ModelOption[]
  additionalModelOptionsScope: string
}

async function fetchBootstrapAPI(): Promise<BootstrapResponse | null> {
  if (isEssentialTrafficOnly()) {
    logForDebugging('[Bootstrap] Skipped: Nonessential traffic disabled')
@@ -108,22 +123,70 @@ async function fetchBootstrapAPI(): Promise<BootstrapResponse | null> {
  }
}

async function fetchLocalOpenAIModelOptions(): Promise<BootstrapCachePayload | null> {
  const scope = getAdditionalModelOptionsCacheScope()
  if (!scope?.startsWith('openai:')) {
    return null
  }

  const { baseUrl } = resolveProviderRequest()
  const models = await listOpenAICompatibleModels({
    baseUrl,
    apiKey: process.env.OPENAI_API_KEY,
  })

  if (models === null) {
    logForDebugging('[Bootstrap] Local OpenAI model discovery failed')
    return null
  }

  const providerLabel = getLocalOpenAICompatibleProviderLabel(baseUrl)

  return {
    clientData: getGlobalConfig().clientDataCache ?? null,
    additionalModelOptionsScope: scope,
    additionalModelOptions: models.map(model => ({
      value: model,
      label: model,
      description: `Detected from ${providerLabel}`,
    })),
  }
}

/**
 * Fetch bootstrap data from the API and persist to disk cache.
 */
export async function fetchBootstrapData(): Promise<void> {
  try {
    const response = await fetchBootstrapAPI()
    if (!response) return
    const scope = getAdditionalModelOptionsCacheScope()
    let payload: BootstrapCachePayload | null = null

    const clientData = response.client_data ?? null
    const additionalModelOptions = response.additional_model_options ?? []
    if (scope === 'firstParty') {
      const response = await fetchBootstrapAPI()
      if (!response) return

      payload = {
        clientData: response.client_data ?? null,
        additionalModelOptions: response.additional_model_options ?? [],
        additionalModelOptionsScope: scope,
      }
    } else if (scope?.startsWith('openai:')) {
      payload = await fetchLocalOpenAIModelOptions()
      if (!payload) return
    } else {
      logForDebugging('[Bootstrap] Skipped: no additional model source')
      return
    }

    const { clientData, additionalModelOptions, additionalModelOptionsScope } =
      payload

    // Only persist if data actually changed — avoids a config write on every startup.
    const config = getGlobalConfig()
    if (
      isEqual(config.clientDataCache, clientData) &&
      isEqual(config.additionalModelOptionsCache, additionalModelOptions)
      isEqual(config.additionalModelOptionsCache, additionalModelOptions) &&
      config.additionalModelOptionsCacheScope === additionalModelOptionsScope
    ) {
      logForDebugging('[Bootstrap] Cache unchanged, skipping write')
      return
@@ -134,6 +197,7 @@ export async function fetchBootstrapData(): Promise<void> {
      ...current,
      clientDataCache: clientData,
      additionalModelOptionsCache: additionalModelOptions,
      additionalModelOptionsCacheScope: additionalModelOptionsScope,
    }))
  } catch (error) {
    logError(error)

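listOpenAICompatibleModels is imported from providerDiscovery.js and its body isn't shown in this diff. For OpenAI-compatible servers, discovery conventionally means GET {baseUrl}/models returning { data: [{ id }] }; a hypothetical stand-alone sketch of such a probe, mirroring the null-on-failure contract used above:

// Hypothetical sketch, NOT this repo's implementation: probe an
// OpenAI-compatible endpoint's standard /models route for model IDs.
async function listModels(baseUrl: string, apiKey?: string): Promise<string[] | null> {
  try {
    const res = await fetch(`${baseUrl.replace(/\/$/, '')}/models`, {
      headers: apiKey ? { Authorization: `Bearer ${apiKey}` } : undefined,
    })
    if (!res.ok) return null
    const body = (await res.json()) as { data?: Array<{ id?: string }> }
    return (body.data ?? [])
      .map(m => m.id)
      .filter((id): id is string => typeof id === 'string')
  } catch {
    return null // discovery failure is non-fatal, matching the null check above
  }
}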
@@ -14,16 +14,27 @@ type ShimClient = {
const originalFetch = globalThis.fetch
const originalMacro = (globalThis as Record<string, unknown>).MACRO
const originalEnv = {
  CLAUDE_CODE_USE_OPENAI: process.env.CLAUDE_CODE_USE_OPENAI,
  CLAUDE_CODE_USE_GEMINI: process.env.CLAUDE_CODE_USE_GEMINI,
  GEMINI_API_KEY: process.env.GEMINI_API_KEY,
  GEMINI_MODEL: process.env.GEMINI_MODEL,
  GEMINI_BASE_URL: process.env.GEMINI_BASE_URL,
  GEMINI_AUTH_MODE: process.env.GEMINI_AUTH_MODE,
  GOOGLE_API_KEY: process.env.GOOGLE_API_KEY,
  OPENAI_API_KEY: process.env.OPENAI_API_KEY,
  OPENAI_BASE_URL: process.env.OPENAI_BASE_URL,
  OPENAI_MODEL: process.env.OPENAI_MODEL,
  ANTHROPIC_API_KEY: process.env.ANTHROPIC_API_KEY,
  ANTHROPIC_AUTH_TOKEN: process.env.ANTHROPIC_AUTH_TOKEN,
  ANTHROPIC_CUSTOM_HEADERS: process.env.ANTHROPIC_CUSTOM_HEADERS,
}

function restoreEnv(key: string, value: string | undefined): void {
  if (value === undefined) {
    delete process.env[key]
  } else {
    process.env[key] = value
  }
}

beforeEach(() => {
@@ -32,27 +43,33 @@ beforeEach(() => {
  process.env.GEMINI_API_KEY = 'gemini-test-key'
  process.env.GEMINI_MODEL = 'gemini-2.0-flash'
  process.env.GEMINI_BASE_URL = 'https://gemini.example/v1beta/openai'
  process.env.GEMINI_AUTH_MODE = 'api-key'

  delete process.env.CLAUDE_CODE_USE_OPENAI
  delete process.env.GOOGLE_API_KEY
  delete process.env.OPENAI_API_KEY
  delete process.env.OPENAI_BASE_URL
  delete process.env.OPENAI_MODEL
  delete process.env.ANTHROPIC_API_KEY
  delete process.env.ANTHROPIC_AUTH_TOKEN
  delete process.env.ANTHROPIC_CUSTOM_HEADERS
})

afterEach(() => {
  ;(globalThis as Record<string, unknown>).MACRO = originalMacro
  process.env.CLAUDE_CODE_USE_GEMINI = originalEnv.CLAUDE_CODE_USE_GEMINI
  process.env.GEMINI_API_KEY = originalEnv.GEMINI_API_KEY
  process.env.GEMINI_MODEL = originalEnv.GEMINI_MODEL
  process.env.GEMINI_BASE_URL = originalEnv.GEMINI_BASE_URL
  process.env.GOOGLE_API_KEY = originalEnv.GOOGLE_API_KEY
  process.env.OPENAI_API_KEY = originalEnv.OPENAI_API_KEY
  process.env.OPENAI_BASE_URL = originalEnv.OPENAI_BASE_URL
  process.env.OPENAI_MODEL = originalEnv.OPENAI_MODEL
  process.env.ANTHROPIC_API_KEY = originalEnv.ANTHROPIC_API_KEY
  process.env.ANTHROPIC_AUTH_TOKEN = originalEnv.ANTHROPIC_AUTH_TOKEN
  restoreEnv('CLAUDE_CODE_USE_OPENAI', originalEnv.CLAUDE_CODE_USE_OPENAI)
  restoreEnv('CLAUDE_CODE_USE_GEMINI', originalEnv.CLAUDE_CODE_USE_GEMINI)
  restoreEnv('GEMINI_API_KEY', originalEnv.GEMINI_API_KEY)
  restoreEnv('GEMINI_MODEL', originalEnv.GEMINI_MODEL)
  restoreEnv('GEMINI_BASE_URL', originalEnv.GEMINI_BASE_URL)
  restoreEnv('GEMINI_AUTH_MODE', originalEnv.GEMINI_AUTH_MODE)
  restoreEnv('GOOGLE_API_KEY', originalEnv.GOOGLE_API_KEY)
  restoreEnv('OPENAI_API_KEY', originalEnv.OPENAI_API_KEY)
  restoreEnv('OPENAI_BASE_URL', originalEnv.OPENAI_BASE_URL)
  restoreEnv('OPENAI_MODEL', originalEnv.OPENAI_MODEL)
  restoreEnv('ANTHROPIC_API_KEY', originalEnv.ANTHROPIC_API_KEY)
  restoreEnv('ANTHROPIC_AUTH_TOKEN', originalEnv.ANTHROPIC_AUTH_TOKEN)
  restoreEnv('ANTHROPIC_CUSTOM_HEADERS', originalEnv.ANTHROPIC_CUSTOM_HEADERS)
  globalThis.fetch = originalFetch
})

@@ -119,3 +136,135 @@ test('routes Gemini provider requests through the OpenAI-compatible shim', async
    model: 'gemini-2.0-flash',
  })
})

test('strips Anthropic-specific custom headers before sending OpenAI-compatible shim requests', async () => {
  let capturedHeaders: Headers | undefined

  process.env.CLAUDE_CODE_USE_OPENAI = '1'
  process.env.OPENAI_API_KEY = 'openai-test-key'
  process.env.OPENAI_BASE_URL = 'http://example.test/v1'
  process.env.OPENAI_MODEL = 'gpt-4o'
  process.env.ANTHROPIC_CUSTOM_HEADERS = [
    'anthropic-version: 2023-06-01',
    'anthropic-beta: prompt-caching-2024-07-31',
    'x-anthropic-additional-protection: true',
    'x-claude-remote-session-id: remote-123',
    'x-app: cli',
    'x-safe-header: keep-me',
  ].join('\n')

  globalThis.fetch = (async (_input, init) => {
    capturedHeaders = new Headers(init?.headers)

    return new Response(
      JSON.stringify({
        id: 'chatcmpl-openai',
        model: 'gpt-4o',
        choices: [
          {
            message: {
              role: 'assistant',
              content: 'ok',
            },
            finish_reason: 'stop',
          },
        ],
        usage: {
          prompt_tokens: 8,
          completion_tokens: 3,
          total_tokens: 11,
        },
      }),
      {
        headers: {
          'Content-Type': 'application/json',
        },
      },
    )
  }) as FetchType

  const client = (await getAnthropicClient({
    maxRetries: 0,
    model: 'gpt-4o',
  })) as unknown as ShimClient

  await client.beta.messages.create({
    model: 'gpt-4o',
    system: 'test system',
    messages: [{ role: 'user', content: 'hello' }],
    max_tokens: 64,
    stream: false,
  })

  expect(capturedHeaders?.get('anthropic-version')).toBeNull()
  expect(capturedHeaders?.get('anthropic-beta')).toBeNull()
  expect(capturedHeaders?.get('x-anthropic-additional-protection')).toBeNull()
  expect(capturedHeaders?.get('x-claude-remote-session-id')).toBeNull()
  expect(capturedHeaders?.get('x-app')).toBeNull()
  expect(capturedHeaders?.get('x-safe-header')).toBe('keep-me')
  expect(capturedHeaders?.get('authorization')).toBe('Bearer openai-test-key')
})

test('strips Anthropic-specific custom headers on providerOverride shim requests too', async () => {
  let capturedHeaders: Headers | undefined

  process.env.ANTHROPIC_CUSTOM_HEADERS = [
    'anthropic-version: 2023-06-01',
    'anthropic-beta: prompt-caching-2024-07-31',
    'x-claude-remote-session-id: remote-123',
    'x-safe-header: keep-me',
  ].join('\n')

  globalThis.fetch = (async (_input, init) => {
    capturedHeaders = new Headers(init?.headers)

    return new Response(
      JSON.stringify({
        id: 'chatcmpl-provider-override',
        model: 'gpt-4o',
        choices: [
          {
            message: {
              role: 'assistant',
              content: 'ok',
            },
            finish_reason: 'stop',
          },
        ],
        usage: {
          prompt_tokens: 8,
          completion_tokens: 3,
          total_tokens: 11,
        },
      }),
      {
        headers: {
          'Content-Type': 'application/json',
        },
      },
    )
  }) as FetchType

  const client = (await getAnthropicClient({
    maxRetries: 0,
    providerOverride: {
      model: 'gpt-4o',
      baseURL: 'http://example.test/v1',
      apiKey: 'provider-test-key',
    },
  })) as unknown as ShimClient

  await client.beta.messages.create({
    model: 'unused',
    system: 'test system',
    messages: [{ role: 'user', content: 'hello' }],
    max_tokens: 64,
    stream: false,
  })

  expect(capturedHeaders?.get('anthropic-version')).toBeNull()
  expect(capturedHeaders?.get('anthropic-beta')).toBeNull()
  expect(capturedHeaders?.get('x-claude-remote-session-id')).toBeNull()
  expect(capturedHeaders?.get('x-safe-header')).toBe('keep-me')
  expect(capturedHeaders?.get('authorization')).toBe('Bearer provider-test-key')
})

@@ -177,7 +177,8 @@ export async function getAnthropicClient({
  if (
    isEnvTruthy(process.env.CLAUDE_CODE_USE_OPENAI) ||
    isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB) ||
    isEnvTruthy(process.env.CLAUDE_CODE_USE_GEMINI)
    isEnvTruthy(process.env.CLAUDE_CODE_USE_GEMINI) ||
    isEnvTruthy(process.env.CLAUDE_CODE_USE_MISTRAL)
  ) {
    const { createOpenAIShimClient } = await import('./openaiShim.js')
    return createOpenAIShimClient({

@@ -14,8 +14,22 @@ import {
} from './providerConfig.js'

const tempDirs: string[] = []
const originalEnv = {
  OPENAI_BASE_URL: process.env.OPENAI_BASE_URL,
  OPENAI_API_BASE: process.env.OPENAI_API_BASE,
  CLAUDE_CODE_USE_GITHUB: process.env.CLAUDE_CODE_USE_GITHUB,
}

afterEach(() => {
  if (originalEnv.OPENAI_BASE_URL === undefined) delete process.env.OPENAI_BASE_URL
  else process.env.OPENAI_BASE_URL = originalEnv.OPENAI_BASE_URL

  if (originalEnv.OPENAI_API_BASE === undefined) delete process.env.OPENAI_API_BASE
  else process.env.OPENAI_API_BASE = originalEnv.OPENAI_API_BASE

  if (originalEnv.CLAUDE_CODE_USE_GITHUB === undefined) delete process.env.CLAUDE_CODE_USE_GITHUB
  else process.env.CLAUDE_CODE_USE_GITHUB = originalEnv.CLAUDE_CODE_USE_GITHUB

  while (tempDirs.length > 0) {
    const dir = tempDirs.pop()
    if (dir) rmSync(dir, { recursive: true, force: true })
@@ -62,12 +76,27 @@ describe('Codex provider config', () => {
  })

  test('resolves codexplan alias to Codex transport with reasoning', () => {
    delete process.env.OPENAI_BASE_URL
    delete process.env.OPENAI_API_BASE
    delete process.env.CLAUDE_CODE_USE_GITHUB

    const resolved = resolveProviderRequest({ model: 'codexplan' })
    expect(resolved.transport).toBe('codex_responses')
    expect(resolved.resolvedModel).toBe('gpt-5.4')
    expect(resolved.reasoning).toEqual({ effort: 'high' })
  })

  test('does not force Codex transport when a local non-Codex base URL is explicit', () => {
    const resolved = resolveProviderRequest({
      model: 'codexplan',
      baseUrl: 'http://127.0.0.1:8080/v1',
    })

    expect(resolved.transport).toBe('chat_completions')
    expect(resolved.baseUrl).toBe('http://127.0.0.1:8080/v1')
    expect(resolved.resolvedModel).toBe('gpt-5.4')
  })

  test('resolves codexplan to Codex transport even when OPENAI_BASE_URL is the string "undefined"', () => {
    // On Windows, env vars can leak as the literal string "undefined" instead of
    // the JS value undefined when not properly unset (issue #336).
@@ -180,6 +209,117 @@ describe('Codex request translation', () => {
    ])
  })

  test('preserves Grep tool pattern field in Codex strict schemas', () => {
    const tools = convertToolsToResponsesTools([
      {
        name: 'Grep',
        description: 'Search file contents',
        input_schema: {
          type: 'object',
          properties: {
            pattern: { type: 'string', description: 'Search pattern' },
            path: { type: 'string' },
          },
          required: ['pattern'],
          additionalProperties: false,
        },
      },
    ])

    expect(tools).toEqual([
      {
        type: 'function',
        name: 'Grep',
        description: 'Search file contents',
        parameters: {
          type: 'object',
          properties: {
            pattern: { type: 'string', description: 'Search pattern' },
            path: { type: 'string' },
          },
          required: ['pattern', 'path'],
          additionalProperties: false,
        },
        strict: true,
      },
    ])
  })

  test('preserves Glob tool pattern field in Codex strict schemas', () => {
    const tools = convertToolsToResponsesTools([
      {
        name: 'Glob',
        description: 'Find files by pattern',
        input_schema: {
          type: 'object',
          properties: {
            pattern: { type: 'string', description: 'Glob pattern' },
            path: { type: 'string' },
          },
          required: ['pattern'],
          additionalProperties: false,
        },
      },
    ])

    expect(tools).toEqual([
      {
        type: 'function',
        name: 'Glob',
        description: 'Find files by pattern',
        parameters: {
          type: 'object',
          properties: {
            pattern: { type: 'string', description: 'Glob pattern' },
            path: { type: 'string' },
          },
          required: ['pattern', 'path'],
          additionalProperties: false,
        },
        strict: true,
      },
    ])
  })

  test('strips validator pattern keyword but keeps string field named pattern in Codex schemas', () => {
    const tools = convertToolsToResponsesTools([
      {
        name: 'RegexProbe',
        description: 'Probe regex schema handling',
        input_schema: {
          type: 'object',
          properties: {
            pattern: {
              type: 'string',
              pattern: '^[a-z]+$',
            },
          },
          required: ['pattern'],
          additionalProperties: false,
        },
      },
    ])

    expect(tools).toEqual([
      {
        type: 'function',
        name: 'RegexProbe',
        description: 'Probe regex schema handling',
        parameters: {
          type: 'object',
          properties: {
            pattern: {
              type: 'string',
            },
          },
          required: ['pattern'],
          additionalProperties: false,
        },
        strict: true,
      },
    ])
  })

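The expectations in the three tests above follow from OpenAI's strict function-calling rules: with strict: true, every key in properties must also appear in required (optionality is typically expressed via nullable types instead), and unsupported JSON Schema validator keywords such as pattern are dropped — while a property that merely happens to be named "pattern" must survive. Illustrated side by side:

// Input (Anthropic-style tool schema): `path` is optional, and the `pattern`
// property carries a `pattern` *validator* keyword.
const looseSchema = {
  type: 'object',
  properties: {
    pattern: { type: 'string', pattern: '^[a-z]+$' },
    path: { type: 'string' },
  },
  required: ['pattern'],
  additionalProperties: false,
}

// After conversion for strict Responses tools: all properties are required,
// the validator keyword is gone, the property named "pattern" remains.
const strictSchema = {
  type: 'object',
  properties: {
    pattern: { type: 'string' },
    path: { type: 'string' },
  },
  required: ['pattern', 'path'],
  additionalProperties: false,
}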
  test('removes unsupported uri format from strict Responses schemas', () => {
    const tools = convertToolsToResponsesTools([
      {
@@ -325,6 +465,37 @@ describe('Codex request translation', () => {
    ])
  })

  test('strips leaked reasoning preamble from completed Codex text responses', () => {
    const message = convertCodexResponseToAnthropicMessage(
      {
        id: 'resp_1',
        model: 'gpt-5.4',
        output: [
          {
            type: 'message',
            role: 'assistant',
            content: [
              {
                type: 'output_text',
                text:
                  'The user just said "hey" - a simple greeting. I should respond briefly and friendly.\n\nHey! How can I help you today?',
              },
            ],
          },
        ],
        usage: { input_tokens: 12, output_tokens: 4 },
      },
      'gpt-5.4',
    )

    expect(message.content).toEqual([
      {
        type: 'text',
        text: 'Hey! How can I help you today?',
      },
    ])
  })

  test('translates Codex SSE text stream into Anthropic events', async () => {
    const responseText = [
      'event: response.output_item.added',
@@ -355,4 +526,44 @@ describe('Codex request translation', () => {
      'message_stop',
    ])
  })

  test('strips leaked reasoning preamble from Codex SSE text stream', async () => {
    const responseText = [
      'event: response.output_item.added',
      'data: {"type":"response.output_item.added","item":{"id":"msg_1","type":"message","status":"in_progress","content":[],"role":"assistant"},"output_index":0,"sequence_number":0}',
      '',
      'event: response.content_part.added',
      'data: {"type":"response.content_part.added","content_index":0,"item_id":"msg_1","output_index":0,"part":{"type":"output_text","text":""},"sequence_number":1}',
      '',
      'event: response.output_text.delta',
      'data: {"type":"response.output_text.delta","content_index":0,"delta":"The user just said \\"hey\\" - a simple greeting. I should respond briefly and friendly.\\n\\nHey! How can I help you today?","item_id":"msg_1","output_index":0,"sequence_number":2}',
      '',
      'event: response.output_item.done',
      'data: {"type":"response.output_item.done","item":{"id":"msg_1","type":"message","status":"completed","content":[{"type":"output_text","text":"The user just said \\"hey\\" - a simple greeting. I should respond briefly and friendly.\\n\\nHey! How can I help you today?"}],"role":"assistant"},"output_index":0,"sequence_number":3}',
      '',
      'event: response.completed',
      'data: {"type":"response.completed","response":{"id":"resp_1","status":"completed","model":"gpt-5.4","output":[{"type":"message","role":"assistant","content":[{"type":"output_text","text":"The user just said \\"hey\\" - a simple greeting. I should respond briefly and friendly.\\n\\nHey! How can I help you today?"}]}],"usage":{"input_tokens":2,"output_tokens":1}},"sequence_number":4}',
      '',
    ].join('\n')

    const stream = new ReadableStream({
      start(controller) {
        controller.enqueue(new TextEncoder().encode(responseText))
        controller.close()
      },
    })

    const textDeltas: string[] = []
    for await (const event of codexStreamToAnthropic(
      new Response(stream),
      'gpt-5.4',
    )) {
      const delta = (event as { delta?: { type?: string; text?: string } }).delta
      if (delta?.type === 'text_delta' && typeof delta.text === 'string') {
        textDeltas.push(delta.text)
      }
    }

    expect(textDeltas).toEqual(['Hey! How can I help you today?'])
  })
})

@@ -4,6 +4,11 @@ import type {
  ResolvedProviderRequest,
} from './providerConfig.js'
import { sanitizeSchemaForOpenAICompat } from './openaiSchemaSanitizer.js'
import {
  looksLikeLeakedReasoningPrefix,
  shouldBufferPotentialReasoningPrefix,
  stripLeakedReasoningPreamble,
} from './reasoningLeakSanitizer.js'

export interface AnthropicUsage {
  input_tokens: number
@@ -75,12 +80,17 @@ type CodexSseEvent = {
function makeUsage(usage?: {
  input_tokens?: number
  output_tokens?: number
  input_tokens_details?: { cached_tokens?: number }
  prompt_tokens_details?: { cached_tokens?: number }
}): AnthropicUsage {
  return {
    input_tokens: usage?.input_tokens ?? 0,
    output_tokens: usage?.output_tokens ?? 0,
    cache_creation_input_tokens: 0,
    cache_read_input_tokens: 0,
    cache_read_input_tokens:
      usage?.input_tokens_details?.cached_tokens ??
      usage?.prompt_tokens_details?.cached_tokens ??
      0,
  }
}

@@ -678,17 +688,34 @@ export async function* codexStreamToAnthropic(
|
||||
{ index: number; toolUseId: string }
|
||||
>()
|
||||
let activeTextBlockIndex: number | null = null
|
||||
let activeTextBuffer = ''
|
||||
let textBufferMode: 'none' | 'pending' | 'strip' = 'none'
|
||||
let nextContentBlockIndex = 0
|
||||
let sawToolUse = false
|
||||
let finalResponse: Record<string, any> | undefined
|
||||
|
||||
const closeActiveTextBlock = async function* () {
|
||||
if (activeTextBlockIndex === null) return
|
||||
if (textBufferMode !== 'none') {
|
||||
const sanitized = stripLeakedReasoningPreamble(activeTextBuffer)
|
||||
if (sanitized) {
|
||||
yield {
|
||||
type: 'content_block_delta',
|
||||
index: activeTextBlockIndex,
|
||||
delta: {
|
||||
type: 'text_delta',
|
||||
text: sanitized,
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
yield {
|
||||
type: 'content_block_stop',
|
||||
index: activeTextBlockIndex,
|
||||
}
|
||||
activeTextBlockIndex = null
|
||||
activeTextBuffer = ''
|
||||
textBufferMode = 'none'
|
||||
}
|
||||
|
||||
const startTextBlockIfNeeded = async function* () {
|
||||
@@ -764,7 +791,36 @@ export async function* codexStreamToAnthropic(
|
||||
|
||||
if (event.event === 'response.output_text.delta') {
|
||||
yield* startTextBlockIfNeeded()
|
||||
activeTextBuffer += payload.delta ?? ''
|
||||
if (activeTextBlockIndex !== null) {
|
||||
if (
|
||||
textBufferMode === 'strip' ||
|
||||
looksLikeLeakedReasoningPrefix(activeTextBuffer)
|
||||
) {
|
||||
textBufferMode = 'strip'
|
||||
continue
|
||||
}
|
||||
|
||||
if (textBufferMode === 'pending') {
|
||||
if (shouldBufferPotentialReasoningPrefix(activeTextBuffer)) {
|
||||
continue
|
||||
}
|
||||
yield {
|
||||
type: 'content_block_delta',
|
||||
index: activeTextBlockIndex,
|
||||
delta: {
|
||||
type: 'text_delta',
|
||||
text: activeTextBuffer,
|
||||
},
|
||||
}
|
||||
textBufferMode = 'none'
|
||||
continue
|
||||
}
|
||||
|
||||
if (shouldBufferPotentialReasoningPrefix(activeTextBuffer)) {
|
||||
textBufferMode = 'pending'
|
||||
continue
|
||||
}
|
||||
yield {
|
||||
type: 'content_block_delta',
|
||||
index: activeTextBlockIndex,
|
||||
@@ -839,8 +895,16 @@ export async function* codexStreamToAnthropic(
|
||||
stop_sequence: null,
|
||||
},
|
||||
usage: {
|
||||
input_tokens: finalResponse?.usage?.input_tokens ?? 0,
|
||||
// Subtract cached tokens: OpenAI includes them in input_tokens,
|
||||
// but Anthropic convention treats input_tokens as non-cached only.
|
||||
input_tokens: (finalResponse?.usage?.input_tokens ?? 0) -
|
||||
(finalResponse?.usage?.input_tokens_details?.cached_tokens ??
|
||||
finalResponse?.usage?.prompt_tokens_details?.cached_tokens ?? 0),
|
||||
output_tokens: finalResponse?.usage?.output_tokens ?? 0,
|
||||
cache_read_input_tokens:
|
||||
finalResponse?.usage?.input_tokens_details?.cached_tokens ??
|
||||
finalResponse?.usage?.prompt_tokens_details?.cached_tokens ??
|
||||
0,
|
||||
},
|
||||
}
|
||||
yield { type: 'message_stop' }
|
||||
@@ -859,7 +923,7 @@ export function convertCodexResponseToAnthropicMessage(
|
||||
if (part?.type === 'output_text') {
|
||||
content.push({
|
||||
type: 'text',
|
||||
text: part.text ?? '',
|
||||
text: stripLeakedReasoningPreamble(part.text ?? ''),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -557,8 +557,12 @@ export function getAssistantMessageFromError(
|
||||
const stripped = error.message.replace(/^429\s+/, '')
|
||||
const innerMessage = stripped.match(/"message"\s*:\s*"([^"]*)"/)?.[1]
|
||||
const detail = innerMessage || stripped
|
||||
const retryAfter = (error as APIError).headers?.get?.('retry-after')
|
||||
const retryHint = retryAfter && !isNaN(Number(retryAfter))
|
||||
? `Try again in ${retryAfter} seconds.`
|
||||
: 'Try again in a few seconds.'
|
||||
return createAssistantAPIErrorMessage({
|
||||
content: `${API_ERROR_MESSAGE_PREFIX}: Request rejected (429) · ${detail || `this may be a temporary capacity issue${getAPIProvider() === 'firstParty' ? ' — check status.anthropic.com' : ''}`}`,
|
||||
content: `${API_ERROR_MESSAGE_PREFIX}: Request rejected (429) · ${detail || 'this may be a temporary capacity issue'} — ${retryHint}`,
|
||||
error: 'rate_limit',
|
||||
})
|
||||
}
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -15,9 +15,9 @@
|
||||
* OPENAI_MODEL=gpt-4o — default model override
|
||||
* CODEX_API_KEY / ~/.codex/auth.json — Codex auth for codexplan/codexspark
|
||||
*
|
||||
* GitHub Models (models.github.ai), OpenAI-compatible:
|
||||
* GitHub Copilot API (api.githubcopilot.com), OpenAI-compatible:
|
||||
* CLAUDE_CODE_USE_GITHUB=1 — enable GitHub inference (no need for USE_OPENAI)
|
||||
* GITHUB_TOKEN or GH_TOKEN — PAT with models access (mapped to Bearer auth)
|
||||
* GITHUB_TOKEN or GH_TOKEN — Copilot API token (mapped to Bearer auth)
|
||||
* OPENAI_MODEL — optional; use github:copilot or openai/gpt-4.1 style IDs
|
||||
*/
|
||||
|
||||
@@ -26,10 +26,17 @@ import { isEnvTruthy } from '../../utils/envUtils.js'
|
||||
import { resolveGeminiCredential } from '../../utils/geminiAuth.js'
|
||||
import { hydrateGeminiAccessTokenFromSecureStorage } from '../../utils/geminiCredentials.js'
|
||||
import { hydrateGithubModelsTokenFromSecureStorage } from '../../utils/githubModelsCredentials.js'
|
||||
import {
|
||||
looksLikeLeakedReasoningPrefix,
|
||||
shouldBufferPotentialReasoningPrefix,
|
||||
stripLeakedReasoningPreamble,
|
||||
} from './reasoningLeakSanitizer.js'
|
||||
import {
|
||||
codexStreamToAnthropic,
|
||||
collectCodexCompletedResponse,
|
||||
convertAnthropicMessagesToResponsesInput,
|
||||
convertCodexResponseToAnthropicMessage,
|
||||
convertToolsToResponsesTools,
|
||||
performCodexRequest,
|
||||
type AnthropicStreamEvent,
|
||||
type AnthropicUsage,
|
||||
@@ -39,9 +46,14 @@ import {
|
||||
isLocalProviderUrl,
|
||||
resolveCodexApiCredentials,
|
||||
resolveProviderRequest,
|
||||
getGithubEndpointType,
|
||||
} from './providerConfig.js'
|
||||
import { sanitizeSchemaForOpenAICompat } from '../../utils/schemaSanitizer.js'
|
||||
import { redactSecretValueForDisplay } from '../../utils/providerProfile.js'
|
||||
import {
|
||||
normalizeToolArguments,
|
||||
hasToolFieldMapping,
|
||||
} from './toolArgumentNormalization.js'
|
||||
|
||||
type SecretValueSource = Partial<{
|
||||
OPENAI_API_KEY: string
|
||||
@@ -49,18 +61,66 @@ type SecretValueSource = Partial<{
|
||||
GEMINI_API_KEY: string
|
||||
GOOGLE_API_KEY: string
|
||||
GEMINI_ACCESS_TOKEN: string
|
||||
MISTRAL_API_KEY: string
|
||||
}>
|
||||
|
||||
const GITHUB_MODELS_DEFAULT_BASE = 'https://models.github.ai/inference'
|
||||
const GITHUB_API_VERSION = '2022-11-28'
|
||||
const GITHUB_COPILOT_BASE = 'https://api.githubcopilot.com'
|
||||
const GITHUB_429_MAX_RETRIES = 3
|
||||
const GITHUB_429_BASE_DELAY_SEC = 1
|
||||
const GITHUB_429_MAX_DELAY_SEC = 32
|
||||
const GEMINI_API_HOST = 'generativelanguage.googleapis.com'
|
||||
|
||||
const COPILOT_HEADERS: Record<string, string> = {
|
||||
'User-Agent': 'GitHubCopilotChat/0.26.7',
|
||||
'Editor-Version': 'vscode/1.99.3',
|
||||
'Editor-Plugin-Version': 'copilot-chat/0.26.7',
|
||||
'Copilot-Integration-Id': 'vscode-chat',
|
||||
}
|
||||
|
||||
function isGithubModelsMode(): boolean {
|
||||
return isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB)
|
||||
}
|
||||
|
||||
function isMistralMode(): boolean {
|
||||
return isEnvTruthy(process.env.CLAUDE_CODE_USE_MISTRAL)
|
||||
}
|
||||
|
||||
function filterAnthropicHeaders(
|
||||
headers: Record<string, string> | undefined,
|
||||
): Record<string, string> {
|
||||
if (!headers) return {}
|
||||
|
||||
const filtered: Record<string, string> = {}
|
||||
for (const [key, value] of Object.entries(headers)) {
|
||||
const lower = key.toLowerCase()
|
||||
if (
|
||||
lower.startsWith('x-anthropic') ||
|
||||
lower.startsWith('anthropic-') ||
|
||||
lower.startsWith('x-claude') ||
|
||||
lower === 'x-app' ||
|
||||
lower === 'x-client-app' ||
|
||||
lower === 'authorization' ||
|
||||
lower === 'x-api-key' ||
|
||||
lower === 'api-key'
|
||||
) {
|
||||
continue
|
||||
}
|
||||
filtered[key] = value
|
||||
}
|
||||
|
||||
return filtered
|
||||
}
|
||||
|
||||
function hasGeminiApiHost(baseUrl: string | undefined): boolean {
|
||||
if (!baseUrl) return false
|
||||
|
||||
try {
|
||||
return new URL(baseUrl).hostname.toLowerCase() === GEMINI_API_HOST
|
||||
} catch {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
function formatRetryAfterHint(response: Response): string {
|
||||
const ra = response.headers.get('retry-after')
|
||||
return ra ? ` (Retry-After: ${ra})` : ''
|
||||
@@ -180,10 +240,12 @@ function convertContentBlocks(
|
||||
// handled separately
|
||||
break
|
||||
case 'thinking':
|
||||
// Append thinking as text with a marker for models that support reasoning
|
||||
if (block.thinking) {
|
||||
parts.push({ type: 'text', text: `<thinking>${block.thinking}</thinking>` })
|
||||
}
|
||||
case 'redacted_thinking':
|
||||
// Strip thinking blocks for OpenAI-compatible providers.
|
||||
// These are Anthropic-specific content types that 3P providers
|
||||
// don't understand. Serializing them as <thinking> text corrupts
|
||||
// multi-turn context: the model sees the tags as part of its
|
||||
// previous reply and may mimic or misattribute them.
|
||||
break
|
||||
default:
|
||||
if (block.text) {
|
||||
@@ -197,6 +259,13 @@ function convertContentBlocks(
|
||||
return parts
|
||||
}
|
||||
|
||||
function isGeminiMode(): boolean {
|
||||
return (
|
||||
isEnvTruthy(process.env.CLAUDE_CODE_USE_GEMINI) ||
|
||||
hasGeminiApiHost(process.env.OPENAI_BASE_URL)
|
||||
)
|
||||
}
|
||||
|
||||
function convertMessages(
|
||||
messages: Array<{ role: string; message?: { role?: string; content?: unknown }; content?: unknown }>,
|
||||
system: unknown,
|
||||
@@ -248,6 +317,7 @@ function convertMessages(
|
||||
// Check for tool_use blocks
|
||||
if (Array.isArray(content)) {
|
||||
const toolUses = content.filter((b: { type?: string }) => b.type === 'tool_use')
|
||||
const thinkingBlock = content.find((b: { type?: string }) => b.type === 'thinking')
|
||||
const textContent = content.filter(
|
||||
(b: { type?: string }) => b.type !== 'tool_use' && b.type !== 'thinking',
|
||||
)
|
||||
@@ -267,18 +337,46 @@ function convertMessages(
|
||||
name?: string
|
||||
input?: unknown
|
||||
extra_content?: Record<string, unknown>
|
||||
}) => ({
|
||||
id: tu.id ?? `call_${crypto.randomUUID().replace(/-/g, '')}`,
|
||||
type: 'function' as const,
|
||||
function: {
|
||||
name: tu.name ?? 'unknown',
|
||||
arguments:
|
||||
typeof tu.input === 'string'
|
||||
? tu.input
|
||||
: JSON.stringify(tu.input ?? {}),
|
||||
},
|
||||
...(tu.extra_content ? { extra_content: tu.extra_content } : {}),
|
||||
}),
|
||||
signature?: string
|
||||
}, index) => {
|
||||
const toolCall: NonNullable<OpenAIMessage['tool_calls']>[number] = {
|
||||
id: tu.id ?? `call_${crypto.randomUUID().replace(/-/g, '')}`,
|
||||
type: 'function' as const,
|
||||
function: {
|
||||
name: tu.name ?? 'unknown',
|
||||
arguments:
|
||||
typeof tu.input === 'string'
|
||||
? tu.input
|
||||
: JSON.stringify(tu.input ?? {}),
|
||||
},
|
||||
}
|
||||
|
||||
// Preserve existing extra_content if present
|
||||
if (tu.extra_content) {
|
||||
toolCall.extra_content = { ...tu.extra_content }
|
||||
}
|
||||
|
||||
// Handle Gemini thought_signature
|
||||
if (isGeminiMode()) {
|
||||
// If the model provided a signature in the tool_use block itself (e.g. from a previous Turn/Step)
|
||||
// Use thinkingBlock.signature for ALL tool calls in the same assistant turn if available.
|
||||
// The API requires the same signature on every replayed function call part in a parallel set.
|
||||
const signature = tu.signature ?? (thinkingBlock as any)?.signature
|
||||
|
||||
// Merge into existing google-specific metadata if present
|
||||
const existingGoogle = (toolCall.extra_content?.google as Record<string, unknown>) ?? {}
|
||||
|
||||
toolCall.extra_content = {
|
||||
...toolCall.extra_content,
|
||||
google: {
|
||||
...existingGoogle,
|
||||
thought_signature: signature ?? "skip_thought_signature_validator"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return toolCall
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
@@ -295,7 +393,41 @@ function convertMessages(
|
||||
}
|
||||
}
|
||||
|
||||
return result
|
||||
// Coalescing pass: merge consecutive messages of the same role.
|
||||
// OpenAI/vLLM/Ollama require strict user↔assistant alternation.
|
||||
// Multiple consecutive tool messages are allowed (assistant → tool* → user).
|
||||
// Consecutive user or assistant messages must be merged to avoid Jinja
|
||||
// template errors like "roles must alternate" (Devstral, Mistral models).
|
||||
const coalesced: OpenAIMessage[] = []
|
||||
for (const msg of result) {
|
||||
const prev = coalesced[coalesced.length - 1]
|
||||
|
||||
if (prev && prev.role === msg.role && msg.role !== 'tool' && msg.role !== 'system') {
|
||||
const prevContent = prev.content
|
||||
const curContent = msg.content
|
||||
|
||||
if (typeof prevContent === 'string' && typeof curContent === 'string') {
|
||||
prev.content = prevContent + (prevContent && curContent ? '\n' : '') + curContent
|
||||
} else {
|
||||
const toArray = (
|
||||
c: string | Array<{ type: string; text?: string; image_url?: { url: string } }> | undefined,
|
||||
): Array<{ type: string; text?: string; image_url?: { url: string } }> => {
|
||||
if (!c) return []
|
||||
if (typeof c === 'string') return c ? [{ type: 'text', text: c }] : []
|
||||
return c
|
||||
}
|
||||
prev.content = [...toArray(prevContent), ...toArray(curContent)]
|
||||
}
|
||||
|
||||
if (msg.tool_calls?.length) {
|
||||
prev.tool_calls = [...(prev.tool_calls ?? []), ...msg.tool_calls]
|
||||
}
|
||||
} else {
|
||||
coalesced.push(msg)
|
||||
}
|
||||
}
|
||||
|
||||
return coalesced
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -325,11 +457,13 @@ function normalizeSchemaForOpenAI(
|
||||
record.properties = normalizedProps
|
||||
|
||||
if (strict) {
|
||||
// OpenAI strict mode requires every property to be listed in required[]
|
||||
const allKeys = Object.keys(normalizedProps)
|
||||
record.required = Array.from(new Set([...existingRequired, ...allKeys]))
|
||||
// OpenAI strict mode requires additionalProperties: false on all object
|
||||
// schemas — override unconditionally to ensure nested objects comply.
|
||||
// Keep only the properties that were originally marked required in the schema.
|
||||
// Adding every property to required[] (the previous behaviour) caused strict
|
||||
// OpenAI-compatible providers (Groq, Azure, etc.) to reject tool calls because
|
||||
// the model correctly omits optional arguments — but the provider treats them
|
||||
// as missing required fields and returns a 400 / tool_use_failed error.
|
||||
record.required = existingRequired.filter(k => k in normalizedProps)
|
||||
// additionalProperties: false is still required by strict-mode providers.
|
||||
record.additionalProperties = false
|
||||
} else {
|
||||
// For Gemini: keep only existing required keys that are present in properties
|
||||
@@ -363,7 +497,7 @@ function normalizeSchemaForOpenAI(
|
||||
function convertTools(
|
||||
tools: Array<{ name: string; description?: string; input_schema?: Record<string, unknown> }>,
|
||||
): OpenAITool[] {
|
||||
const isGemini = isEnvTruthy(process.env.CLAUDE_CODE_USE_GEMINI)
|
||||
const isGemini = isGeminiMode()
|
||||
|
||||
return tools
|
||||
.filter(t => t.name !== 'ToolSearchTool') // Not relevant for OpenAI
|
||||
@@ -405,6 +539,7 @@ interface OpenAIStreamChunk {
|
||||
delta: {
|
||||
role?: string
|
||||
content?: string | null
|
||||
reasoning_content?: string | null
|
||||
tool_calls?: Array<{
|
||||
index: number
|
||||
id?: string
|
||||
@@ -434,11 +569,38 @@ function convertChunkUsage(
|
||||
): Partial<AnthropicUsage> | undefined {
|
||||
if (!usage) return undefined
|
||||
|
||||
const cached = usage.prompt_tokens_details?.cached_tokens ?? 0
|
||||
return {
|
||||
input_tokens: usage.prompt_tokens ?? 0,
|
||||
// Subtract cached tokens: OpenAI includes them in prompt_tokens,
|
||||
// but Anthropic convention treats input_tokens as non-cached only.
|
||||
input_tokens: (usage.prompt_tokens ?? 0) - cached,
|
||||
output_tokens: usage.completion_tokens ?? 0,
|
||||
cache_creation_input_tokens: 0,
|
||||
cache_read_input_tokens: usage.prompt_tokens_details?.cached_tokens ?? 0,
|
||||
cache_read_input_tokens: cached,
|
||||
}
|
||||
}
|
||||
|
||||
const JSON_REPAIR_SUFFIXES = [
|
||||
'}', '"}', ']}', '"]}', '}}', '"}}', ']}}', '"]}}', '"]}]}', '}]}'
|
||||
]
|
||||
|
||||
function repairPossiblyTruncatedObjectJson(raw: string): string | null {
|
||||
try {
|
||||
const parsed = JSON.parse(raw)
|
||||
return parsed && typeof parsed === 'object' && !Array.isArray(parsed)
|
||||
? raw
|
||||
: null
|
||||
} catch {
|
||||
for (const combo of JSON_REPAIR_SUFFIXES) {
|
||||
try {
|
||||
const repaired = raw + combo
|
||||
const parsed = JSON.parse(repaired)
|
||||
if (parsed && typeof parsed === 'object' && !Array.isArray(parsed)) {
|
||||
return repaired
|
||||
}
|
||||
} catch {}
|
||||
}
|
||||
return null
|
||||
}
|
||||
}
|
||||
|
||||
@@ -452,8 +614,21 @@ async function* openaiStreamToAnthropic(
|
||||
): AsyncGenerator<AnthropicStreamEvent> {
|
||||
const messageId = makeMessageId()
|
||||
let contentBlockIndex = 0
|
||||
const activeToolCalls = new Map<number, { id: string; name: string; index: number; jsonBuffer: string }>()
|
||||
const activeToolCalls = new Map<
|
||||
number,
|
||||
{
|
||||
id: string
|
||||
name: string
|
||||
index: number
|
||||
jsonBuffer: string
|
||||
normalizeAtStop: boolean
|
||||
}
|
||||
>()
|
||||
let hasEmittedContentStart = false
|
||||
let hasEmittedThinkingStart = false
|
||||
let hasClosedThinking = false
|
||||
let activeTextBuffer = ''
|
||||
let textBufferMode: 'none' | 'pending' | 'strip' = 'none'
|
||||
let lastStopReason: 'tool_use' | 'max_tokens' | 'end_turn' | null = null
|
||||
let hasEmittedFinalUsage = false
|
||||
let hasProcessedFinishReason = false
|
||||
@@ -484,6 +659,30 @@ async function* openaiStreamToAnthropic(
|
||||
const decoder = new TextDecoder()
|
||||
let buffer = ''
|
||||
|
||||
const closeActiveContentBlock = async function* () {
|
||||
if (!hasEmittedContentStart) return
|
||||
|
||||
if (textBufferMode !== 'none') {
|
||||
const sanitized = stripLeakedReasoningPreamble(activeTextBuffer)
|
||||
if (sanitized) {
|
||||
yield {
|
||||
type: 'content_block_delta',
|
||||
index: contentBlockIndex,
|
||||
delta: { type: 'text_delta', text: sanitized },
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
yield {
|
||||
type: 'content_block_stop',
|
||||
index: contentBlockIndex,
|
||||
}
|
||||
contentBlockIndex++
|
||||
hasEmittedContentStart = false
|
||||
activeTextBuffer = ''
|
||||
textBufferMode = 'none'
|
||||
}
|
||||
|
||||
try {
|
||||
while (true) {
|
||||
const { done, value } = await reader.read()
|
||||
@@ -510,9 +709,35 @@ async function* openaiStreamToAnthropic(
|
||||
for (const choice of chunk.choices ?? []) {
|
||||
const delta = choice.delta
|
||||
|
||||
// Reasoning models (e.g. GLM-5, DeepSeek) may stream chain-of-thought
|
||||
// in `reasoning_content` before the actual reply appears in `content`.
|
||||
// Emit reasoning as a thinking block and content as a text block.
|
||||
if (delta.reasoning_content != null && delta.reasoning_content !== '') {
|
||||
if (!hasEmittedThinkingStart) {
|
||||
yield {
|
||||
type: 'content_block_start',
|
||||
index: contentBlockIndex,
|
||||
content_block: { type: 'thinking', thinking: '' },
|
||||
}
|
||||
hasEmittedThinkingStart = true
|
||||
}
|
||||
yield {
|
||||
type: 'content_block_delta',
|
||||
index: contentBlockIndex,
|
||||
delta: { type: 'thinking_delta', thinking: delta.reasoning_content },
|
||||
}
|
||||
}
|
||||
|
||||
// Text content — use != null to distinguish absent field from empty string,
|
||||
// some providers send "" as first delta to signal streaming start
|
||||
if (delta.content != null) {
|
||||
if (delta.content != null && delta.content !== '') {
|
||||
// Close thinking block if transitioning from reasoning to content
|
||||
if (hasEmittedThinkingStart && !hasClosedThinking) {
|
||||
yield { type: 'content_block_stop', index: contentBlockIndex }
|
||||
contentBlockIndex++
|
||||
hasClosedThinking = true
|
||||
}
|
||||
activeTextBuffer += delta.content
|
||||
if (!hasEmittedContentStart) {
|
||||
yield {
|
||||
type: 'content_block_start',
|
||||
@@ -521,6 +746,35 @@ async function* openaiStreamToAnthropic(
|
||||
}
|
||||
hasEmittedContentStart = true
|
||||
}
|
||||
|
||||
if (
|
||||
textBufferMode === 'strip' ||
|
||||
looksLikeLeakedReasoningPrefix(activeTextBuffer)
|
||||
) {
|
||||
textBufferMode = 'strip'
|
||||
continue
|
||||
}
|
||||
|
||||
if (textBufferMode === 'pending') {
|
||||
if (shouldBufferPotentialReasoningPrefix(activeTextBuffer)) {
|
||||
continue
|
||||
}
|
||||
yield {
|
||||
type: 'content_block_delta',
|
||||
index: contentBlockIndex,
|
||||
delta: {
|
||||
type: 'text_delta',
|
||||
text: activeTextBuffer,
|
||||
},
|
||||
}
|
||||
textBufferMode = 'none'
|
||||
continue
|
||||
}
|
||||
|
||||
if (shouldBufferPotentialReasoningPrefix(activeTextBuffer)) {
|
||||
textBufferMode = 'pending'
|
||||
continue
|
||||
}
|
||||
yield {
|
||||
type: 'content_block_delta',
|
||||
index: contentBlockIndex,
|
||||
@@ -532,22 +786,25 @@ async function* openaiStreamToAnthropic(
|
||||
if (delta.tool_calls) {
|
||||
for (const tc of delta.tool_calls) {
|
||||
if (tc.id && tc.function?.name) {
|
||||
// New tool call starting
|
||||
if (hasEmittedContentStart) {
|
||||
yield {
|
||||
type: 'content_block_stop',
|
||||
index: contentBlockIndex,
|
||||
}
|
||||
// New tool call starting — close any open thinking block first
|
||||
if (hasEmittedThinkingStart && !hasClosedThinking) {
|
||||
yield { type: 'content_block_stop', index: contentBlockIndex }
|
||||
contentBlockIndex++
|
||||
hasEmittedContentStart = false
|
||||
hasClosedThinking = true
|
||||
}
|
||||
if (hasEmittedContentStart) {
|
||||
yield* closeActiveContentBlock()
|
||||
}
|
||||
|
||||
const toolBlockIndex = contentBlockIndex
|
||||
const initialArguments = tc.function.arguments ?? ''
|
||||
const normalizeAtStop = hasToolFieldMapping(tc.function.name)
|
||||
activeToolCalls.set(tc.index, {
|
||||
id: tc.id,
|
||||
name: tc.function.name,
|
||||
index: toolBlockIndex,
|
||||
jsonBuffer: tc.function.arguments ?? '',
|
||||
jsonBuffer: initialArguments,
|
||||
normalizeAtStop,
|
||||
})
|
||||
|
||||
yield {
|
||||
@@ -559,12 +816,19 @@ async function* openaiStreamToAnthropic(
|
||||
name: tc.function.name,
|
||||
input: {},
|
||||
...(tc.extra_content ? { extra_content: tc.extra_content } : {}),
|
||||
// Extract Gemini signature from extra_content
|
||||
...((tc.extra_content?.google as any)?.thought_signature
|
||||
? {
|
||||
signature: (tc.extra_content.google as any)
|
||||
.thought_signature,
|
||||
}
|
||||
: {}),
|
||||
},
|
||||
}
|
||||
contentBlockIndex++
|
||||
|
||||
// Emit any initial arguments
|
||||
if (tc.function.arguments) {
|
||||
if (tc.function.arguments && !normalizeAtStop) {
|
||||
yield {
|
||||
type: 'content_block_delta',
|
||||
index: toolBlockIndex,
|
||||
@@ -581,6 +845,11 @@ async function* openaiStreamToAnthropic(
|
||||
if (tc.function.arguments) {
|
||||
active.jsonBuffer += tc.function.arguments
|
||||
}
|
||||
|
||||
if (active.normalizeAtStop) {
|
||||
continue
|
||||
}
|
||||
|
||||
yield {
|
||||
type: 'content_block_delta',
|
||||
index: active.index,
|
||||
@@ -599,25 +868,56 @@ async function* openaiStreamToAnthropic(
|
||||
if (choice.finish_reason && !hasProcessedFinishReason) {
|
||||
hasProcessedFinishReason = true
|
||||
|
||||
// Close any open thinking block that wasn't closed by content transition
|
||||
if (hasEmittedThinkingStart && !hasClosedThinking) {
|
||||
yield { type: 'content_block_stop', index: contentBlockIndex }
|
||||
contentBlockIndex++
|
||||
hasClosedThinking = true
|
||||
}
|
||||
// Close any open content blocks
|
||||
if (hasEmittedContentStart) {
|
||||
yield {
|
||||
type: 'content_block_stop',
|
||||
index: contentBlockIndex,
|
||||
}
|
||||
yield* closeActiveContentBlock()
|
||||
}
|
||||
// Close active tool calls
|
||||
for (const [, tc] of activeToolCalls) {
|
||||
if (tc.normalizeAtStop) {
|
||||
let partialJson: string
|
||||
if (choice.finish_reason === 'length') {
|
||||
// Truncated by max tokens — preserve raw buffer to avoid
|
||||
// turning an incomplete tool call into an executable command
|
||||
partialJson = tc.jsonBuffer
|
||||
} else {
|
||||
const repairedStructuredJson = repairPossiblyTruncatedObjectJson(
|
||||
tc.jsonBuffer,
|
||||
)
|
||||
if (repairedStructuredJson) {
|
||||
partialJson = repairedStructuredJson
|
||||
} else {
|
||||
partialJson = JSON.stringify(
|
||||
normalizeToolArguments(tc.name, tc.jsonBuffer),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
yield {
|
||||
type: 'content_block_delta',
|
||||
index: tc.index,
|
||||
delta: {
|
||||
type: 'input_json_delta',
|
||||
partial_json: partialJson,
|
||||
},
|
||||
}
|
||||
yield { type: 'content_block_stop', index: tc.index }
|
||||
continue
|
||||
}
|
||||
|
||||
let suffixToAdd = ''
|
||||
if (tc.jsonBuffer) {
|
||||
try {
|
||||
JSON.parse(tc.jsonBuffer)
|
||||
} catch {
|
||||
const str = tc.jsonBuffer.trimEnd()
|
||||
const combinations = [
|
||||
'}', '"}', ']}', '"]}', '}}', '"}}', ']}}', '"]}}', '"]}]}', '}]}'
|
||||
]
|
||||
for (const combo of combinations) {
|
||||
for (const combo of JSON_REPAIR_SUFFIXES) {
|
||||
try {
|
||||
JSON.parse(str + combo)
|
||||
suffixToAdd = combo
|
||||
@@ -723,7 +1023,7 @@ class OpenAIShimMessages {
|
||||
private providerOverride?: { model: string; baseURL: string; apiKey: string }
|
||||
|
||||
constructor(defaultHeaders: Record<string, string>, reasoningEffort?: 'low' | 'medium' | 'high' | 'xhigh', providerOverride?: { model: string; baseURL: string; apiKey: string }) {
|
||||
this.defaultHeaders = defaultHeaders
|
||||
this.defaultHeaders = filterAnthropicHeaders(defaultHeaders)
|
||||
this.reasoningEffort = reasoningEffort
|
||||
this.providerOverride = providerOverride
|
||||
}
|
||||
@@ -742,8 +1042,9 @@ class OpenAIShimMessages {
|
||||
httpResponse = response
|
||||
|
||||
if (params.stream) {
|
||||
const isResponsesStream = response.url?.includes('/responses')
|
||||
return new OpenAIShimStream(
|
||||
request.transport === 'codex_responses'
|
||||
(request.transport === 'codex_responses' || isResponsesStream)
|
||||
? codexStreamToAnthropic(response, request.resolvedModel)
|
||||
: openaiStreamToAnthropic(response, request.resolvedModel),
|
||||
)
|
||||
@@ -757,8 +1058,38 @@ class OpenAIShimMessages {
|
||||
)
|
||||
}
|
||||
|
||||
const data = await response.json()
|
||||
return self._convertNonStreamingResponse(data, request.resolvedModel)
|
||||
const isResponsesNonStream = response.url?.includes('/responses')
|
||||
if (isResponsesNonStream || (request.transport === 'chat_completions' && isGithubModelsMode())) {
|
||||
const contentType = response.headers.get('content-type') ?? ''
|
||||
if (contentType.includes('application/json')) {
|
||||
const parsed = await response.json() as Record<string, unknown>
|
||||
if (
|
||||
parsed &&
|
||||
typeof parsed === 'object' &&
|
||||
('output' in parsed || 'incomplete_details' in parsed)
|
||||
) {
|
||||
return convertCodexResponseToAnthropicMessage(
|
||||
parsed,
|
||||
request.resolvedModel,
|
||||
)
|
||||
}
|
||||
return self._convertNonStreamingResponse(parsed, request.resolvedModel)
|
||||
}
|
||||
}
|
||||
|
||||
const contentType = response.headers.get('content-type') ?? ''
|
||||
if (contentType.includes('application/json')) {
|
||||
const data = await response.json()
|
||||
return self._convertNonStreamingResponse(data, request.resolvedModel)
|
||||
}
|
||||
|
||||
const textBody = await response.text().catch(() => '')
|
||||
throw APIError.generate(
|
||||
response.status,
|
||||
undefined,
|
||||
`OpenAI API error ${response.status}: unexpected response: ${textBody.slice(0, 500)}`,
|
||||
response.headers as unknown as Headers,
|
||||
)
|
||||
})()
|
||||
|
||||
; (promise as unknown as Record<string, unknown>).withResponse =
|
||||
@@ -780,7 +1111,36 @@ class OpenAIShimMessages {
|
||||
params: ShimCreateParams,
|
||||
options?: { signal?: AbortSignal; headers?: Record<string, string> },
|
||||
): Promise<Response> {
|
||||
if (request.transport === 'codex_responses') {
|
||||
const githubEndpointType = getGithubEndpointType(request.baseUrl)
|
||||
const isGithubMode = isGithubModelsMode()
|
||||
const isGithubWithCodexTransport = isGithubMode && request.transport === 'codex_responses'
|
||||
const isGithubCopilotEndpoint = isGithubMode && githubEndpointType === 'copilot'
|
||||
|
||||
if (isGithubWithCodexTransport) {
|
||||
const apiKey = this.providerOverride?.apiKey ?? process.env.OPENAI_API_KEY ?? ''
|
||||
if (!apiKey) {
|
||||
throw new Error(
|
||||
'GitHub Copilot auth is required. Run /onboard-github to sign in.',
|
||||
)
|
||||
}
|
||||
|
||||
return performCodexRequest({
|
||||
request,
|
||||
credentials: {
|
||||
apiKey,
|
||||
source: 'env',
|
||||
},
|
||||
params,
|
||||
defaultHeaders: {
|
||||
...this.defaultHeaders,
|
||||
...filterAnthropicHeaders(options?.headers),
|
||||
...COPILOT_HEADERS,
|
||||
},
|
||||
signal: options?.signal,
|
||||
})
|
||||
}
|
||||
|
||||
if (request.transport === 'codex_responses' && !isGithubMode) {
|
||||
const credentials = resolveCodexApiCredentials()
|
||||
if (!credentials.apiKey) {
|
||||
const authHint = credentials.authPath
|
||||
@@ -805,7 +1165,7 @@ class OpenAIShimMessages {
|
||||
params,
|
||||
defaultHeaders: {
|
||||
...this.defaultHeaders,
|
||||
...(options?.headers ?? {}),
|
||||
...filterAnthropicHeaders(options?.headers),
|
||||
},
|
||||
signal: options?.signal,
|
||||
})
|
||||
@@ -832,6 +1192,7 @@ class OpenAIShimMessages {
|
||||
model: request.resolvedModel,
|
||||
messages: openaiMessages,
|
||||
stream: params.stream ?? false,
|
||||
store: false,
|
||||
}
|
||||
// Convert max_tokens to max_completion_tokens for OpenAI API compatibility.
|
||||
// Azure OpenAI requires max_completion_tokens and does not accept max_tokens.
|
||||
@@ -854,11 +1215,22 @@ class OpenAIShimMessages {
|
||||
}
|
||||
|
||||
const isGithub = isGithubModelsMode()
|
||||
if (isGithub && body.max_completion_tokens !== undefined) {
|
||||
const isMistral = isMistralMode()
|
||||
|
||||
const githubEndpointType = getGithubEndpointType(request.baseUrl)
|
||||
const isGithubCopilot = isGithub && githubEndpointType === 'copilot'
|
||||
const isGithubModels = isGithub && (githubEndpointType === 'models' || githubEndpointType === 'custom')
|
||||
|
||||
if ((isGithub || isMistral) && body.max_completion_tokens !== undefined) {
|
||||
body.max_tokens = body.max_completion_tokens
|
||||
delete body.max_completion_tokens
|
||||
}
|
||||
|
||||
// mistral also doesn't recognize body.store
|
||||
if (isMistral) {
|
||||
delete body.store
|
||||
}
|
||||
|
||||
if (params.temperature !== undefined) body.temperature = params.temperature
|
||||
if (params.top_p !== undefined) body.top_p = params.top_p
|
||||
|
||||
@@ -893,12 +1265,11 @@ class OpenAIShimMessages {
|
||||
const headers: Record<string, string> = {
|
||||
'Content-Type': 'application/json',
|
||||
...this.defaultHeaders,
|
||||
...(options?.headers ?? {}),
|
||||
...filterAnthropicHeaders(options?.headers),
|
||||
}
|
||||
|
||||
const isGemini = isEnvTruthy(process.env.CLAUDE_CODE_USE_GEMINI)
|
||||
const apiKey =
|
||||
this.providerOverride?.apiKey ?? process.env.OPENAI_API_KEY ?? ''
|
||||
const apiKey = this.providerOverride?.apiKey ?? process.env.OPENAI_API_KEY ?? ''
|
||||
// Detect Azure endpoints by hostname (not raw URL) to prevent bypass via
|
||||
// path segments like https://evil.com/cognitiveservices.azure.com/
|
||||
let isAzure = false
|
||||
@@ -919,15 +1290,17 @@ class OpenAIShimMessages {
|
||||
const geminiCredential = await resolveGeminiCredential(process.env)
|
||||
if (geminiCredential.kind !== 'none') {
|
||||
headers.Authorization = `Bearer ${geminiCredential.credential}`
|
||||
if (geminiCredential.projectId) {
|
||||
if (geminiCredential.kind !== 'api-key' && 'projectId' in geminiCredential && geminiCredential.projectId) {
|
||||
headers['x-goog-user-project'] = geminiCredential.projectId
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (isGithub) {
|
||||
headers.Accept = 'application/vnd.github.v3+json'
|
||||
headers['X-GitHub-Api-Version'] = GITHUB_API_VERSION
|
||||
if (isGithubCopilot) {
|
||||
Object.assign(headers, COPILOT_HEADERS)
|
||||
} else if (isGithubModels) {
|
||||
headers['Accept'] = 'application/vnd.github+json'
|
||||
headers['X-GitHub-Api-Version'] = '2022-11-28'
|
||||
}
|
||||
|
||||
// Build the chat completions URL
|
||||
@@ -979,9 +1352,83 @@ class OpenAIShimMessages {
|
||||
await sleepMs(delaySec * 1000)
|
||||
continue
|
||||
}
|
||||
// Read body exactly once here — Response body is a stream that can only
|
||||
// be consumed a single time.
|
||||
const errorBody = await response.text().catch(() => 'unknown error')
|
||||
const rateHint =
|
||||
isGithub && response.status === 429 ? formatRetryAfterHint(response) : ''
|
||||
|
||||
// If GitHub Copilot returns error about /chat/completions,
|
||||
// try the /responses endpoint (needed for GPT-5+ models)
|
||||
if (isGithub && response.status === 400) {
|
||||
if (errorBody.includes('/chat/completions') || errorBody.includes('not accessible')) {
|
||||
const responsesUrl = `${request.baseUrl}/responses`
|
||||
const responsesBody: Record<string, unknown> = {
|
||||
model: request.resolvedModel,
|
||||
input: convertAnthropicMessagesToResponsesInput(
|
||||
params.messages as Array<{
|
||||
role?: string
|
||||
message?: { role?: string; content?: unknown }
|
||||
content?: unknown
|
||||
}>,
|
||||
),
|
||||
stream: params.stream ?? false,
|
||||
store: false,
|
||||
}
|
||||
|
||||
if (!Array.isArray(responsesBody.input) || responsesBody.input.length === 0) {
|
||||
responsesBody.input = [
|
||||
{
|
||||
type: 'message',
|
||||
role: 'user',
|
||||
content: [{ type: 'input_text', text: '' }],
|
||||
},
|
||||
]
|
||||
}
|
||||
|
||||
const systemText = convertSystemPrompt(params.system)
|
||||
if (systemText) {
|
||||
responsesBody.instructions = systemText
|
||||
}
|
||||
|
||||
if (body.max_tokens !== undefined) {
|
||||
responsesBody.max_output_tokens = body.max_tokens
|
||||
}
|
||||
|
||||
if (params.tools && params.tools.length > 0) {
|
||||
const convertedTools = convertToolsToResponsesTools(
|
||||
params.tools as Array<{
|
||||
name?: string
|
||||
description?: string
|
||||
input_schema?: Record<string, unknown>
|
||||
}>,
|
||||
)
|
||||
if (convertedTools.length > 0) {
|
||||
responsesBody.tools = convertedTools
|
||||
}
|
||||
}
|
||||
|
||||
const responsesResponse = await fetch(responsesUrl, {
|
||||
method: 'POST',
|
||||
headers,
|
||||
body: JSON.stringify(responsesBody),
|
||||
signal: options?.signal,
|
||||
})
|
||||
if (responsesResponse.ok) {
|
||||
return responsesResponse
|
||||
}
|
||||
const responsesErrorBody = await responsesResponse.text().catch(() => 'unknown error')
|
||||
let responsesErrorResponse: object | undefined
|
||||
try { responsesErrorResponse = JSON.parse(responsesErrorBody) } catch { /* raw text */ }
|
||||
throw APIError.generate(
|
||||
responsesResponse.status,
|
||||
responsesErrorResponse,
|
||||
`OpenAI API error ${responsesResponse.status}: ${responsesErrorBody}`,
|
||||
responsesResponse.headers,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
let errorResponse: object | undefined
|
||||
try { errorResponse = JSON.parse(errorBody) } catch { /* raw text */ }
|
||||
throw APIError.generate(
|
||||
@@ -1009,6 +1456,7 @@ class OpenAIShimMessages {
|
||||
| string
|
||||
| null
|
||||
| Array<{ type?: string; text?: string }>
|
||||
reasoning_content?: string | null
|
||||
tool_calls?: Array<{
|
||||
id: string
|
||||
function: { name: string; arguments: string }
|
||||
@@ -1030,9 +1478,22 @@ class OpenAIShimMessages {
|
||||
const choice = data.choices?.[0]
|
||||
const content: Array<Record<string, unknown>> = []
|
||||
|
||||
const rawContent = choice?.message?.content
|
||||
// Some reasoning models (e.g. GLM-5) put their chain-of-thought in
|
||||
// reasoning_content while content stays null. Preserve it as a thinking
|
||||
// block, but do not surface it as visible assistant text.
|
||||
const reasoningText = choice?.message?.reasoning_content
|
||||
if (typeof reasoningText === 'string' && reasoningText) {
|
||||
content.push({ type: 'thinking', thinking: reasoningText })
|
||||
}
|
||||
const rawContent =
|
||||
choice?.message?.content !== '' && choice?.message?.content != null
|
||||
? choice?.message?.content
|
||||
: null
|
||||
if (typeof rawContent === 'string' && rawContent) {
|
||||
content.push({ type: 'text', text: rawContent })
|
||||
content.push({
|
||||
type: 'text',
|
||||
text: stripLeakedReasoningPreamble(rawContent),
|
||||
})
|
||||
} else if (Array.isArray(rawContent) && rawContent.length > 0) {
|
||||
const parts: string[] = []
|
||||
for (const part of rawContent) {
|
||||
@@ -1047,24 +1508,29 @@ class OpenAIShimMessages {
|
||||
}
|
||||
const joined = parts.join('\n')
|
||||
if (joined) {
|
||||
content.push({ type: 'text', text: joined })
|
||||
content.push({
|
||||
type: 'text',
|
||||
text: stripLeakedReasoningPreamble(joined),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
if (choice?.message?.tool_calls) {
|
||||
for (const tc of choice.message.tool_calls) {
|
||||
let input: unknown
|
||||
try {
|
||||
input = JSON.parse(tc.function.arguments)
|
||||
} catch {
|
||||
input = { raw: tc.function.arguments }
|
||||
}
|
||||
const input = normalizeToolArguments(
|
||||
tc.function.name,
|
||||
tc.function.arguments,
|
||||
)
|
||||
content.push({
|
||||
type: 'tool_use',
|
||||
id: tc.id,
|
||||
name: tc.function.name,
|
||||
input,
|
||||
...(tc.extra_content ? { extra_content: tc.extra_content } : {}),
|
||||
// Extract Gemini signature from extra_content
|
||||
...((tc.extra_content?.google as any)?.thought_signature
|
||||
? { signature: (tc.extra_content.google as any).thought_signature }
|
||||
: {}),
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -1135,8 +1601,15 @@ export function createOpenAIShimClient(options: {
|
||||
if (process.env.GEMINI_MODEL && !process.env.OPENAI_MODEL) {
|
||||
process.env.OPENAI_MODEL = process.env.GEMINI_MODEL
|
||||
}
|
||||
} else if (isEnvTruthy(process.env.CLAUDE_CODE_USE_MISTRAL)) {
|
||||
process.env.OPENAI_BASE_URL =
|
||||
process.env.MISTRAL_BASE_URL ?? 'https://api.mistral.ai/v1'
|
||||
process.env.OPENAI_API_KEY = process.env.MISTRAL_API_KEY
|
||||
if (process.env.MISTRAL_MODEL) {
|
||||
process.env.OPENAI_MODEL = process.env.MISTRAL_MODEL
|
||||
}
|
||||
} else if (isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB)) {
|
||||
process.env.OPENAI_BASE_URL ??= GITHUB_MODELS_DEFAULT_BASE
|
||||
process.env.OPENAI_BASE_URL ??= GITHUB_COPILOT_BASE
|
||||
process.env.OPENAI_API_KEY ??=
|
||||
process.env.GITHUB_TOKEN ?? process.env.GH_TOKEN ?? ''
|
||||
}
|
||||
|
||||
@@ -23,6 +23,9 @@ test.each([
|
||||
['github:gpt-4o', 'gpt-4o'],
|
||||
['gpt-4o', 'gpt-4o'],
|
||||
['github:copilot?reasoning=high', DEFAULT_GITHUB_MODELS_API_MODEL],
|
||||
// normalizeGithubModelsApiModel preserves provider prefix for models.github.ai compatibility
|
||||
['github:openai/gpt-4.1', 'openai/gpt-4.1'],
|
||||
['openai/gpt-4.1', 'openai/gpt-4.1'],
|
||||
] as const)('normalizeGithubModelsApiModel(%s) -> %s', (input, expected) => {
|
||||
expect(normalizeGithubModelsApiModel(input)).toBe(expected)
|
||||
})
|
||||
@@ -34,6 +37,20 @@ test('resolveProviderRequest applies GitHub normalization when CLAUDE_CODE_USE_G
|
||||
expect(r.transport).toBe('chat_completions')
|
||||
})
|
||||
|
||||
test('resolveProviderRequest routes GitHub GPT-5 codex models to responses transport', () => {
|
||||
process.env.CLAUDE_CODE_USE_GITHUB = '1'
|
||||
const r = resolveProviderRequest({ model: 'gpt-5.3-codex' })
|
||||
expect(r.resolvedModel).toBe('gpt-5.3-codex')
|
||||
expect(r.transport).toBe('codex_responses')
|
||||
})
|
||||
|
||||
test('resolveProviderRequest keeps gpt-5-mini on chat_completions for GitHub', () => {
|
||||
process.env.CLAUDE_CODE_USE_GITHUB = '1'
|
||||
const r = resolveProviderRequest({ model: 'gpt-5-mini' })
|
||||
expect(r.resolvedModel).toBe('gpt-5-mini')
|
||||
expect(r.transport).toBe('chat_completions')
|
||||
})
|
||||
|
||||
test('resolveProviderRequest leaves model unchanged without GitHub flag', () => {
|
||||
delete process.env.CLAUDE_CODE_USE_GITHUB
|
||||
const r = resolveProviderRequest({ model: 'github:gpt-4o' })
|
||||
|
||||
@@ -1,6 +1,22 @@
|
||||
import { expect, test } from 'bun:test'
|
||||
import { afterEach, expect, test } from 'bun:test'
|
||||
|
||||
import { isLocalProviderUrl } from './providerConfig.js'
|
||||
import {
|
||||
getAdditionalModelOptionsCacheScope,
|
||||
isLocalProviderUrl,
|
||||
resolveProviderRequest,
|
||||
} from './providerConfig.js'
|
||||
|
||||
const originalEnv = {
|
||||
CLAUDE_CODE_USE_OPENAI: process.env.CLAUDE_CODE_USE_OPENAI,
|
||||
OPENAI_BASE_URL: process.env.OPENAI_BASE_URL,
|
||||
OPENAI_MODEL: process.env.OPENAI_MODEL,
|
||||
}
|
||||
|
||||
afterEach(() => {
|
||||
process.env.CLAUDE_CODE_USE_OPENAI = originalEnv.CLAUDE_CODE_USE_OPENAI
|
||||
process.env.OPENAI_BASE_URL = originalEnv.OPENAI_BASE_URL
|
||||
process.env.OPENAI_MODEL = originalEnv.OPENAI_MODEL
|
||||
})
|
||||
|
||||
test('treats localhost endpoints as local', () => {
|
||||
expect(isLocalProviderUrl('http://localhost:11434/v1')).toBe(true)
|
||||
@@ -33,3 +49,37 @@ test('treats public hosts as remote', () => {
|
||||
expect(isLocalProviderUrl('https://example.com/v1')).toBe(false)
|
||||
expect(isLocalProviderUrl('http://[2001:4860:4860::8888]:11434/v1')).toBe(false)
|
||||
})
|
||||
|
||||
test('creates a cache scope for local openai-compatible providers', () => {
|
||||
process.env.CLAUDE_CODE_USE_OPENAI = '1'
|
||||
process.env.OPENAI_BASE_URL = 'http://localhost:1234/v1'
|
||||
process.env.OPENAI_MODEL = 'llama-3.2-3b-instruct'
|
||||
|
||||
expect(getAdditionalModelOptionsCacheScope()).toBe(
|
||||
'openai:http://localhost:1234/v1',
|
||||
)
|
||||
})
|
||||
|
||||
test('keeps codex alias models on chat completions for local openai-compatible providers', () => {
|
||||
process.env.CLAUDE_CODE_USE_OPENAI = '1'
|
||||
process.env.OPENAI_BASE_URL = 'http://127.0.0.1:8080/v1'
|
||||
process.env.OPENAI_MODEL = 'gpt-5.4'
|
||||
|
||||
expect(resolveProviderRequest()).toMatchObject({
|
||||
transport: 'chat_completions',
|
||||
requestedModel: 'gpt-5.4',
|
||||
resolvedModel: 'gpt-5.4',
|
||||
baseUrl: 'http://127.0.0.1:8080/v1',
|
||||
})
|
||||
expect(getAdditionalModelOptionsCacheScope()).toBe(
|
||||
'openai:http://127.0.0.1:8080/v1',
|
||||
)
|
||||
})
|
||||
|
||||
test('skips local model cache scope for remote openai-compatible providers', () => {
|
||||
process.env.CLAUDE_CODE_USE_OPENAI = '1'
|
||||
process.env.OPENAI_BASE_URL = 'https://api.openai.com/v1'
|
||||
process.env.OPENAI_MODEL = 'gpt-4o'
|
||||
|
||||
expect(getAdditionalModelOptionsCacheScope()).toBeNull()
|
||||
})
|
||||
|
||||
@@ -7,8 +7,9 @@ import { isEnvTruthy } from '../../utils/envUtils.js'
|
||||
|
||||
export const DEFAULT_OPENAI_BASE_URL = 'https://api.openai.com/v1'
|
||||
export const DEFAULT_CODEX_BASE_URL = 'https://chatgpt.com/backend-api/codex'
|
||||
/** Default GitHub Models API model when user selects copilot / github:copilot */
|
||||
export const DEFAULT_GITHUB_MODELS_API_MODEL = 'openai/gpt-4.1'
|
||||
export const DEFAULT_MISTRAL_BASE_URL = 'https://api.mistral.ai/v1'
|
||||
/** Default GitHub Copilot API model when user selects copilot / github:copilot */
|
||||
export const DEFAULT_GITHUB_MODELS_API_MODEL = 'gpt-4o'
|
||||
|
||||
const CODEX_ALIAS_MODELS: Record<
|
||||
string,
|
||||
@@ -219,6 +220,29 @@ export function isCodexAlias(model: string): boolean {
|
||||
return base in CODEX_ALIAS_MODELS
|
||||
}
|
||||
|
||||
export function shouldUseCodexTransport(
|
||||
model: string,
|
||||
baseUrl: string | undefined,
|
||||
): boolean {
|
||||
const explicitBaseUrl = asEnvUrl(baseUrl)
|
||||
return isCodexBaseUrl(explicitBaseUrl) || (!explicitBaseUrl && isCodexAlias(model))
|
||||
}
|
||||
|
||||
function shouldUseGithubResponsesApi(model: string): boolean {
|
||||
const normalized = model.trim().toLowerCase()
|
||||
|
||||
// Codex-branded models require /responses.
|
||||
if (normalized.includes('codex')) return true
|
||||
|
||||
// GPT-5+ models use /responses, except gpt-5-mini.
|
||||
const match = /^gpt-(\d+)/.exec(normalized)
|
||||
if (!match) return false
|
||||
const major = Number(match[1])
|
||||
if (major < 5) return false
|
||||
if (normalized.startsWith('gpt-5-mini')) return false
|
||||
return true
|
||||
}
|
||||
|
||||
export function isLocalProviderUrl(baseUrl: string | undefined): boolean {
|
||||
if (!baseUrl) return false
|
||||
try {
|
||||
@@ -272,19 +296,61 @@ export function isCodexBaseUrl(baseUrl: string | undefined): boolean {
|
||||
}
|
||||
|
||||
/**
|
||||
* Normalize user model string for GitHub Models inference (models.github.ai).
|
||||
* Mirrors runtime devsper `github._normalize_model_id`.
|
||||
* Normalize user model string for GitHub Copilot API inference.
|
||||
* Mirrors how Copilot resolves model IDs internally.
|
||||
*/
|
||||
export function normalizeGithubModelsApiModel(requestedModel: string): string {
|
||||
export function normalizeGithubCopilotModel(requestedModel: string): string {
|
||||
const noQuery = requestedModel.split('?', 1)[0] ?? requestedModel
|
||||
const segment =
|
||||
noQuery.includes(':') ? noQuery.split(':', 2)[1]!.trim() : noQuery.trim()
|
||||
if (!segment || segment.toLowerCase() === 'copilot') {
|
||||
return DEFAULT_GITHUB_MODELS_API_MODEL
|
||||
}
|
||||
// Strip provider prefix if present (e.g., "openai/gpt-4o" -> "gpt-4o")
|
||||
const slashIndex = segment.indexOf('/')
|
||||
if (slashIndex !== -1) {
|
||||
return segment.slice(slashIndex + 1)
|
||||
}
|
||||
return segment
|
||||
}
|
||||
|
||||
/**
|
||||
* Normalize user model string for GitHub Models API inference.
|
||||
* Only normalizes the default alias, preserves provider-qualified models.
|
||||
*/
|
||||
export function normalizeGithubModelsApiModel(requestedModel: string): string {
|
||||
const noQuery = requestedModel.split('?', 1)[0] ?? requestedModel
|
||||
const segment =
|
||||
noQuery.includes(':') ? noQuery.split(':', 2)[1]!.trim() : noQuery.trim()
|
||||
// Only normalize the default alias for GitHub Models
|
||||
if (!segment || segment.toLowerCase() === 'copilot') {
|
||||
return DEFAULT_GITHUB_MODELS_API_MODEL
|
||||
}
|
||||
// Preserve provider prefix for GitHub Models (e.g., "openai/gpt-4.1" stays as-is)
|
||||
return segment
|
||||
}
|
||||
|
||||
export const GITHUB_COPILOT_BASE_URL = 'https://api.githubcopilot.com'
|
||||
export const GITHUB_MODELS_BASE_URL = 'https://models.github.ai/inference'
|
||||
|
||||
export function getGithubEndpointType(
|
||||
baseUrl: string | undefined,
|
||||
): 'copilot' | 'models' | 'custom' {
|
||||
if (!baseUrl) return 'copilot'
|
||||
try {
|
||||
const hostname = new URL(baseUrl).hostname.toLowerCase()
|
||||
if (hostname === 'api.githubcopilot.com') {
|
||||
return 'copilot'
|
||||
}
|
||||
if (hostname === 'models.github.ai' || hostname.endsWith('.github.ai')) {
|
||||
return 'models'
|
||||
}
|
||||
return 'custom'
|
||||
} catch {
|
||||
return 'copilot'
|
||||
}
|
||||
}
|
||||
|
||||
export function resolveProviderRequest(options?: {
|
||||
model?: string
|
||||
baseUrl?: string
|
||||
@@ -292,51 +358,94 @@ export function resolveProviderRequest(options?: {
|
||||
reasoningEffortOverride?: ReasoningEffort
|
||||
}): ResolvedProviderRequest {
|
||||
const isGithubMode = isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB)
|
||||
const isMistralMode = isEnvTruthy(process.env.CLAUDE_CODE_USE_MISTRAL)
|
||||
const requestedModel =
|
||||
options?.model?.trim() ||
|
||||
process.env.OPENAI_MODEL?.trim() ||
|
||||
(isMistralMode
|
||||
? process.env.MISTRAL_MODEL?.trim()
|
||||
: process.env.OPENAI_MODEL?.trim()) ||
|
||||
options?.fallbackModel?.trim() ||
|
||||
(isGithubMode ? 'github:copilot' : 'gpt-4o')
|
||||
const descriptor = parseModelDescriptor(requestedModel)
|
||||
const rawBaseUrl =
|
||||
asEnvUrl(options?.baseUrl) ??
|
||||
asEnvUrl(process.env.OPENAI_BASE_URL) ??
|
||||
asEnvUrl(
|
||||
isMistralMode ? (process.env.MISTRAL_BASE_URL ?? DEFAULT_MISTRAL_BASE_URL) : process.env.OPENAI_BASE_URL,
|
||||
) ??
|
||||
asEnvUrl(process.env.OPENAI_API_BASE)
|
||||
// Use Codex transport only when:
|
||||
// - the base URL is explicitly the Codex endpoint, OR
|
||||
// - the model is a Codex alias AND no custom base URL has been set
|
||||
// A custom OPENAI_BASE_URL (e.g. Azure, OpenRouter) always wins over
|
||||
// model-name-based Codex detection to prevent auth failures (#200, #203).
|
||||
|
||||
const githubEndpointType = isGithubMode
|
||||
? getGithubEndpointType(rawBaseUrl)
|
||||
: 'custom'
|
||||
const isGithubCopilot = isGithubMode && githubEndpointType === 'copilot'
|
||||
const isGithubModels = isGithubMode && githubEndpointType === 'models'
|
||||
const isGithubCustom = isGithubMode && githubEndpointType === 'custom'
|
||||
|
||||
const githubResolvedModel = isGithubMode
|
||||
? normalizeGithubModelsApiModel(requestedModel)
|
||||
: requestedModel
|
||||
|
||||
const transport: ProviderTransport =
|
||||
isCodexBaseUrl(rawBaseUrl) || (!rawBaseUrl && isCodexAlias(requestedModel))
|
||||
shouldUseCodexTransport(requestedModel, rawBaseUrl) ||
|
||||
(isGithubCopilot && shouldUseGithubResponsesApi(githubResolvedModel))
|
||||
? 'codex_responses'
|
||||
: 'chat_completions'
|
||||
|
||||
const resolvedModel =
|
||||
transport === 'chat_completions' &&
|
||||
isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB)
|
||||
? normalizeGithubModelsApiModel(requestedModel)
|
||||
: descriptor.baseModel
|
||||
// For GitHub Copilot API, normalize to real model ID (e.g., "github:copilot" -> "gpt-4o")
|
||||
// For GitHub Models/custom endpoints:
|
||||
// - Normalize default alias (github:copilot -> gpt-4o)
|
||||
// - Preserve provider-qualified models (openai/gpt-4.1 stays as-is)
|
||||
const resolvedModel = isGithubCopilot
|
||||
? normalizeGithubCopilotModel(descriptor.baseModel)
|
||||
: (isGithubModels || isGithubCustom
|
||||
? normalizeGithubModelsApiModel(descriptor.baseModel)
|
||||
: descriptor.baseModel)
|
||||
|
||||
const reasoning = options?.reasoningEffortOverride
|
||||
? { effort: options.reasoningEffortOverride }
|
||||
: descriptor.reasoning
|
||||
|
||||
|
||||
return {
|
||||
transport,
|
||||
requestedModel,
|
||||
resolvedModel,
|
||||
baseUrl:
|
||||
(rawBaseUrl ??
|
||||
(transport === 'codex_responses'
|
||||
? DEFAULT_CODEX_BASE_URL
|
||||
: DEFAULT_OPENAI_BASE_URL)
|
||||
(isGithubCopilot && transport === 'codex_responses'
|
||||
? GITHUB_COPILOT_BASE_URL
|
||||
: (isGithubMode
|
||||
? GITHUB_COPILOT_BASE_URL
|
||||
: DEFAULT_OPENAI_BASE_URL))
|
||||
).replace(/\/+$/, ''),
|
||||
reasoning,
|
||||
}
|
||||
}
|
||||
|
||||
export function getAdditionalModelOptionsCacheScope(): string | null {
|
||||
if (!isEnvTruthy(process.env.CLAUDE_CODE_USE_OPENAI)) {
|
||||
if (!isEnvTruthy(process.env.CLAUDE_CODE_USE_GEMINI) &&
|
||||
!isEnvTruthy(process.env.CLAUDE_CODE_USE_MISTRAL) &&
|
||||
!isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB) &&
|
||||
!isEnvTruthy(process.env.CLAUDE_CODE_USE_BEDROCK) &&
|
||||
!isEnvTruthy(process.env.CLAUDE_CODE_USE_VERTEX) &&
|
||||
!isEnvTruthy(process.env.CLAUDE_CODE_USE_FOUNDRY)) {
|
||||
return 'firstParty'
|
||||
}
|
||||
return null
|
||||
}
|
||||
|
||||
const request = resolveProviderRequest()
|
||||
if (request.transport !== 'chat_completions') {
|
||||
return null
|
||||
}
|
||||
|
||||
if (!isLocalProviderUrl(request.baseUrl)) {
|
||||
return null
|
||||
}
|
||||
|
||||
return `openai:${request.baseUrl.toLowerCase()}`
|
||||
}
|
||||
|
||||
export function resolveCodexAuthPath(
|
||||
env: NodeJS.ProcessEnv = process.env,
|
||||
): string {
|
||||
|
||||
46
src/services/api/reasoningLeakSanitizer.test.ts
Normal file
46
src/services/api/reasoningLeakSanitizer.test.ts
Normal file
@@ -0,0 +1,46 @@
|
||||
import { describe, expect, test } from 'bun:test'
|
||||
|
||||
import {
|
||||
looksLikeLeakedReasoningPrefix,
|
||||
shouldBufferPotentialReasoningPrefix,
|
||||
stripLeakedReasoningPreamble,
|
||||
} from './reasoningLeakSanitizer.ts'
|
||||
|
||||
describe('reasoning leak sanitizer', () => {
|
||||
test('strips explicit internal reasoning preambles', () => {
|
||||
const text =
|
||||
'The user just said "hey" - a simple greeting. I should respond briefly and friendly.\n\nHey! How can I help you today?'
|
||||
|
||||
expect(looksLikeLeakedReasoningPrefix(text)).toBe(true)
|
||||
expect(stripLeakedReasoningPreamble(text)).toBe(
|
||||
'Hey! How can I help you today?',
|
||||
)
|
||||
})
|
||||
|
||||
test('does not strip normal user-facing advice that mentions "the user should"', () => {
|
||||
const text =
|
||||
'The user should reset their password immediately.\n\nHere are the steps...'
|
||||
|
||||
expect(looksLikeLeakedReasoningPrefix(text)).toBe(false)
|
||||
expect(shouldBufferPotentialReasoningPrefix(text)).toBe(false)
|
||||
expect(stripLeakedReasoningPreamble(text)).toBe(text)
|
||||
})
|
||||
|
||||
test('does not strip legitimate first-person advice about responding to an incident', () => {
|
||||
const text =
|
||||
'I need to respond to this security incident immediately. The system is compromised.\n\nHere are the remediation steps...'
|
||||
|
||||
expect(looksLikeLeakedReasoningPrefix(text)).toBe(false)
|
||||
expect(shouldBufferPotentialReasoningPrefix(text)).toBe(false)
|
||||
expect(stripLeakedReasoningPreamble(text)).toBe(text)
|
||||
})
|
||||
|
||||
test('does not strip legitimate first-person advice about answering a support ticket', () => {
|
||||
const text =
|
||||
'I need to answer the support ticket before end of day. The customer is waiting.\n\nHere is the response I drafted...'
|
||||
|
||||
expect(looksLikeLeakedReasoningPrefix(text)).toBe(false)
|
||||
expect(shouldBufferPotentialReasoningPrefix(text)).toBe(false)
|
||||
expect(stripLeakedReasoningPreamble(text)).toBe(text)
|
||||
})
|
||||
})
|
||||
54
src/services/api/reasoningLeakSanitizer.ts
Normal file
54
src/services/api/reasoningLeakSanitizer.ts
Normal file
@@ -0,0 +1,54 @@
const EXPLICIT_REASONING_START_RE =
  /^\s*(i should\b|i need to\b|let me think\b|the task\b|the request\b)/i

const EXPLICIT_REASONING_META_RE =
  /\b(user|request|question|prompt|message|task|greeting|small talk|briefly|friendly|concise)\b/i

const USER_META_START_RE =
  /^\s*the user\s+(just\s+)?(said|asked|is asking|wants|wanted|mentioned|seems|appears)\b/i

const USER_REASONING_RE =
  /^\s*the user\s+(just\s+)?(said|asked|is asking|wants|wanted|mentioned|seems|appears)\b[\s\S]*\b(i should|i need to|let me think|respond|reply|answer|greeting|small talk|briefly|friendly|concise)\b/i

export function shouldBufferPotentialReasoningPrefix(text: string): boolean {
  const normalized = text.trim()
  if (!normalized) return false

  if (looksLikeLeakedReasoningPrefix(normalized)) {
    return true
  }

  const hasParagraphBoundary = /\n\s*\n/.test(normalized)
  if (hasParagraphBoundary) {
    return false
  }

  return (
    EXPLICIT_REASONING_START_RE.test(normalized) ||
    USER_META_START_RE.test(normalized)
  )
}

export function looksLikeLeakedReasoningPrefix(text: string): boolean {
  const normalized = text.trim()
  if (!normalized) return false
  return (
    (EXPLICIT_REASONING_START_RE.test(normalized) &&
      EXPLICIT_REASONING_META_RE.test(normalized)) ||
    USER_REASONING_RE.test(normalized)
  )
}

export function stripLeakedReasoningPreamble(text: string): string {
  const normalized = text.replace(/\r\n/g, '\n')
  const parts = normalized.split(/\n\s*\n/)
  if (parts.length < 2) return text

  const first = parts[0]?.trim() ?? ''
  if (!looksLikeLeakedReasoningPrefix(first)) {
    return text
  }

  const remainder = parts.slice(1).join('\n\n').trim()
  return remainder || text
}
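For context, a minimal sketch of how these helpers could sit in a streaming text handler. The chunk/emit plumbing and the makeSanitizingSink name are hypothetical and not part of this diff; only the two imported functions are.

// Sketch (illustrative, assuming a push-based chunk stream): buffer the
// first chunk while it still looks like a leaked reasoning prefix, then
// strip the preamble once a paragraph boundary arrives. A real caller
// would also flush any remaining buffer at stream end.
import {
  shouldBufferPotentialReasoningPrefix,
  stripLeakedReasoningPreamble,
} from './reasoningLeakSanitizer.ts'

export function makeSanitizingSink(emit: (text: string) => void) {
  let buffer = ''
  let buffering = true
  return (chunk: string) => {
    if (!buffering) return emit(chunk)
    buffer += chunk
    if (shouldBufferPotentialReasoningPrefix(buffer) && !/\n\s*\n/.test(buffer)) {
      return // still ambiguous: keep buffering until we can decide
    }
    buffering = false
    emit(stripLeakedReasoningPreamble(buffer))
  }
}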
180 src/services/api/toolArgumentNormalization.test.ts Normal file
@@ -0,0 +1,180 @@
import { describe, expect, test } from 'bun:test'
import { normalizeToolArguments } from './toolArgumentNormalization'

describe('normalizeToolArguments', () => {
  describe('Bash tool', () => {
    test('wraps plain string into { command }', () => {
      expect(normalizeToolArguments('Bash', 'pwd')).toEqual({ command: 'pwd' })
    })

    test('wraps multi-word command', () => {
      expect(normalizeToolArguments('Bash', 'ls -la /tmp')).toEqual({
        command: 'ls -la /tmp',
      })
    })

    test('passes through structured JSON object', () => {
      expect(
        normalizeToolArguments('Bash', '{"command":"echo hi"}'),
      ).toEqual({ command: 'echo hi' })
    })

    test('returns empty object for blank string', () => {
      expect(normalizeToolArguments('Bash', '')).toEqual({})
      expect(normalizeToolArguments('Bash', ' ')).toEqual({})
    })

    test('returns parsed blank for JSON-encoded blank string', () => {
      expect(normalizeToolArguments('Bash', '""')).toEqual('')
      expect(normalizeToolArguments('Bash', '" "')).toEqual(' ')
    })

    test('returns empty object for malformed structured object literal', () => {
      expect(normalizeToolArguments('Bash', '{ "command": "pwd"')).toEqual({})
    })

    test.each([
      ['{command:"pwd"}'],
      ["{'command':'pwd'}"],
      ['{command: pwd}'],
    ])(
      'returns empty object for malformed object-shaped string %s (does not wrap into command)',
      (input) => {
        expect(normalizeToolArguments('Bash', input)).toEqual({})
      },
    )

    test.each([
      ['false', false],
      ['null', null],
      ['[]', [] as unknown[]],
      ['0', 0],
      ['true', true],
      ['123', 123],
    ])(
      'preserves JSON literal %s as-is (does not wrap into command)',
      (input, expected) => {
        expect(normalizeToolArguments('Bash', input)).toEqual(expected)
      },
    )

    test('wraps JSON-encoded string into { command }', () => {
      expect(normalizeToolArguments('Bash', '"pwd"')).toEqual({
        command: 'pwd',
      })
    })
  })

  describe('undefined arguments', () => {
    test('returns empty object for undefined', () => {
      expect(normalizeToolArguments('Bash', undefined)).toEqual({})
      expect(normalizeToolArguments('UnknownTool', undefined)).toEqual({})
    })
  })

  describe('Read tool', () => {
    test('wraps plain string into { file_path }', () => {
      expect(normalizeToolArguments('Read', '/home/user/file.txt')).toEqual({
        file_path: '/home/user/file.txt',
      })
    })

    test('wraps JSON-encoded string into { file_path }', () => {
      expect(normalizeToolArguments('Read', '"/home/user/file.txt"')).toEqual({
        file_path: '/home/user/file.txt',
      })
    })

    test('passes through structured JSON object', () => {
      expect(
        normalizeToolArguments('Read', '{"file_path":"/tmp/f.txt","limit":10}'),
      ).toEqual({ file_path: '/tmp/f.txt', limit: 10 })
    })
  })

  describe('Write tool', () => {
    test('wraps plain string into { file_path }', () => {
      expect(normalizeToolArguments('Write', '/tmp/out.txt')).toEqual({
        file_path: '/tmp/out.txt',
      })
    })

    test('passes through structured JSON object', () => {
      expect(
        normalizeToolArguments(
          'Write',
          '{"file_path":"/tmp/out.txt","content":"hello"}',
        ),
      ).toEqual({ file_path: '/tmp/out.txt', content: 'hello' })
    })
  })

  describe('Edit tool', () => {
    test('wraps plain string into { file_path }', () => {
      expect(normalizeToolArguments('Edit', '/tmp/edit.ts')).toEqual({
        file_path: '/tmp/edit.ts',
      })
    })

    test('passes through structured JSON object', () => {
      expect(
        normalizeToolArguments(
          'Edit',
          '{"file_path":"/tmp/f.ts","old_string":"a","new_string":"b"}',
        ),
      ).toEqual({ file_path: '/tmp/f.ts', old_string: 'a', new_string: 'b' })
    })
  })

  describe('Glob tool', () => {
    test('wraps plain string into { pattern }', () => {
      expect(normalizeToolArguments('Glob', '**/*.ts')).toEqual({
        pattern: '**/*.ts',
      })
    })

    test('passes through structured JSON object', () => {
      expect(
        normalizeToolArguments('Glob', '{"pattern":"*.js","path":"/src"}'),
      ).toEqual({ pattern: '*.js', path: '/src' })
    })
  })

  describe('Grep tool', () => {
    test('wraps plain string into { pattern }', () => {
      expect(normalizeToolArguments('Grep', 'TODO')).toEqual({
        pattern: 'TODO',
      })
    })

    test('passes through structured JSON object', () => {
      expect(
        normalizeToolArguments('Grep', '{"pattern":"fixme","path":"/src"}'),
      ).toEqual({ pattern: 'fixme', path: '/src' })
    })
  })

  describe('unknown tools', () => {
    test('returns empty object for plain string (no known field mapping)', () => {
      expect(normalizeToolArguments('UnknownTool', 'some value')).toEqual({})
    })

    test('passes through structured JSON object', () => {
      expect(
        normalizeToolArguments('UnknownTool', '{"key":"val"}'),
      ).toEqual({ key: 'val' })
    })

    test('preserves JSON literals as-is', () => {
      expect(normalizeToolArguments('UnknownTool', 'false')).toEqual(false)
      expect(normalizeToolArguments('UnknownTool', 'null')).toEqual(null)
      expect(normalizeToolArguments('UnknownTool', '[]')).toEqual([])
    })

    test('returns parsed string for JSON-encoded string on unknown tools', () => {
      expect(normalizeToolArguments('UnknownTool', '"hello"')).toEqual(
        'hello',
      )
    })
  })
})
69 src/services/api/toolArgumentNormalization.ts Normal file
@@ -0,0 +1,69 @@
const STRING_ARGUMENT_TOOL_FIELDS: Record<string, string> = {
  Bash: 'command',
  Read: 'file_path',
  Write: 'file_path',
  Edit: 'file_path',
  Glob: 'pattern',
  Grep: 'pattern',
}

function isBlankString(value: string): boolean {
  return value.trim().length === 0
}

function isLikelyStructuredObjectLiteral(value: string): boolean {
  // Match object-like patterns with key-value syntax:
  // {"key":, {key:, {'key':, { "key" :, etc.
  // But NOT bash compound commands like { pwd; } or { echo hi; }
  return /^\s*\{\s*['"]?\w+['"]?\s*:/.test(value)
}

function isRecord(value: unknown): value is Record<string, unknown> {
  return typeof value === 'object' && value !== null && !Array.isArray(value)
}

function getPlainStringToolArgumentField(toolName: string): string | null {
  return STRING_ARGUMENT_TOOL_FIELDS[toolName] ?? null
}

export function hasToolFieldMapping(toolName: string): boolean {
  return toolName in STRING_ARGUMENT_TOOL_FIELDS
}

function wrapPlainStringToolArguments(
  toolName: string,
  value: string,
): Record<string, string> | null {
  const field = getPlainStringToolArgumentField(toolName)
  if (!field) return null
  return { [field]: value }
}

export function normalizeToolArguments(
  toolName: string,
  rawArguments: string | undefined,
): unknown {
  if (rawArguments === undefined) return {}

  try {
    const parsed = JSON.parse(rawArguments)
    if (isRecord(parsed)) {
      return parsed
    }
    // Parsed as a non-object JSON value (string, number, boolean, null, array)
    if (typeof parsed === 'string' && !isBlankString(parsed)) {
      return wrapPlainStringToolArguments(toolName, parsed) ?? parsed
    }
    // For blank strings, booleans, null, arrays — pass through as-is
    // and let Zod schema validation produce a meaningful error
    return parsed
  } catch {
    // rawArguments is not valid JSON — treat as a plain string
    if (isBlankString(rawArguments) || isLikelyStructuredObjectLiteral(rawArguments)) {
      // Blank or looks like a malformed object literal — don't wrap into
      // a tool field to avoid turning garbage into executable input
      return {}
    }
    return wrapPlainStringToolArguments(toolName, rawArguments) ?? {}
  }
}
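For reference, a minimal sketch of where this normalization would sit relative to schema validation. The BashInputSchema below is a hypothetical stand-in for illustration; it is not part of this diff.

import { z } from 'zod/v4'
import { normalizeToolArguments } from './toolArgumentNormalization'

// Hypothetical input schema, for illustration only.
const BashInputSchema = z.object({ command: z.string().min(1) })

// Normalize first, then validate: a bare "pwd" from the model becomes
// { command: 'pwd' }, while a malformed object-shaped string becomes {}
// and fails validation with a meaningful Zod error instead of executing.
const normalized = normalizeToolArguments('Bash', 'pwd')
const parsed = BashInputSchema.safeParse(normalized)
console.log(parsed.success) // true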
@@ -1,4 +1,4 @@
-import { afterEach, describe, expect, mock, test } from 'bun:test'
+import { afterEach, beforeEach, describe, expect, mock, test } from 'bun:test'
 import { APIError } from '@anthropic-ai/sdk'
 
 // Helper to build a mock APIError with specific headers
@@ -15,15 +15,27 @@ function makeError(headers: Record<string, string>): APIError {
 
 // Save/restore env vars between tests
 const originalEnv = { ...process.env }
 
+const envKeys = [
+  'CLAUDE_CODE_USE_OPENAI',
+  'CLAUDE_CODE_USE_GEMINI',
+  'CLAUDE_CODE_USE_GITHUB',
+  'CLAUDE_CODE_USE_BEDROCK',
+  'CLAUDE_CODE_USE_VERTEX',
+  'CLAUDE_CODE_USE_FOUNDRY',
+  'OPENAI_MODEL',
+  'OPENAI_BASE_URL',
+  'OPENAI_API_BASE',
+] as const
+
+beforeEach(() => {
+  for (const key of envKeys) {
+    delete process.env[key]
+  }
+})
+
 afterEach(() => {
-  for (const key of [
-    'CLAUDE_CODE_USE_OPENAI',
-    'CLAUDE_CODE_USE_GEMINI',
-    'CLAUDE_CODE_USE_GITHUB',
-    'CLAUDE_CODE_USE_BEDROCK',
-    'CLAUDE_CODE_USE_VERTEX',
-    'CLAUDE_CODE_USE_FOUNDRY',
-  ]) {
+  for (const key of envKeys) {
     if (originalEnv[key] === undefined) delete process.env[key]
     else process.env[key] = originalEnv[key]
   }
 
106 src/services/autoFix/autoFixConfig.test.ts Normal file
@@ -0,0 +1,106 @@
import { describe, expect, test } from 'bun:test'
import { AutoFixConfigSchema, getAutoFixConfig } from './autoFixConfig.js'

describe('AutoFixConfigSchema', () => {
  test('parses valid full config', () => {
    const input = {
      enabled: true,
      lint: 'eslint . --fix',
      test: 'bun test',
      maxRetries: 3,
      timeout: 30000,
    }
    const result = AutoFixConfigSchema.safeParse(input)
    expect(result.success).toBe(true)
    if (result.success) {
      expect(result.data.enabled).toBe(true)
      expect(result.data.lint).toBe('eslint . --fix')
      expect(result.data.test).toBe('bun test')
      expect(result.data.maxRetries).toBe(3)
      expect(result.data.timeout).toBe(30000)
    }
  })

  test('parses minimal config with defaults', () => {
    const input = { enabled: true, lint: 'eslint .' }
    const result = AutoFixConfigSchema.safeParse(input)
    expect(result.success).toBe(true)
    if (result.success) {
      expect(result.data.maxRetries).toBe(3)
      expect(result.data.timeout).toBe(30000)
      expect(result.data.test).toBeUndefined()
    }
  })

  test('rejects config with enabled but no lint or test', () => {
    const input = { enabled: true }
    const result = AutoFixConfigSchema.safeParse(input)
    expect(result.success).toBe(false)
  })

  test('accepts disabled config without commands', () => {
    const input = { enabled: false }
    const result = AutoFixConfigSchema.safeParse(input)
    expect(result.success).toBe(true)
  })

  test('rejects negative maxRetries', () => {
    const input = { enabled: true, lint: 'eslint .', maxRetries: -1 }
    const result = AutoFixConfigSchema.safeParse(input)
    expect(result.success).toBe(false)
  })

  test('rejects maxRetries above 10', () => {
    const input = { enabled: true, lint: 'eslint .', maxRetries: 11 }
    const result = AutoFixConfigSchema.safeParse(input)
    expect(result.success).toBe(false)
  })
})

describe('getAutoFixConfig', () => {
  test('returns null when settings have no autoFix', () => {
    const result = getAutoFixConfig(undefined)
    expect(result).toBeNull()
  })

  test('returns null when autoFix is disabled', () => {
    const result = getAutoFixConfig({ enabled: false })
    expect(result).toBeNull()
  })

  test('returns parsed config when valid and enabled', () => {
    const result = getAutoFixConfig({ enabled: true, lint: 'eslint .' })
    expect(result).not.toBeNull()
    expect(result!.enabled).toBe(true)
    expect(result!.lint).toBe('eslint .')
  })
})

describe('SettingsSchema autoFix integration', () => {
  test('SettingsSchema accepts autoFix field', async () => {
    const { SettingsSchema } = await import('../../utils/settings/types.js')
    const settings = {
      autoFix: {
        enabled: true,
        lint: 'eslint .',
        test: 'bun test',
        maxRetries: 3,
        timeout: 30000,
      },
    }
    const result = SettingsSchema().safeParse(settings)
    expect(result.success).toBe(true)
  })

  test('SettingsSchema rejects invalid autoFix', async () => {
    const { SettingsSchema } = await import('../../utils/settings/types.js')
    const settings = {
      autoFix: {
        enabled: true,
        // missing lint and test - should fail refine
      },
    }
    const result = SettingsSchema().safeParse(settings)
    expect(result.success).toBe(false)
  })
})
52 src/services/autoFix/autoFixConfig.ts Normal file
@@ -0,0 +1,52 @@
import { z } from 'zod/v4'

export const AutoFixConfigSchema = z
  .object({
    enabled: z.boolean().describe('Whether auto-fix is enabled'),
    lint: z
      .string()
      .optional()
      .describe('Lint command to run after file edits (e.g. "eslint . --fix")'),
    test: z
      .string()
      .optional()
      .describe('Test command to run after file edits (e.g. "bun test")'),
    maxRetries: z
      .number()
      .int()
      .min(0)
      .max(10)
      .default(3)
      .describe('Maximum number of auto-fix retry attempts (default: 3)'),
    timeout: z
      .number()
      .int()
      .min(1000)
      .max(300000)
      .default(30000)
      .describe('Timeout in ms for each lint/test command (default: 30000)'),
  })
  .refine(
    data => !data.enabled || data.lint !== undefined || data.test !== undefined,
    {
      message: 'At least one of "lint" or "test" must be set when enabled',
    },
  )

export type AutoFixConfig = z.infer<typeof AutoFixConfigSchema>

export function getAutoFixConfig(
  rawConfig: unknown,
): AutoFixConfig | null {
  if (!rawConfig || typeof rawConfig !== 'object') {
    return null
  }
  const parsed = AutoFixConfigSchema.safeParse(rawConfig)
  if (!parsed.success) {
    return null
  }
  if (!parsed.data.enabled) {
    return null
  }
  return parsed.data
}
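A minimal sketch of consuming this config. The raw settings value here is illustrative; in the codebase it would come from the settings file validated by SettingsSchema, as the tests above show.

import { getAutoFixConfig } from './autoFixConfig.js'

// Hypothetical raw settings value, e.g. the autoFix field of parsed settings.
const rawAutoFix = { enabled: true, lint: 'eslint . --fix', test: 'bun test' }

const config = getAutoFixConfig(rawAutoFix)
if (config) {
  // Schema defaults are applied during parsing: maxRetries 3, timeout 30000 ms.
  console.log(config.maxRetries, config.timeout) // 3 30000
}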
63 src/services/autoFix/autoFixHook.test.ts Normal file
@@ -0,0 +1,63 @@
import { describe, expect, test } from 'bun:test'
import {
  shouldRunAutoFix,
  buildAutoFixContext,
} from './autoFixHook.js'

describe('shouldRunAutoFix', () => {
  test('returns true for file_edit tool when autoFix enabled', () => {
    const config = { enabled: true, lint: 'eslint .', maxRetries: 3, timeout: 30000 }
    expect(shouldRunAutoFix('file_edit', config)).toBe(true)
  })

  test('returns true for file_write tool when autoFix enabled', () => {
    const config = { enabled: true, lint: 'eslint .', maxRetries: 3, timeout: 30000 }
    expect(shouldRunAutoFix('file_write', config)).toBe(true)
  })

  test('returns false for bash tool', () => {
    const config = { enabled: true, lint: 'eslint .', maxRetries: 3, timeout: 30000 }
    expect(shouldRunAutoFix('bash', config)).toBe(false)
  })

  test('returns false for file_read tool', () => {
    const config = { enabled: true, lint: 'eslint .', maxRetries: 3, timeout: 30000 }
    expect(shouldRunAutoFix('file_read', config)).toBe(false)
  })

  test('returns false when config is null', () => {
    expect(shouldRunAutoFix('file_edit', null)).toBe(false)
  })
})

describe('buildAutoFixContext', () => {
  test('formats lint errors as AI-readable context', () => {
    const context = buildAutoFixContext({
      hasErrors: true,
      lintOutput: 'src/foo.ts:10:5 error no-unused-vars',
      lintExitCode: 1,
      errorSummary: 'Lint errors (exit code 1):\nsrc/foo.ts:10:5 error no-unused-vars',
    })
    expect(context).toContain('AUTO-FIX')
    expect(context).toContain('no-unused-vars')
    expect(context).toContain('Please fix')
  })

  test('returns null when no errors', () => {
    const context = buildAutoFixContext({
      hasErrors: false,
    })
    expect(context).toBeNull()
  })

  test('formats test failures as AI-readable context', () => {
    const context = buildAutoFixContext({
      hasErrors: true,
      testOutput: 'FAIL src/foo.test.ts\n  expected true, got false',
      testExitCode: 1,
      errorSummary: 'Test failures (exit code 1):\nFAIL src/foo.test.ts',
    })
    expect(context).toContain('AUTO-FIX')
    expect(context).toContain('FAIL')
  })
})
25 src/services/autoFix/autoFixHook.ts Normal file
@@ -0,0 +1,25 @@
import type { AutoFixConfig } from './autoFixConfig.js'
import type { AutoFixResult } from './autoFixRunner.js'

const AUTO_FIX_TOOLS = new Set(['file_edit', 'file_write'])

export function shouldRunAutoFix(
  toolName: string,
  config: AutoFixConfig | null,
): boolean {
  if (!config) return false
  return AUTO_FIX_TOOLS.has(toolName)
}

export function buildAutoFixContext(result: AutoFixResult): string | null {
  if (!result.hasErrors || !result.errorSummary) return null

  return (
    `<auto_fix_feedback>\n` +
    `AUTO-FIX: The file you just edited has errors. Please fix them:\n\n` +
    `${result.errorSummary}\n\n` +
    `Please fix these errors in the files you just edited. ` +
    `Do not ask the user — just apply the fix.\n` +
    `</auto_fix_feedback>`
  )
}
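For orientation, a minimal sketch of how the three pieces could be wired into a post-tool-use hook. The afterToolUse signature and the idea of returning feedback text to the caller are hypothetical; only the imported functions are from this diff.

import { getAutoFixConfig } from './autoFixConfig.js'
import { shouldRunAutoFix, buildAutoFixContext } from './autoFixHook.js'
import { runAutoFixCheck } from './autoFixRunner.js'

// Hypothetical hook: called after a tool invocation completes.
export async function afterToolUse(
  toolName: string,
  rawAutoFixSettings: unknown,
  cwd: string,
): Promise<string | null> {
  const config = getAutoFixConfig(rawAutoFixSettings)
  if (!config || !shouldRunAutoFix(toolName, config)) return null

  const result = await runAutoFixCheck({
    lint: config.lint,
    test: config.test,
    timeout: config.timeout,
    cwd,
  })
  // Non-null only when lint/test failed; a caller would feed this back
  // to the model as the <auto_fix_feedback> block built above.
  return buildAutoFixContext(result)
}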
48 src/services/autoFix/autoFixIntegration.test.ts Normal file
@@ -0,0 +1,48 @@
import { describe, expect, test } from 'bun:test'
import { getAutoFixConfig } from './autoFixConfig.js'
import { shouldRunAutoFix, buildAutoFixContext } from './autoFixHook.js'
import { runAutoFixCheck } from './autoFixRunner.js'

describe('autoFix end-to-end flow', () => {
  test('full flow: config → shouldRun → check → context', async () => {
    const config = getAutoFixConfig({
      enabled: true,
      lint: 'echo "error: unused" && exit 1',
      maxRetries: 2,
      timeout: 5000,
    })
    expect(config).not.toBeNull()
    expect(shouldRunAutoFix('file_edit', config)).toBe(true)

    const result = await runAutoFixCheck({
      lint: config!.lint,
      test: config!.test,
      timeout: config!.timeout,
      cwd: '/tmp',
    })
    expect(result.hasErrors).toBe(true)

    const context = buildAutoFixContext(result)
    expect(context).not.toBeNull()
    expect(context).toContain('AUTO-FIX')
    expect(context).toContain('unused')
  })

  test('full flow: no errors = no context', async () => {
    const config = getAutoFixConfig({
      enabled: true,
      lint: 'echo "all clean"',
      timeout: 5000,
    })
    const result = await runAutoFixCheck({
      lint: config!.lint,
      timeout: config!.timeout,
      cwd: '/tmp',
    })
    expect(result.hasErrors).toBe(false)
    const context = buildAutoFixContext(result)
    expect(context).toBeNull()
  })
})
103 src/services/autoFix/autoFixRunner.test.ts Normal file
@@ -0,0 +1,103 @@
import { describe, expect, test } from 'bun:test'
import { runAutoFixCheck } from './autoFixRunner.js'

describe('runAutoFixCheck', () => {
  test('returns success when lint command exits 0', async () => {
    const result = await runAutoFixCheck({
      lint: 'echo "all clean"',
      timeout: 5000,
      cwd: '/tmp',
    })
    expect(result.hasErrors).toBe(false)
    expect(result.lintOutput).toContain('all clean')
    expect(result.testOutput).toBeUndefined()
  })

  test('returns errors when lint command exits non-zero', async () => {
    const result = await runAutoFixCheck({
      lint: 'echo "error: unused var" && exit 1',
      timeout: 5000,
      cwd: '/tmp',
    })
    expect(result.hasErrors).toBe(true)
    expect(result.lintOutput).toContain('unused var')
    expect(result.lintExitCode).toBe(1)
  })

  test('returns errors when test command exits non-zero', async () => {
    const result = await runAutoFixCheck({
      test: 'echo "FAIL test_foo" && exit 1',
      timeout: 5000,
      cwd: '/tmp',
    })
    expect(result.hasErrors).toBe(true)
    expect(result.testOutput).toContain('FAIL test_foo')
    expect(result.testExitCode).toBe(1)
  })

  test('runs both lint and test commands', async () => {
    const result = await runAutoFixCheck({
      lint: 'echo "lint ok"',
      test: 'echo "test ok"',
      timeout: 5000,
      cwd: '/tmp',
    })
    expect(result.hasErrors).toBe(false)
    expect(result.lintOutput).toContain('lint ok')
    expect(result.testOutput).toContain('test ok')
  })

  test('skips test if lint fails', async () => {
    const result = await runAutoFixCheck({
      lint: 'echo "lint error" && exit 1',
      test: 'echo "should not run"',
      timeout: 5000,
      cwd: '/tmp',
    })
    expect(result.hasErrors).toBe(true)
    expect(result.lintOutput).toContain('lint error')
    expect(result.testOutput).toBeUndefined()
  })

  test('handles timeout gracefully', async () => {
    const result = await runAutoFixCheck({
      lint: 'sleep 10',
      timeout: 100,
      cwd: '/tmp',
    })
    expect(result.hasErrors).toBe(true)
    expect(result.timedOut).toBe(true)
  })

  test('returns success with no commands configured', async () => {
    const result = await runAutoFixCheck({
      timeout: 5000,
      cwd: '/tmp',
    })
    expect(result.hasErrors).toBe(false)
  })

  test('formats error summary for AI consumption', async () => {
    const result = await runAutoFixCheck({
      lint: 'echo "src/foo.ts:10:5 error no-unused-vars" && exit 1',
      timeout: 5000,
      cwd: '/tmp',
    })
    expect(result.hasErrors).toBe(true)
    const summary = result.errorSummary
    expect(summary).toContain('Lint errors')
    expect(summary).toContain('no-unused-vars')
  })
})
169 src/services/autoFix/autoFixRunner.ts Normal file
@@ -0,0 +1,169 @@
import { spawn } from 'child_process'

export interface AutoFixCheckOptions {
  lint?: string
  test?: string
  timeout: number
  cwd: string
  signal?: AbortSignal
}

export interface AutoFixResult {
  hasErrors: boolean
  lintOutput?: string
  lintExitCode?: number
  testOutput?: string
  testExitCode?: number
  timedOut?: boolean
  errorSummary?: string
}

async function runCommand(
  command: string,
  cwd: string,
  timeout: number,
  signal?: AbortSignal,
): Promise<{ stdout: string; stderr: string; exitCode: number; timedOut: boolean }> {
  return new Promise((resolve) => {
    if (signal?.aborted) {
      resolve({ stdout: '', stderr: 'Aborted', exitCode: 1, timedOut: false })
      return
    }

    let timedOut = false
    let stdout = ''
    let stderr = ''

    const isWindows = process.platform === 'win32'
    const proc = spawn(command, [], {
      cwd,
      env: { ...process.env },
      shell: true,
      windowsHide: true,
      // On Unix, create a process group so we can kill child processes on timeout/abort
      detached: !isWindows,
    })

    const killTree = () => {
      try {
        if (!isWindows && proc.pid) {
          // Kill the entire process group
          process.kill(-proc.pid, 'SIGTERM')
        } else {
          proc.kill('SIGTERM')
        }
      } catch {
        // Process may have already exited
      }
    }

    const onAbort = () => {
      killTree()
    }
    signal?.addEventListener('abort', onAbort, { once: true })

    proc.stdout?.on('data', (data: Buffer) => {
      stdout += data.toString()
    })
    proc.stderr?.on('data', (data: Buffer) => {
      stderr += data.toString()
    })

    const timer = setTimeout(() => {
      timedOut = true
      killTree()
    }, timeout)

    proc.on('close', (code) => {
      clearTimeout(timer)
      signal?.removeEventListener('abort', onAbort)
      resolve({
        stdout: stdout.slice(0, 10000),
        stderr: stderr.slice(0, 10000),
        exitCode: code ?? 1,
        timedOut,
      })
    })

    proc.on('error', () => {
      clearTimeout(timer)
      signal?.removeEventListener('abort', onAbort)
      resolve({
        stdout,
        stderr: stderr || 'Command failed to start',
        exitCode: 1,
        timedOut: false,
      })
    })
  })
}

function buildErrorSummary(result: AutoFixResult): string | undefined {
  if (!result.hasErrors) return undefined
  const parts: string[] = []

  if (result.timedOut) {
    parts.push('Command timed out.')
  }
  if (result.lintExitCode !== undefined && result.lintExitCode !== 0) {
    parts.push(`Lint errors (exit code ${result.lintExitCode}):\n${result.lintOutput ?? ''}`)
  }
  if (result.testExitCode !== undefined && result.testExitCode !== 0) {
    parts.push(`Test failures (exit code ${result.testExitCode}):\n${result.testOutput ?? ''}`)
  }

  return parts.join('\n\n')
}

export async function runAutoFixCheck(
  options: AutoFixCheckOptions,
): Promise<AutoFixResult> {
  const { lint, test, timeout, cwd, signal } = options

  if (!lint && !test) {
    return { hasErrors: false }
  }

  if (signal?.aborted) {
    return { hasErrors: false }
  }

  const result: AutoFixResult = { hasErrors: false }

  // Run lint first
  if (lint) {
    const lintResult = await runCommand(lint, cwd, timeout, signal)
    result.lintOutput = (lintResult.stdout + '\n' + lintResult.stderr).trim()
    result.lintExitCode = lintResult.exitCode

    if (lintResult.timedOut) {
      result.hasErrors = true
      result.timedOut = true
      result.errorSummary = buildErrorSummary(result)
      return result
    }

    if (lintResult.exitCode !== 0) {
      result.hasErrors = true
      result.errorSummary = buildErrorSummary(result)
      return result
    }
  }

  // Run tests only if lint passed (or no lint configured)
  if (test) {
    const testResult = await runCommand(test, cwd, timeout, signal)
    result.testOutput = (testResult.stdout + '\n' + testResult.stderr).trim()
    result.testExitCode = testResult.exitCode

    if (testResult.timedOut) {
      result.hasErrors = true
      result.timedOut = true
    } else if (testResult.exitCode !== 0) {
      result.hasErrors = true
    }
  }

  result.errorSummary = buildErrorSummary(result)
  return result
}
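A minimal sketch of cancelling an in-flight check via the AbortSignal option (commands and timings here are illustrative). On Unix the runner spawns a detached process group, so aborting kills shell children too, not just the shell itself.

import { runAutoFixCheck } from './autoFixRunner.js'

const controller = new AbortController()
// Abort shortly after starting; killTree() terminates the whole group.
setTimeout(() => controller.abort(), 500)

const result = await runAutoFixCheck({
  lint: 'sleep 10',
  timeout: 30000,
  cwd: '/tmp',
  signal: controller.signal,
})
// The killed command exits non-zero, so the result reports errors.
console.log(result.hasErrors) // true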
127 src/services/compact/microCompact.test.ts Normal file
@@ -0,0 +1,127 @@
import { describe, expect, test } from 'bun:test'

import type { Message } from '../../types/message.js'
import { createAssistantMessage, createUserMessage } from '../../utils/messages.js'

// collectCompactableToolIds and isCompactableTool are not exported, so we
// exercise them indirectly through the public microCompact entry points.
// The core property under test: MCP tools (prefixed 'mcp__') should be
// compactable alongside the built-in tool set.

/**
 * Helper: build a minimal assistant message with a tool_use block.
 */
function assistantWithToolUse(toolName: string, toolId: string): Message {
  return createAssistantMessage({
    content: [
      {
        type: 'tool_use' as const,
        id: toolId,
        name: toolName,
        input: {},
      },
    ],
  })
}

/**
 * Helper: build a user message with a tool_result block.
 */
function userWithToolResult(toolId: string, output: string): Message {
  return createUserMessage({
    content: [
      {
        type: 'tool_result' as const,
        tool_use_id: toolId,
        content: output,
      },
    ],
  })
}

describe('microCompact MCP tool compaction', () => {
  // The time-based path (which content-clears old tool results once the gap
  // since the last assistant message exceeds a threshold) would exercise
  // collectCompactableToolIds → isCompactableTool directly, but
  // evaluateTimeBasedTrigger depends on runtime config (GrowthBook). So we
  // verify the observable behavior instead: the module loads, token
  // estimation counts MCP tool_use blocks, and microcompactMessages handles
  // MCP tools without error.

  test('module exports load correctly', async () => {
    const mod = await import('./microCompact.js')
    expect(mod.microcompactMessages).toBeFunction()
    expect(mod.estimateMessageTokens).toBeFunction()
    expect(mod.evaluateTimeBasedTrigger).toBeFunction()
  })

  test('estimateMessageTokens counts MCP tool_use blocks', async () => {
    const { estimateMessageTokens } = await import('./microCompact.js')

    const builtinMessages: Message[] = [
      assistantWithToolUse('Read', 'tool-builtin-1'),
      userWithToolResult('tool-builtin-1', 'file contents here'),
    ]

    const mcpMessages: Message[] = [
      assistantWithToolUse('mcp__github__get_file_contents', 'tool-mcp-1'),
      userWithToolResult('tool-mcp-1', 'file contents here'),
    ]

    const builtinTokens = estimateMessageTokens(builtinMessages)
    const mcpTokens = estimateMessageTokens(mcpMessages)

    // Both should produce non-zero estimates
    expect(builtinTokens).toBeGreaterThan(0)
    expect(mcpTokens).toBeGreaterThan(0)

    // The tool_result content is identical, so token estimates should be
    // similar (tool_use name differs slightly, so not exactly equal)
    expect(Math.abs(builtinTokens - mcpTokens)).toBeLessThan(50)
  })

  test('microcompactMessages processes MCP tools without error', async () => {
    const { microcompactMessages } = await import('./microCompact.js')

    const messages: Message[] = [
      assistantWithToolUse('mcp__slack__send_message', 'tool-mcp-2'),
      userWithToolResult('tool-mcp-2', 'Message sent successfully'),
      assistantWithToolUse('mcp__github__create_pull_request', 'tool-mcp-3'),
      userWithToolResult('tool-mcp-3', JSON.stringify({ number: 42, url: 'https://github.com/org/repo/pull/42' })),
    ]

    // Should not throw — MCP tools should be handled gracefully
    const result = await microcompactMessages(messages)
    expect(result).toBeDefined()
    expect(result.messages).toBeDefined()
    expect(result.messages.length).toBe(messages.length)
  })

  test('microcompactMessages processes mixed built-in and MCP tools', async () => {
    const { microcompactMessages } = await import('./microCompact.js')

    const messages: Message[] = [
      assistantWithToolUse('Read', 'tool-read-1'),
      userWithToolResult('tool-read-1', 'some file content'),
      assistantWithToolUse('mcp__playwright__screenshot', 'tool-mcp-4'),
      userWithToolResult('tool-mcp-4', 'base64-encoded-screenshot-data'.repeat(100)),
      assistantWithToolUse('Bash', 'tool-bash-1'),
      userWithToolResult('tool-bash-1', 'command output'),
    ]

    const result = await microcompactMessages(messages)
    expect(result).toBeDefined()
    expect(result.messages.length).toBe(messages.length)
  })
})
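These tests lean on the module's internal compactability predicate, which is not shown in this diff. One plausible shape for such a check, purely for orientation; isCompactableTool and BUILTIN_COMPACTABLE_TOOLS are illustrative names, not exports of microCompact.ts.

// Illustrative only: the real predicate lives inside microCompact.ts.
const BUILTIN_COMPACTABLE_TOOLS = new Set(['Read', 'Bash', 'Glob', 'Grep'])

function isCompactableTool(toolName: string): boolean {
  // MCP tools are namespaced 'mcp__<server>__<tool>' and are treated as
  // compactable alongside the built-in set, which is what the tests above
  // verify indirectly.
  return toolName.startsWith('mcp__') || BUILTIN_COMPACTABLE_TOOLS.has(toolName)
}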
Some files were not shown because too many files have changed in this diff.