Compare commits

..

1 Commits

Author SHA1 Message Date
gnanam1990
149b1eb8fb fix: surface actionable error when DuckDuckGo web search is rate-limited
Non-Anthropic / non-codex providers (minimax, kimi, generic OpenAI-compatible)
fell through to the DDG adapter when no paid search key was configured. DDG's
scraper is blocked on most IPs, so web_search surfaced an opaque "anomaly in
the request" error. Catch that response in the DDG provider and rethrow with
the exact env vars that would unblock the tool, or the option to switch to a
native-search provider.

Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
2026-04-22 22:22:59 +05:30
230 changed files with 762 additions and 14249 deletions

View File

@@ -145,44 +145,9 @@ ANTHROPIC_API_KEY=sk-ant-your-key-here
# CLAUDE_CODE_USE_OPENAI=1
# OPENAI_API_KEY=sk-your-key-here
# OPENAI_MODEL=gpt-4o
# For DeepSeek, set:
# OPENAI_BASE_URL=https://api.deepseek.com/v1
# OPENAI_MODEL=deepseek-v4-flash
# Optional: OPENAI_MODEL=deepseek-v4-pro
# Legacy aliases also work: deepseek-chat and deepseek-reasoner
# For Z.AI GLM Coding Plan, set:
# OPENAI_BASE_URL=https://api.z.ai/api/coding/paas/v4
# OPENAI_MODEL=GLM-5.1
# Optional: OPENAI_MODEL=GLM-5-Turbo, GLM-4.7, or GLM-4.5-Air
# Use a custom OpenAI-compatible endpoint (optional — defaults to api.openai.com)
# OPENAI_BASE_URL=https://api.openai.com/v1
# Choose the OpenAI-compatible API surface (optional — defaults to chat_completions)
# Supported: chat_completions, responses
# OPENAI_API_FORMAT=chat_completions
# Choose a custom auth header for OpenAI-compatible providers (optional).
# Authorization defaults to Bearer; custom headers default to the raw API key.
# Set OPENAI_AUTH_HEADER_VALUE when the header value differs from OPENAI_API_KEY.
# OPENAI_AUTH_HEADER=api-key
# OPENAI_AUTH_SCHEME=raw
# OPENAI_AUTH_HEADER_VALUE=your-header-value-here
# Fallback context window size (tokens) when the model is not found in the
# built-in table (default: 128000). Increase this for models with larger
# context windows (e.g. 200000 for Claude-sized contexts).
# CLAUDE_CODE_OPENAI_FALLBACK_CONTEXT_WINDOW=128000
# Per-model context window overrides as a JSON object.
# Takes precedence over the built-in table, so you can register new or
# custom models without patching source.
# Example: CLAUDE_CODE_OPENAI_CONTEXT_WINDOWS={"my-corp/llm-v3":262144,"gpt-4o-mini":128000}
# CLAUDE_CODE_OPENAI_CONTEXT_WINDOWS=
# Per-model maximum output token overrides as a JSON object.
# Use this alongside CLAUDE_CODE_OPENAI_CONTEXT_WINDOWS when your model
# supports a different output limit than what the built-in table specifies.
# Example: CLAUDE_CODE_OPENAI_MAX_OUTPUT_TOKENS={"my-corp/llm-v3":8192}
# CLAUDE_CODE_OPENAI_MAX_OUTPUT_TOKENS=
# -----------------------------------------------------------------------------
@@ -307,25 +272,6 @@ ANTHROPIC_API_KEY=sk-ant-your-key-here
# trigger "Extra required key ... supplied" errors from OpenAI-compatible endpoints
# OPENCLAUDE_DISABLE_STRICT_TOOLS=1
# Disable hidden <system-reminder> messages injected into tool output
# Suppresses the file-read cyber-risk reminder and the todo/task tool nudges
# Useful for users who want full transparency over what the model sees
# OPENCLAUDE_DISABLE_TOOL_REMINDERS=1
# Log structured per-request token usage (including cache metrics) to stderr.
# Useful for auditing cache hit rate / debugging cost spikes outside the REPL.
# Any truthy value enables it ("verbose", "1", "true").
#
# Complements (does NOT replace) CLAUDE_CODE_ENABLE_TOKEN_USAGE_ATTACHMENT —
# they serve different audiences:
# - OPENCLAUDE_LOG_TOKEN_USAGE is user-facing: one JSON line per API
# request on stderr, intended for humans inspecting cost/caching.
# - CLAUDE_CODE_ENABLE_TOKEN_USAGE_ATTACHMENT is model-facing: injects
# a context-usage attachment INTO the prompt so the model can reason
# about its own remaining context. Does not touch stderr.
# Turn on whichever audience you're debugging; both can run together.
# OPENCLAUDE_LOG_TOKEN_USAGE=verbose
# Custom timeout for API requests in milliseconds (default: varies)
# API_TIMEOUT_MS=60000

View File

@@ -1,3 +1,3 @@
{
".": "0.7.0"
".": "0.6.0"
}

View File

@@ -1,39 +1,5 @@
# Changelog
## [0.7.0](https://github.com/Gitlawb/openclaude/compare/v0.6.0...v0.7.0) (2026-04-26)
### Features
* add model-specific tokenizers and compression ratio detection ([#799](https://github.com/Gitlawb/openclaude/issues/799)) ([e92e527](https://github.com/Gitlawb/openclaude/commit/e92e5274b223d935d380b1fbd234cb631ab03211))
* add OPENCLAUDE_DISABLE_TOOL_REMINDERS env var to suppress hidden tool-output reminders ([#837](https://github.com/Gitlawb/openclaude/issues/837)) ([28de94d](https://github.com/Gitlawb/openclaude/commit/28de94df5dcd7718cb334e2e793e9472f5b291c5)), closes [#809](https://github.com/Gitlawb/openclaude/issues/809)
* add streaming optimizer and structured request logging ([#703](https://github.com/Gitlawb/openclaude/issues/703)) ([5b9cd21](https://github.com/Gitlawb/openclaude/commit/5b9cd21e373823a77fd552d6e02f5d4b68ae06b1))
* add xAI as official provider ([#865](https://github.com/Gitlawb/openclaude/issues/865)) ([2586a9c](https://github.com/Gitlawb/openclaude/commit/2586a9cddbd2512826bca81cb5deb3ec97f00f0f))
* **api:** expose cache metrics in REPL + normalize across providers ([#813](https://github.com/Gitlawb/openclaude/issues/813)) ([9e23c2b](https://github.com/Gitlawb/openclaude/commit/9e23c2bec43697187762601db5b1585c9b0fb1a3))
* implement Hook Chains runtime integration for self-healing agent mesh MVP ([#711](https://github.com/Gitlawb/openclaude/issues/711)) ([44a2c30](https://github.com/Gitlawb/openclaude/commit/44a2c30d5f9b98027e454466c680360f6b4625fc))
* **memory:** implement persistent project-level Knowledge Graph and RAG ([#899](https://github.com/Gitlawb/openclaude/issues/899)) ([29f7579](https://github.com/Gitlawb/openclaude/commit/29f757937732be0f8cca2bc0627a27eeafc2a992))
* **minimax:** add /usage support and fix MiniMax quota parsing ([#869](https://github.com/Gitlawb/openclaude/issues/869)) ([26413f6](https://github.com/Gitlawb/openclaude/commit/26413f6d307928a4f14c9c61c9860a28f8d81358))
* **model:** add GPT-5.5 support for Codex provider ([#880](https://github.com/Gitlawb/openclaude/issues/880)) ([038f715](https://github.com/Gitlawb/openclaude/commit/038f715b7ab9714340bda421b73a86d8590cf531))
* **tools:** resilient web search and fetch across all providers ([#836](https://github.com/Gitlawb/openclaude/issues/836)) ([531e3f1](https://github.com/Gitlawb/openclaude/commit/531e3f10592a73d81f26675c2479d46a3d5b55f5))
* **zai:** add Z.AI GLM Coding Plan provider preset ([#896](https://github.com/Gitlawb/openclaude/issues/896)) ([a0d657e](https://github.com/Gitlawb/openclaude/commit/a0d657ee188f52f8a4ceaad1658c81343a32fdad))
### Bug Fixes
* **agent:** provider-aware fallback for haiku/sonnet aliases ([#908](https://github.com/Gitlawb/openclaude/issues/908)) ([a3e728a](https://github.com/Gitlawb/openclaude/commit/a3e728a114f6379b80daefc8abcac17a752c5f96))
* bugs ([#885](https://github.com/Gitlawb/openclaude/issues/885)) ([c6c5f06](https://github.com/Gitlawb/openclaude/commit/c6c5f0608cf6509b412b121954547d72b3f3a411))
* make OpenAI fallback context window configurable + support external model lookup ([#861](https://github.com/Gitlawb/openclaude/issues/861)) ([b750e9e](https://github.com/Gitlawb/openclaude/commit/b750e9e97d15926d094d435772b2d6d12e5e545c))
* **mcp:** disable MCP_SKILLS feature flag — source not mirrored ([#872](https://github.com/Gitlawb/openclaude/issues/872)) ([dcbe295](https://github.com/Gitlawb/openclaude/commit/dcbe29558ab9c74d335b138488005a6509aa906a))
* normalize /provider multi-model selection and semicolon parsing ([#841](https://github.com/Gitlawb/openclaude/issues/841)) ([c4cb98a](https://github.com/Gitlawb/openclaude/commit/c4cb98a4f092062da02a4728cf59fed0fc3a6d3f))
* **openai-shim:** echo reasoning_content on assistant tool-call messages for Moonshot ([#828](https://github.com/Gitlawb/openclaude/issues/828)) ([67de6bd](https://github.com/Gitlawb/openclaude/commit/67de6bd2cffc3381f0f28fd3ffce043970611667))
* **query:** restore system prompt structure and add missing config import ([#907](https://github.com/Gitlawb/openclaude/issues/907)) ([818689b](https://github.com/Gitlawb/openclaude/commit/818689b2ee71cb6966cb4dc5a5ebd90fd22b0fcb))
* **shell:** recover when CWD path was replaced by a non-directory ([#871](https://github.com/Gitlawb/openclaude/issues/871)) ([a4c6757](https://github.com/Gitlawb/openclaude/commit/a4c67570238794317d049a225396672b465fdbfc))
* **startup:** show --model flag override on startup screen ([#898](https://github.com/Gitlawb/openclaude/issues/898)) ([d45628c](https://github.com/Gitlawb/openclaude/commit/d45628c41300b83b466e6a97983099615a50e7d7))
* **startup:** url authoritative over model name in banner provider detect ([#864](https://github.com/Gitlawb/openclaude/issues/864)) ([e346b8d](https://github.com/Gitlawb/openclaude/commit/e346b8d5ec2d58a4e8db337918d52d844ee52766)), closes [#855](https://github.com/Gitlawb/openclaude/issues/855)
* surface actionable error when DuckDuckGo web search is rate-limited ([#834](https://github.com/Gitlawb/openclaude/issues/834)) ([3c4d843](https://github.com/Gitlawb/openclaude/commit/3c4d8435c42e1ee04f9defd31c4c589017f524c5))
* **test:** add missing teammate exports to hookChains integration mock ([#840](https://github.com/Gitlawb/openclaude/issues/840)) ([23e8cfb](https://github.com/Gitlawb/openclaude/commit/23e8cfbd5b22179684276bef4131e26b830ce69c)), closes [#839](https://github.com/Gitlawb/openclaude/issues/839)
* **update:** show real package version and give actionable guidance ([#870](https://github.com/Gitlawb/openclaude/issues/870)) ([6e58b81](https://github.com/Gitlawb/openclaude/commit/6e58b819370128b923dda4fcc774bb556f4b951a))
## [0.6.0](https://github.com/Gitlawb/openclaude/compare/v0.5.2...v0.6.0) (2026-04-22)

View File

@@ -132,7 +132,7 @@ Cause:
Fix:
```powershell
cd <PATH>
cd C:\Users\Lucas Pedry\Documents\openclaude\openclaude
bun run dev:profile
```
@@ -189,7 +189,7 @@ Or pick a local Ollama profile automatically by goal:
bun run profile:init -- --provider ollama --goal balanced
```
## 6.5 Placeholder key (`YOUR_KEY`) error
## 6.5 Placeholder key (`SUA_CHAVE`) error
Cause:

View File

@@ -13,25 +13,7 @@ Use OpenAI-compatible APIs, Gemini, GitHub Models, Codex OAuth, Codex, Ollama, A
OpenClaude is also mirrored to GitLawb:
[gitlawb.com/node/repos/z6MkqDnb/openclaude](https://gitlawb.com/node/repos/z6MkqDnb/openclaude)
[Quick Start](#quick-start) | [Setup Guides](#setup-guides) | [Providers](#supported-providers) | [Source Build](#source-build-and-local-development) | [VS Code Extension](#vs-code-extension) | [Sponsors](#sponsors) | [Community](#community)
## Sponsors
<p align="center">
<a href="https://gitlawb.com">
<img src="https://gitlawb.com/logo.png" alt="GitLawb logo" width="96">
</a>
&nbsp;&nbsp;&nbsp;&nbsp;
<a href="https://bankr.bot">
<img src="https://bankr.bot/favicon.svg" alt="Bankr.bot logo" width="96">
</a>
</p>
<p align="center">
<a href="https://gitlawb.com"><strong>GitLawb</strong></a>
&nbsp;&nbsp;&nbsp;&nbsp;
<a href="https://bankr.bot"><strong>Bankr.bot</strong></a>
</p>
[Quick Start](#quick-start) | [Setup Guides](#setup-guides) | [Providers](#supported-providers) | [Source Build](#source-build-and-local-development) | [VS Code Extension](#vs-code-extension) | [Community](#community)
## Star History
@@ -154,7 +136,6 @@ Advanced and source-build guides:
- **Images**: URL and base64 image inputs for providers that support vision
- **Provider profiles**: Guided setup plus saved `.openclaude-profile.json` support
- **Local and remote model backends**: Cloud APIs, local servers, and Apple Silicon local inference
- **Codebase intelligence (repo map)**: Structural map of the repository ranked by PageRank importance, auto-injected into context when the `REPO_MAP` flag is enabled. Inspect with `/repomap`. See [docs/repo-map.md](docs/repo-map.md) for details.
## Provider Notes
@@ -171,12 +152,12 @@ For best results, use models with strong tool/function calling support.
OpenClaude can route different agents to different models through settings-based routing. This is useful for cost optimization or splitting work by model strength.
Add to `~/.openclaude.json`:
Add to `~/.claude/settings.json`:
```json
{
"agentModels": {
"deepseek-v4-flash": {
"deepseek-chat": {
"base_url": "https://api.deepseek.com/v1",
"api_key": "sk-your-key"
},
@@ -186,10 +167,10 @@ Add to `~/.openclaude.json`:
}
},
"agentRouting": {
"Explore": "deepseek-v4-flash",
"Explore": "deepseek-chat",
"Plan": "gpt-4o",
"general-purpose": "gpt-4o",
"frontend-dev": "deepseek-v4-flash",
"frontend-dev": "deepseek-chat",
"default": "gpt-4o"
}
}

View File

@@ -49,13 +49,9 @@
"fuse.js": "7.1.0",
"get-east-asian-width": "1.5.0",
"google-auth-library": "9.15.1",
"graphology": "^0.26.0",
"graphology-operators": "^1.6.0",
"graphology-pagerank": "^1.1.0",
"https-proxy-agent": "7.0.6",
"ignore": "7.0.5",
"indent-string": "5.0.0",
"js-tiktoken": "^1.0.16",
"jsonc-parser": "3.3.1",
"lodash-es": "4.18.1",
"lru-cache": "11.2.7",
@@ -75,13 +71,11 @@
"strip-ansi": "7.2.0",
"supports-hyperlinks": "3.2.0",
"tree-kill": "1.2.2",
"tree-sitter-wasms": "^0.1.12",
"turndown": "7.2.2",
"type-fest": "4.41.0",
"undici": "7.24.6",
"usehooks-ts": "3.1.1",
"vscode-languageserver-protocol": "3.17.5",
"web-tree-sitter": "^0.25.0",
"wrap-ansi": "9.0.2",
"ws": "8.20.0",
"xss": "1.0.15",
@@ -595,8 +589,6 @@
"etag": ["etag@1.8.1", "", {}, "sha512-aIL5Fx7mawVa300al2BnEE4iNvo1qETxLrPI/o05L7z6go7fCw1J6EQmbK4FmJ2AS7kgVF/KEZWufBfdClMcPg=="],
"events": ["events@3.3.0", "", {}, "sha512-mQw+2fkQbALzQ7V0MY0IqdnXNOeTtP4r0lN9z7AAawCXgqea7bDii20AYrIBrFd/Hx0M2Ocz6S111CaFkUcb0Q=="],
"eventsource": ["eventsource@3.0.7", "", { "dependencies": { "eventsource-parser": "^3.0.1" } }, "sha512-CRT1WTyuQoD771GW56XEZFQ/ZoSfWid1alKGDYMmkt2yl8UXrVR4pspqWNEcqKvVIzg6PAltWjxcSSPrboA4iA=="],
"eventsource-parser": ["eventsource-parser@3.0.6", "", {}, "sha512-Vo1ab+QXPzZ4tCa8SwIHJFaSzy4R6SHf7BY79rFBDf0idraZWAkYrDjDj8uWaSm3S2TK+hJ7/t1CEmZ7jXw+pg=="],
@@ -665,16 +657,6 @@
"graceful-fs": ["graceful-fs@4.2.11", "", {}, "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ=="],
"graphology": ["graphology@0.26.0", "", { "dependencies": { "events": "^3.3.0" }, "peerDependencies": { "graphology-types": ">=0.24.0" } }, "sha512-8SSImzgUUYC89Z042s+0r/vMibY7GX/Emz4LDO5e7jYXhuoWfHISPFJYjpRLUSJGq6UQ6xlenvX1p/hJdfXuXg=="],
"graphology-operators": ["graphology-operators@1.6.1", "", { "dependencies": { "graphology-utils": "^2.0.0" }, "peerDependencies": { "graphology-types": ">=0.20.0" } }, "sha512-ZKGcaN+6L5hv0VelrDgkZ2IQL1c7nrqkTRiHDwBCjmbkS56vWh/iQNDnvd/c9YIpoygtEK0mgGOr/m4i7BOYrw=="],
"graphology-pagerank": ["graphology-pagerank@1.1.0", "", { "dependencies": { "graphology-utils": "^1.3.0", "lodash": "^4.17.5" } }, "sha512-ubhzN7HDKYSaFFvzqQsqQp14LIgCPNGaioWVZgc5E49NEKUOtCVehWEDF/9QXDUiK+4cMzj/yRoneJbYR0Rc3A=="],
"graphology-types": ["graphology-types@0.24.8", "", {}, "sha512-hDRKYXa8TsoZHjgEaysSRyPdT6uB78Ci8WnjgbStlQysz7xR52PInxNsmnB7IBOM1BhikxkNyCVEFgmPKnpx3Q=="],
"graphology-utils": ["graphology-utils@2.5.2", "", { "peerDependencies": { "graphology-types": ">=0.23.0" } }, "sha512-ckHg8MXrXJkOARk56ZaSCM1g1Wihe2d6iTmz1enGOz4W/l831MBCKSayeFQfowgF8wd+PQ4rlch/56Vs/VZLDQ=="],
"gtoken": ["gtoken@7.1.0", "", { "dependencies": { "gaxios": "^6.0.0", "jws": "^4.0.0" } }, "sha512-pCcEwRi+TKpMlxAQObHDQ56KawURgyAf6jtIY046fJ5tIv3zDe/LEIubckAO8fj6JnAxLdmWkUfNyulQ2iKdEw=="],
"has-flag": ["has-flag@4.0.0", "", {}, "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ=="],
@@ -723,8 +705,6 @@
"jose": ["jose@6.2.2", "", {}, "sha512-d7kPDd34KO/YnzaDOlikGpOurfF0ByC2sEV4cANCtdqLlTfBlw2p14O/5d/zv40gJPbIQxfES3nSx1/oYNyuZQ=="],
"js-tiktoken": ["js-tiktoken@1.0.21", "", { "dependencies": { "base64-js": "^1.5.1" } }, "sha512-biOj/6M5qdgx5TKjDnFT1ymSpM5tbd3ylwDtrQvFQSu0Z7bBYko2dF+W/aUkXUPuk6IVpRxk/3Q2sHOzGlS36g=="],
"json-bigint": ["json-bigint@1.0.0", "", { "dependencies": { "bignumber.js": "^9.0.0" } }, "sha512-SiPv/8VpZuWbvLSMtTDU8hEfrZWg/mH/nV/b4o0CYbSxu1UIQPLdwKOCIyLQX+VIPO5vrLX3i8qtqFyhdPSUSQ=="],
"json-schema-to-ts": ["json-schema-to-ts@3.1.1", "", { "dependencies": { "@babel/runtime": "^7.18.3", "ts-algebra": "^2.0.0" } }, "sha512-+DWg8jCJG2TEnpy7kOm/7/AxaYoaRbjVB4LFZLySZlWn8exGs3A4OLJR966cVvU26N7X9TWxl+Jsw7dzAqKT6g=="],
@@ -741,8 +721,6 @@
"locate-path": ["locate-path@5.0.0", "", { "dependencies": { "p-locate": "^4.1.0" } }, "sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g=="],
"lodash": ["lodash@4.18.1", "", {}, "sha512-dMInicTPVE8d1e5otfwmmjlxkZoUpiVLwyeTdUsi/Caj/gfzzblBcCE5sRHV/AsjuCmxWrte2TNGSYuCeCq+0Q=="],
"lodash-es": ["lodash-es@4.18.1", "", {}, "sha512-J8xewKD/Gk22OZbhpOVSwcs60zhd95ESDwezOFuA3/099925PdHJ7OFHNTGtajL3AlZkykD32HykiMo+BIBI8A=="],
"lodash.camelcase": ["lodash.camelcase@4.3.0", "", {}, "sha512-TwuEnCnxbc3rAvhf/LbG7tJUDzhqXyFnv3dtzLOPgCG/hODL7WFnsbwktkD7yUV0RrreP/l1PALq/YSg6VvjlA=="],
@@ -915,8 +893,6 @@
"tree-kill": ["tree-kill@1.2.2", "", { "bin": { "tree-kill": "cli.js" } }, "sha512-L0Orpi8qGpRG//Nd+H90vFB+3iHnue1zSSGmNOOCh1GLJ7rUKVwV2HvijphGQS2UmhUZewS9VgvxYIdgr+fG1A=="],
"tree-sitter-wasms": ["tree-sitter-wasms@0.1.13", "", { "dependencies": { "tree-sitter-wasms": "^0.1.11" } }, "sha512-wT+cR6DwaIz80/vho3AvSF0N4txuNx/5bcRKoXouOfClpxh/qqrF4URNLQXbbt8MaAxeksZcZd1j8gcGjc+QxQ=="],
"ts-algebra": ["ts-algebra@2.0.0", "", {}, "sha512-FPAhNPFMrkwz76P7cdjdmiShwMynZYN6SgOujD1urY4oNm80Ou9oMdmbR45LotcKOXoy7wSmHkRFE6Mxbrhefw=="],
"tslib": ["tslib@1.14.1", "", {}, "sha512-Xni35NKzjgMrwevysHTCArtLDpPvye8zV/0E4EyYn43P7/7qvQwPh9BGkHewbMulVntbigmcT7rdX3BNo9wRJg=="],
@@ -953,8 +929,6 @@
"vscode-languageserver-types": ["vscode-languageserver-types@3.17.5", "", {}, "sha512-Ld1VelNuX9pdF39h2Hgaeb5hEZM2Z3jUrrMgWQAu82jMtZp7p3vJT3BzToKtZI7NgQssZje5o0zryOrhQvzQAg=="],
"web-tree-sitter": ["web-tree-sitter@0.25.10", "", { "peerDependencies": { "@types/emscripten": "^1.40.0" }, "optionalPeers": ["@types/emscripten"] }, "sha512-Y09sF44/13XvgVKgO2cNDw5rGk6s26MgoZPXLESvMXeefBf7i6/73eFurre0IsTW6E14Y0ArIzhUMmjoc7xyzA=="],
"webidl-conversions": ["webidl-conversions@3.0.1", "", {}, "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ=="],
"whatwg-url": ["whatwg-url@5.0.0", "", { "dependencies": { "tr46": "~0.0.3", "webidl-conversions": "^3.0.0" } }, "sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw=="],
@@ -1411,8 +1385,6 @@
"gaxios/is-stream": ["is-stream@2.0.1", "", {}, "sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg=="],
"graphology-pagerank/graphology-utils": ["graphology-utils@1.8.0", "", { "peerDependencies": { "graphology-types": ">=0.19.0" } }, "sha512-Pa7SW30OMm8fVtyH49b3GJ/uxlMHGfXly50wIhlcc7ZoX9ahZa7sPBz+obo4WZClrRV6wh3tIu0GJoI42eao1A=="],
"needle/iconv-lite": ["iconv-lite@0.6.3", "", { "dependencies": { "safer-buffer": ">= 2.1.2 < 3.0.0" } }, "sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw=="],
"npm-run-path/path-key": ["path-key@4.0.0", "", {}, "sha512-haREypq7xkM7ErfgIyA0z+Bj4AGKlMSdlQE2jvJo6huWD1EdkKYV+G/T4nq0YEF2vgTT8kqMFKo1uHn950r4SQ=="],

View File

@@ -68,11 +68,9 @@ openclaude
export CLAUDE_CODE_USE_OPENAI=1
export OPENAI_API_KEY=sk-...
export OPENAI_BASE_URL=https://api.deepseek.com/v1
export OPENAI_MODEL=deepseek-v4-flash
export OPENAI_MODEL=deepseek-chat
```
Use `deepseek-v4-pro` when you want the stronger model. `deepseek-chat` and `deepseek-reasoner` remain available as DeepSeek's legacy API aliases.
### Google Gemini via OpenRouter
```bash
@@ -171,13 +169,12 @@ export OPENAI_MODEL=gpt-4o
|----------|----------|-------------|
| `CLAUDE_CODE_USE_OPENAI` | Yes | Set to `1` to enable the OpenAI provider |
| `OPENAI_API_KEY` | Yes* | Your API key (`*` not needed for local models like Ollama or Atomic Chat) |
| `OPENAI_MODEL` | Yes | Model name such as `gpt-4o`, `deepseek-v4-flash`, or `llama3.3:70b` |
| `OPENAI_MODEL` | Yes | Model name such as `gpt-4o`, `deepseek-chat`, or `llama3.3:70b` |
| `OPENAI_BASE_URL` | No | API endpoint, defaulting to `https://api.openai.com/v1` |
| `CODEX_API_KEY` | Codex only | Codex or ChatGPT access token override |
| `CODEX_AUTH_JSON_PATH` | Codex only | Path to a Codex CLI `auth.json` file |
| `CODEX_HOME` | Codex only | Alternative Codex home directory |
| `OPENCLAUDE_DISABLE_CO_AUTHORED_BY` | No | Suppress the default `Co-Authored-By` trailer in generated git commits |
| `OPENCLAUDE_LOG_TOKEN_USAGE` | No | When truthy (e.g. `verbose`), emits one JSON line on stderr per API request with input/output/cache tokens and the resolved provider. **User-facing debug output** — complements the REPL display controlled by `/config showCacheStats`. Distinct from `CLAUDE_CODE_ENABLE_TOKEN_USAGE_ATTACHMENT`, which is **model-facing** (injects context usage info into the prompt itself). Both can run together. |
You can also use `ANTHROPIC_MODEL` to override the model name. `OPENAI_MODEL` takes priority.

View File

@@ -41,13 +41,11 @@ openclaude
export CLAUDE_CODE_USE_OPENAI=1
export OPENAI_API_KEY=sk-your-key-here
export OPENAI_BASE_URL=https://api.deepseek.com/v1
export OPENAI_MODEL=deepseek-v4-flash
export OPENAI_MODEL=deepseek-chat
openclaude
```
Use `deepseek-v4-pro` when you want the stronger model. `deepseek-chat` and `deepseek-reasoner` still work as DeepSeek's legacy API aliases.
### Option C: Ollama
Install Ollama first from:

View File

@@ -41,13 +41,11 @@ openclaude
$env:CLAUDE_CODE_USE_OPENAI="1"
$env:OPENAI_API_KEY="sk-your-key-here"
$env:OPENAI_BASE_URL="https://api.deepseek.com/v1"
$env:OPENAI_MODEL="deepseek-v4-flash"
$env:OPENAI_MODEL="deepseek-chat"
openclaude
```
Use `deepseek-v4-pro` when you want the stronger model. `deepseek-chat` and `deepseek-reasoner` still work as DeepSeek's legacy API aliases.
### Option C: Ollama
Install Ollama first from:

View File

@@ -1,67 +0,0 @@
# Codebase Intelligence — Repo Map
The repo map feature gives the AI model structural awareness of your codebase at the start of each session. Instead of the model needing to explore the repository with `Grep`, `Glob`, and `Read` calls, it starts with a ranked summary of the most important files and their key signatures.
## How it works
1. **File enumeration** — Lists all tracked files via `git ls-files` (falls back to a manual directory walk when not in a git repo)
2. **Symbol extraction** — Parses each supported source file with tree-sitter to extract function, class, type, and interface definitions, plus cross-file references
3. **Reference graph** — Builds a directed graph where an edge from file A to file B means A references a symbol defined in B. Edges are weighted by reference count multiplied by the IDF (inverse document frequency) of the symbol name — common names like `get`, `set`, `value` contribute less
4. **PageRank** — Ranks files by structural importance using PageRank. Files imported by many others rank highest
5. **Rendering** — Walks ranked files top-down, emitting file paths and definition signatures, stopping when the token budget is reached
Results are cached to disk (`~/.openclaude/repomap-cache/`) keyed by file path, mtime, and size. Only changed files are re-parsed on subsequent runs.
## Supported languages
- TypeScript (`.ts`, `.tsx`)
- JavaScript (`.js`, `.jsx`, `.mjs`, `.cjs`)
- Python (`.py`)
Additional language grammars will be added in future releases.
## Enabling auto-injection
The repo map is gated behind the `REPO_MAP` feature flag, **off by default**. To enable auto-injection into the session context:
Set the environment variable before launching:
```bash
REPO_MAP=1 openclaude
```
Or add it to your shell profile for persistent use.
When enabled, the map is built once per session and prepended to the system context alongside git status and CLAUDE.md content. The default budget is 1024 tokens.
Auto-injection is skipped in:
- Bare mode (`--bare`)
- Remote sessions (`CLAUDE_CODE_REMOTE`)
## The /repomap slash command
The `/repomap` command is always available regardless of the feature flag. It lets you inspect and tune the map interactively.
```
/repomap # Show the map with default settings (1024 tokens)
/repomap --tokens 4096 # Increase the token budget for a larger map
/repomap --focus src/tools/ # Boost specific paths in the ranking
/repomap --focus src/context.ts # Can use multiple --focus flags
/repomap --stats # Show cache statistics
/repomap --invalidate # Clear cache and rebuild from scratch
```
## The RepoMap tool
The model can also call the `RepoMap` tool on demand during a session. This is useful when:
- The model needs structural context mid-conversation
- The user asks about specific areas (the model can pass `focus_files` or `focus_symbols`)
- A larger token budget is needed than the auto-injected default
## Known limitations
- **Signatures only** — The map shows function/class/type declarations, not implementations. The model still needs `Read` to see function bodies.
- **Cold build time** — First build on large repos (2000+ files) can take 20-30 seconds due to WASM-based parsing. Subsequent builds use the disk cache and complete in under 100ms.
- **Language coverage** — Only TypeScript, JavaScript, and Python are supported. Files in other languages are skipped.
- **TypeScript references** — The TypeScript tree-sitter query captures type annotations and `new` expressions as references, but not plain function calls. This means the ranking slightly favors type-heavy hub files.
- **Git dependency** — File enumeration uses `git ls-files` by default. Non-git repos fall back to a directory walk with hardcoded exclusions.

View File

@@ -1,7 +1,7 @@
{
"name": "@gitlawb/openclaude",
"version": "0.7.0",
"description": "OpenClaude opens coding-agent workflows to any LLM — OpenAI, Gemini, DeepSeek, Ollama, and 200+ models",
"version": "0.6.0",
"description": "Claude Code opened to any LLM — OpenAI, Gemini, DeepSeek, Ollama, and 200+ models",
"type": "module",
"bin": {
"openclaude": "./bin/openclaude"
@@ -93,15 +93,11 @@
"fflate": "0.8.2",
"figures": "6.1.0",
"fuse.js": "7.1.0",
"graphology": "^0.26.0",
"graphology-operators": "^1.6.0",
"get-east-asian-width": "1.5.0",
"google-auth-library": "9.15.1",
"https-proxy-agent": "7.0.6",
"ignore": "7.0.5",
"graphology-pagerank": "^1.1.0",
"indent-string": "5.0.0",
"js-tiktoken": "^1.0.16",
"jsonc-parser": "3.3.1",
"lodash-es": "4.18.1",
"lru-cache": "11.2.7",
@@ -121,12 +117,10 @@
"strip-ansi": "7.2.0",
"supports-hyperlinks": "3.2.0",
"tree-kill": "1.2.2",
"tree-sitter-wasms": "^0.1.12",
"turndown": "7.2.2",
"type-fest": "4.41.0",
"undici": "7.24.6",
"usehooks-ts": "3.1.1",
"web-tree-sitter": "^0.25.0",
"vscode-languageserver-protocol": "3.17.5",
"wrap-ansi": "9.0.2",
"ws": "8.20.0",

View File

@@ -34,10 +34,6 @@ const featureFlags: Record<string, boolean> = {
WEB_BROWSER_TOOL: false, // Built-in browser automation (source not mirrored)
CHICAGO_MCP: false, // Computer-use MCP (native Swift modules stubbed)
COWORKER_TYPE_TELEMETRY: false, // Telemetry for agent/coworker type classification
MCP_SKILLS: false, // Dynamic MCP skill discovery (src/skills/mcpSkills.ts not mirrored; enabling this causes "fetchMcpSkillsForClient is not a function" when MCP servers with resources connect — see #856)
// ── Disabled by default, opt-in via runtime env var ─────────────────
REPO_MAP: false, // Auto-injected codebase intelligence repo-map; users opt in with REPO_MAP=1 (the runtime gate in src/context.ts honors the env var even when this flag is false)
// ── Enabled: upstream defaults ──────────────────────────────────────
COORDINATOR_MODE: true, // Multi-agent coordinator with worker delegation
@@ -60,6 +56,7 @@ const featureFlags: Record<string, boolean> = {
EXTRACT_MEMORIES: true, // Auto-extract durable memories from conversations
FORK_SUBAGENT: true, // Implicit context-forking when omitting subagent_type
VERIFICATION_AGENT: true, // Built-in read-only agent for test/verification
MCP_SKILLS: true, // Discover skills dynamically from MCP server resources
PROMPT_CACHE_BREAK_DETECTION: true, // Detect & log unexpected prompt cache invalidations
HOOK_PROMPTS: true, // Allow tools to request interactive user prompts
}

View File

@@ -1,47 +0,0 @@
import { existsSync, readFileSync } from 'fs'
import { join } from 'path'
import { expect, test } from 'bun:test'
// Regression guard for #856. Several build feature flags require source files
// that are not mirrored into the open build. When such a flag is set to `true`
// without the source present, the bundler falls back to a missing-module stub
// that only exports `default`, which causes runtime errors like
// `fetchMcpSkillsForClient is not a function` when downstream code reaches
// through the `require()` to a named export.
//
// This test fails fast at test-time if someone re-enables one of these flags
// without first mirroring the corresponding source file.
const BUILD_SCRIPT = join(import.meta.dir, 'build.ts')
const REPO_ROOT = join(import.meta.dir, '..')
type FlagGuard = {
flag: string
source: string // path relative to repo root
}
const FLAG_REQUIRES_SOURCE: FlagGuard[] = [
{ flag: 'MCP_SKILLS', source: 'src/skills/mcpSkills.ts' },
]
test('build feature flags are not enabled without their source files', () => {
const buildScript = readFileSync(BUILD_SCRIPT, 'utf-8')
for (const { flag, source } of FLAG_REQUIRES_SOURCE) {
const enabledRe = new RegExp(`^\\s*${flag}\\s*:\\s*true\\b`, 'm')
const isEnabled = enabledRe.test(buildScript)
const sourceExists = existsSync(join(REPO_ROOT, source))
if (isEnabled && !sourceExists) {
throw new Error(
`Feature flag ${flag} is enabled in scripts/build.ts, but its required source file "${source}" does not exist. ` +
`Enabling this flag without the source will cause runtime errors (missing named exports from the missing-module stub). ` +
`Either mirror the source file or set ${flag}: false.`,
)
}
// When the source IS present, the flag can be either true or false; either
// is fine. We only care about the "enabled but missing" combination.
expect(true).toBe(true)
}
})

View File

@@ -169,14 +169,6 @@ describe('Web search result count improvements', () => {
expect(content).toMatch(/max_uses:\s*15/)
})
test('codex web search path guarantees a non-empty result body', async () => {
const content = await file(
'tools/WebSearchTool/WebSearchTool.ts',
).text()
expect(content).toContain("results.push('No results found.')")
})
})
// ---------------------------------------------------------------------------

View File

@@ -70,13 +70,13 @@ export async function isBridgeEnabledBlocking(): Promise<boolean> {
export async function getBridgeDisabledReason(): Promise<string | null> {
if (feature('BRIDGE_MODE')) {
if (!isClaudeAISubscriber()) {
return 'Remote Control requires a claude.ai subscription. Run `openclaude auth login` to sign in with your claude.ai account.'
return 'Remote Control requires a claude.ai subscription. Run `claude auth login` to sign in with your claude.ai account.'
}
if (!hasProfileScope()) {
return 'Remote Control requires a full-scope login token. Long-lived tokens (from `openclaude setup-token` or CLAUDE_CODE_OAUTH_TOKEN) are limited to inference-only for security reasons. Run `openclaude auth login` to use Remote Control.'
return 'Remote Control requires a full-scope login token. Long-lived tokens (from `claude setup-token` or CLAUDE_CODE_OAUTH_TOKEN) are limited to inference-only for security reasons. Run `claude auth login` to use Remote Control.'
}
if (!getOauthAccountInfo()?.organizationUuid) {
return 'Unable to determine your organization for Remote Control eligibility. Run `openclaude auth login` to refresh your account information.'
return 'Unable to determine your organization for Remote Control eligibility. Run `claude auth login` to refresh your account information.'
}
if (!(await checkGate_CACHED_OR_BLOCKING('tengu_ccr_bridge'))) {
return 'Remote Control is not yet enabled for your account.'
@@ -166,7 +166,7 @@ export function checkBridgeMinVersion(): string | null {
minVersion: string
}>('tengu_bridge_min_version', { minVersion: '0.0.0' })
if (config.minVersion && lt(MACRO.VERSION, config.minVersion)) {
return `Your version of OpenClaude (${MACRO.VERSION}) is too old for Remote Control.\nVersion ${config.minVersion} or higher is required. Run \`openclaude update\` to update.`
return `Your version of Claude Code (${MACRO.VERSION}) is too old for Remote Control.\nVersion ${config.minVersion} or higher is required. Run \`claude update\` to update.`
}
}
return null

View File

@@ -2248,7 +2248,7 @@ export async function bridgeMain(args: string[]): Promise<void> {
})
// biome-ignore lint/suspicious/noConsole: intentional dialog output
console.log(
`\nClaude Remote Control is launching in spawn mode which lets you create new sessions in this project from OpenClaude on the web or your mobile app. Learn more here: https://code.claude.com/docs/en/remote-control\n\n` +
`\nClaude Remote Control is launching in spawn mode which lets you create new sessions in this project from Claude Code on Web or your Mobile app. Learn more here: https://code.claude.com/docs/en/remote-control\n\n` +
`Spawn mode for this project:\n` +
` [1] same-dir \u2014 sessions share the current directory (default)\n` +
` [2] worktree \u2014 each session gets an isolated git worktree\n\n` +

View File

@@ -147,7 +147,7 @@ export async function getEnvLessBridgeConfig(): Promise<EnvLessBridgeConfig> {
export async function checkEnvLessBridgeMinVersion(): Promise<string | null> {
const cfg = await getEnvLessBridgeConfig()
if (cfg.min_version && lt(MACRO.VERSION, cfg.min_version)) {
return `Your version of OpenClaude (${MACRO.VERSION}) is too old for Remote Control.\nVersion ${cfg.min_version} or higher is required. Run \`openclaude update\` to update.`
return `Your version of Claude Code (${MACRO.VERSION}) is too old for Remote Control.\nVersion ${cfg.min_version} or higher is required. Run \`claude update\` to update.`
}
return null
}

View File

@@ -415,7 +415,7 @@ export async function initReplBridge(
`[bridge:repl] Skipping: ${versionError}`,
true,
)
onStateChange?.('failed', 'run `openclaude update` to upgrade')
onStateChange?.('failed', 'run `claude update` to upgrade')
return null
}
logForDebugging(
@@ -456,7 +456,7 @@ export async function initReplBridge(
const versionError = checkBridgeMinVersion()
if (versionError) {
logBridgeSkip('version_too_old', `[bridge:repl] Skipping: ${versionError}`)
onStateChange?.('failed', 'run `openclaude update` to upgrade')
onStateChange?.('failed', 'run `claude update` to upgrade')
return null
}

View File

@@ -147,7 +147,7 @@ export async function enrollTrustedDevice(): Promise<void> {
device_id?: string
}>(
`${baseUrl}/api/auth/trusted_devices`,
{ display_name: `OpenClaude on ${hostname()} · ${process.platform}` },
{ display_name: `Claude Code on ${hostname()} · ${process.platform}` },
{
headers: {
Authorization: `Bearer ${accessToken}`,

View File

@@ -287,7 +287,7 @@ export async function authStatus(opts: {
}
if (!loggedIn) {
process.stdout.write(
'Not logged in. Run openclaude auth login to authenticate.\n',
'Not logged in. Run claude auth login to authenticate.\n',
)
}
} else {

View File

@@ -83,7 +83,7 @@ export async function autoModeCritiqueHandler(options: {
process.stdout.write(
'No custom auto mode rules found.\n\n' +
'Add rules to your settings file under autoMode.{allow, soft_deny, environment}.\n' +
'Run `openclaude auto-mode defaults` to see the default rules for reference.\n',
'Run `claude auto-mode defaults` to see the default rules for reference.\n',
)
return
}

View File

@@ -233,7 +233,7 @@ export async function mcpRemoveHandler(name: string, options: {
});
process.stderr.write('\nTo remove from a specific scope, use:\n');
scopes.forEach(scope => {
process.stderr.write(` openclaude mcp remove "${name}" -s ${scope}\n`);
process.stderr.write(` claude mcp remove "${name}" -s ${scope}\n`);
});
cliError();
}
@@ -250,7 +250,7 @@ export async function mcpListHandler(): Promise<void> {
} = await getAllMcpConfigs();
if (Object.keys(configs).length === 0) {
// biome-ignore lint/suspicious/noConsole:: intentional console output
console.log('No MCP servers configured. Use `openclaude mcp add` to add a server.');
console.log('No MCP servers configured. Use `claude mcp add` to add a server.');
} else {
// biome-ignore lint/suspicious/noConsole:: intentional console output
console.log('Checking MCP server health...\n');
@@ -374,7 +374,7 @@ export async function mcpGetHandler(name: string): Promise<void> {
}
}
// biome-ignore lint/suspicious/noConsole:: intentional console output
console.log(`\nTo remove this server, run: openclaude mcp remove "${name}" -s ${server.scope}`);
console.log(`\nTo remove this server, run: claude mcp remove "${name}" -s ${server.scope}`);
// Use gracefulShutdown to properly clean up MCP server connections
// (process.exit bypasses cleanup handlers, leaving child processes orphaned)
await gracefulShutdown(0);
@@ -455,5 +455,5 @@ export async function mcpResetChoicesHandler(): Promise<void> {
disabledMcpjsonServers: [],
enableAllProjectMcpServers: false
}));
cliOk('All project-scoped (.mcp.json) server approvals and rejections have been reset.\n' + 'You will be prompted for approval next time you start OpenClaude.');
cliOk('All project-scoped (.mcp.json) server approvals and rejections have been reset.\n' + 'You will be prompted for approval next time you start Claude Code.');
}

View File

@@ -352,7 +352,7 @@ export async function pluginListHandler(options: {
// through to the session section so the failure is visible.
if (inlineLoadErrors.length === 0) {
cliOk(
'No plugins installed. Use `openclaude plugin install` to install a plugin.',
'No plugins installed. Use `claude plugin install` to install a plugin.',
)
}
}

View File

@@ -5026,7 +5026,7 @@ async function loadInitialMessages(
)
if (!parsedSessionId) {
let errorMessage =
'Error: --resume requires a valid session ID when used with --print. Usage: openclaude -p --resume <session-id>'
'Error: --resume requires a valid session ID when used with --print. Usage: claude -p --resume <session-id>'
if (typeof options.resume === 'string') {
errorMessage += `. Session IDs must be in UUID format (e.g., 550e8400-e29b-41d4-a716-446655440000). Provided value "${options.resume}" is not a valid UUID`
}

View File

@@ -35,20 +35,15 @@ export async function update() {
// binary (without it).
if (getAPIProvider() !== 'firstParty') {
writeToStdout(
chalk.yellow(
`Auto-update is not available for third-party provider builds.\n`,
) +
`Current version: ${MACRO.DISPLAY_VERSION}\n\n` +
`To update, reinstall from npm:\n` +
chalk.bold(` npm install -g ${MACRO.PACKAGE_URL}@latest`) + '\n\n' +
`Or, if you built from source, pull and rebuild:\n` +
chalk.bold(' git pull && bun install && bun run build') + '\n',
chalk.yellow('Auto-update is not available for third-party provider builds.\n') +
'To update, pull the latest source from the repository and rebuild:\n' +
' git pull && bun install && bun run build\n',
)
await gracefulShutdown(0)
return
}
logEvent('tengu_update_check', {})
writeToStdout(`Current version: ${MACRO.DISPLAY_VERSION}\n`)
writeToStdout(`Current version: ${MACRO.VERSION}\n`)
const channel = getInitialSettings()?.autoUpdatesChannel ?? 'latest'
writeToStdout(`Checking for updates to ${channel} version...\n`)
@@ -128,14 +123,9 @@ export async function update() {
if (diagnostic.installationType === 'development') {
writeToStdout('\n')
writeToStdout(
chalk.yellow('You are running a development build — auto-update is unavailable.') + '\n',
chalk.yellow('Warning: Cannot update development build') + '\n',
)
writeToStdout('To update, pull the latest source and rebuild:\n')
writeToStdout(chalk.bold(' git pull && bun install && bun run build') + '\n')
writeToStdout('\n')
writeToStdout('Or reinstall from npm:\n')
writeToStdout(chalk.bold(` npm install -g ${MACRO.PACKAGE_URL}@latest`) + '\n')
await gracefulShutdown(0)
await gracefulShutdown(1)
}
// Check if running from a package manager
@@ -146,8 +136,8 @@ export async function update() {
if (packageManager === 'homebrew') {
writeToStdout('Claude is managed by Homebrew.\n')
const latest = await getLatestVersion(channel)
if (latest && !gte(MACRO.DISPLAY_VERSION, latest)) {
writeToStdout(`Update available: ${MACRO.DISPLAY_VERSION}${latest}\n`)
if (latest && !gte(MACRO.VERSION, latest)) {
writeToStdout(`Update available: ${MACRO.VERSION}${latest}\n`)
writeToStdout('\n')
writeToStdout('To update, run:\n')
writeToStdout(chalk.bold(' brew upgrade claude-code') + '\n')
@@ -157,8 +147,8 @@ export async function update() {
} else if (packageManager === 'winget') {
writeToStdout('Claude is managed by winget.\n')
const latest = await getLatestVersion(channel)
if (latest && !gte(MACRO.DISPLAY_VERSION, latest)) {
writeToStdout(`Update available: ${MACRO.DISPLAY_VERSION}${latest}\n`)
if (latest && !gte(MACRO.VERSION, latest)) {
writeToStdout(`Update available: ${MACRO.VERSION}${latest}\n`)
writeToStdout('\n')
writeToStdout('To update, run:\n')
writeToStdout(
@@ -170,8 +160,8 @@ export async function update() {
} else if (packageManager === 'apk') {
writeToStdout('Claude is managed by apk.\n')
const latest = await getLatestVersion(channel)
if (latest && !gte(MACRO.DISPLAY_VERSION, latest)) {
writeToStdout(`Update available: ${MACRO.DISPLAY_VERSION}${latest}\n`)
if (latest && !gte(MACRO.VERSION, latest)) {
writeToStdout(`Update available: ${MACRO.VERSION}${latest}\n`)
writeToStdout('\n')
writeToStdout('To update, run:\n')
writeToStdout(chalk.bold(' apk upgrade claude-code') + '\n')
@@ -260,14 +250,14 @@ export async function update() {
await gracefulShutdown(1)
}
if (result.latestVersion === MACRO.DISPLAY_VERSION) {
if (result.latestVersion === MACRO.VERSION) {
writeToStdout(
chalk.green(`OpenClaude is up to date (${MACRO.DISPLAY_VERSION})`) + '\n',
chalk.green(`Claude Code is up to date (${MACRO.VERSION})`) + '\n',
)
} else {
writeToStdout(
chalk.green(
`Successfully updated from ${MACRO.DISPLAY_VERSION} to version ${result.latestVersion}`,
`Successfully updated from ${MACRO.VERSION} to version ${result.latestVersion}`,
) + '\n',
)
await regenerateCompletionCache()
@@ -276,7 +266,7 @@ export async function update() {
} catch (error) {
process.stderr.write('Error: Failed to install native update\n')
process.stderr.write(String(error) + '\n')
process.stderr.write('Try running "openclaude doctor" for diagnostics\n')
process.stderr.write('Try running "claude doctor" for diagnostics\n')
await gracefulShutdown(1)
}
}
@@ -330,15 +320,15 @@ export async function update() {
}
// Check if versions match exactly, including any build metadata (like SHA)
if (latestVersion === MACRO.DISPLAY_VERSION) {
if (latestVersion === MACRO.VERSION) {
writeToStdout(
chalk.green(`OpenClaude is up to date (${MACRO.DISPLAY_VERSION})`) + '\n',
chalk.green(`Claude Code is up to date (${MACRO.VERSION})`) + '\n',
)
await gracefulShutdown(0)
}
writeToStdout(
`New version available: ${latestVersion} (current: ${MACRO.DISPLAY_VERSION})\n`,
`New version available: ${latestVersion} (current: ${MACRO.VERSION})\n`,
)
writeToStdout('Installing update...\n')
@@ -398,7 +388,7 @@ export async function update() {
case 'success':
writeToStdout(
chalk.green(
`Successfully updated from ${MACRO.DISPLAY_VERSION} to version ${latestVersion}`,
`Successfully updated from ${MACRO.VERSION} to version ${latestVersion}`,
) + '\n',
)
await regenerateCompletionCache()

View File

@@ -21,9 +21,7 @@ import dream from './commands/dream/index.js'
import ctx_viz from './commands/ctx_viz/index.js'
import doctor from './commands/doctor/index.js'
import onboardGithub from './commands/onboard-github/index.js'
import knowledge from './commands/knowledge/index.js'
import memory from './commands/memory/index.js'
import repomap from './commands/repomap/index.js'
import help from './commands/help/index.js'
import ide from './commands/ide/index.js'
import init from './commands/init.js'
@@ -35,7 +33,6 @@ import installGitHubApp from './commands/install-github-app/index.js'
import installSlackApp from './commands/install-slack-app/index.js'
import breakCache from './commands/break-cache/index.js'
import cacheProbe from './commands/cache-probe/index.js'
import cacheStats from './commands/cacheStats/index.js'
import mcp from './commands/mcp/index.js'
import mobile from './commands/mobile/index.js'
import onboarding from './commands/onboarding/index.js'
@@ -200,7 +197,7 @@ import stats from './commands/stats/index.js'
const usageReport: Command = {
type: 'prompt',
name: 'insights',
description: 'Generate a report analyzing your OpenClaude sessions',
description: 'Generate a report analyzing your Claude Code sessions',
contentLength: 0,
progressMessage: 'analyzing your sessions',
source: 'builtin',
@@ -273,7 +270,6 @@ const COMMANDS = memoize((): Command[] => [
branch,
btw,
cacheProbe,
cacheStats,
chrome,
clear,
color,
@@ -296,7 +292,6 @@ const COMMANDS = memoize((): Command[] => [
ide,
init,
keybindings,
knowledge,
installGitHubApp,
installSlackApp,
mcp,
@@ -312,7 +307,6 @@ const COMMANDS = memoize((): Command[] => [
releaseNotes,
reloadPlugins,
rename,
repomap,
resume,
session,
skills,

View File

@@ -3,7 +3,7 @@ import type { Command } from '../../commands.js'
const buddy = {
type: 'local-jsx',
name: 'buddy',
description: 'Hatch, pet, and manage your OpenClaude companion',
description: 'Hatch, pet, and manage your Open Claude companion',
immediate: true,
argumentHint: '[status|mute|unmute|help]',
load: () => import('./buddy.js'),

View File

@@ -1,157 +0,0 @@
/**
* Tests for `/cache-stats` command rendering.
*
* The command has non-trivial string formatting (timestamp slicing, model
* label padding, conditional N/A footnote, recent-rows cap) which can
* silently regress — these snapshot tests keep it honest.
*/
import { beforeEach, describe, expect, test } from 'bun:test'
import type { CacheMetrics } from '../../services/api/cacheMetrics.js'
import {
_setHistoryCapForTesting,
recordRequest,
resetSessionCacheStats,
} from '../../services/api/cacheStatsTracker.js'
import { call } from './cacheStats.js'
// Build a CacheMetrics value for a provider that DOES report cache usage.
// Starts from an all-zero baseline and lets each test override only the
// fields it cares about.
function supported(partial: Partial<CacheMetrics>): CacheMetrics {
  const baseline: CacheMetrics = {
    read: 0,
    created: 0,
    total: 0,
    hitRate: null,
    supported: true,
  }
  return { ...baseline, ...partial }
}
// Metrics from a provider that exposes no cache usage at all — zeros plus
// supported: false (the command's footnote later names GitHub Copilot and
// Ollama as such providers).
const UNSUPPORTED: CacheMetrics = {
  read: 0,
  created: 0,
  total: 0,
  hitRate: null,
  supported: false,
}
// The command signature requires a LocalJSXCommandContext. Our command
// doesn't actually read it — we pass an empty stand-in so the test can
// invoke call() without dragging the whole REPL context in.
const EMPTY_CTX = {} as Parameters<typeof call>[1]
// /cache-stats always returns a text result. Narrow the union once here so
// the individual assertions don't have to repeat the discriminant check.
async function runCommand(): Promise<string> {
  const result = await call('', EMPTY_CTX)
  if (result.type === 'text') {
    return result.value
  }
  throw new Error(
    `cacheStats command must return type:'text', got ${result.type}`,
  )
}
// Reset the shared tracker before every test so recorded requests never
// leak between cases; the 500-entry cap is far above what any test records,
// so only the display-side MAX_RECENT_ROWS cap governs truncation here.
beforeEach(() => {
  resetSessionCacheStats()
  _setHistoryCapForTesting(500)
})
describe('/cache-stats — empty session', () => {
  test('shows friendly "no requests yet" message', async () => {
    const output = await runCommand()
    // Both the explanation and the command name must appear so the user
    // knows to re-run /cache-stats after a turn.
    expect(output).toContain('No API requests yet this session')
    expect(output).toContain('/cache-stats')
  })
})
describe('/cache-stats — supported-only session', () => {
  test('renders Cache stats header, turn and session summaries', async () => {
    recordRequest(
      supported({ read: 500, total: 1_000, hitRate: 0.5 }),
      'claude-sonnet-4',
    )
    const output = await runCommand()
    // Header plus both summary lines must all be present.
    const required = ['Cache stats', 'Current turn:', 'Session total:']
    for (const fragment of required) {
      expect(output).toContain(fragment)
    }
    // Compact metric line should appear in the recent-requests table.
    expect(output).toContain('claude-sonnet-4')
    expect(output).toContain('read')
  })
  test('omits the N/A footnote when every row is supported', async () => {
    recordRequest(supported({ read: 200, total: 400, hitRate: 0.5 }), 'model-A')
    const output = await runCommand()
    expect(output).not.toContain('N/A rows')
  })
})
describe('/cache-stats — mixed supported + unsupported', () => {
  test('renders N/A footnote when any row is unsupported', async () => {
    // One metric-less row is enough to trigger the footnote, even when a
    // fully-reported row is also present.
    recordRequest(UNSUPPORTED, 'gpt-4-copilot')
    recordRequest(
      supported({ read: 100, total: 500, hitRate: 0.2 }),
      'claude-sonnet-4',
    )
    const output = await runCommand()
    expect(output).toContain(
      'N/A rows: provider API does not expose cache usage',
    )
    // Both known metric-less providers are named in the footnote.
    expect(output).toContain('GitHub Copilot')
    expect(output).toContain('Ollama')
  })
})
describe('/cache-stats — recent-rows cap', () => {
  test('caps the breakdown at 20 rows and reports omitted count', async () => {
    // 25 entries: five more than the 20-row display cap.
    for (const i of Array.from({ length: 25 }, (_, n) => n)) {
      recordRequest(
        supported({ read: i, total: 100, hitRate: i / 100 }),
        `model-${i}`,
      )
    }
    const output = await runCommand()
    // 20 shown, 5 omitted from the oldest end.
    expect(output).toContain('(20 of 25, 5 older omitted)')
    // Oldest rows (model-0..model-4) should not appear; newest must.
    expect(output).toContain('model-24')
    expect(output).not.toContain('model-0 ')
  })
  test('does not mention "older omitted" when all rows fit', async () => {
    for (let i = 0; i < 5; i++) {
      recordRequest(supported({ read: i, total: 10 }), `m${i}`)
    }
    const output = await runCommand()
    expect(output).not.toContain('older omitted')
    expect(output).toContain('(5)')
  })
})
describe('/cache-stats — model label rendering', () => {
  test('truncates long model labels to fit the column width', async () => {
    // cacheStats.ts pads+slices the label to 28 chars for alignment.
    const longLabel = 'some-extremely-long-model-identifier-that-wraps'
    recordRequest(supported({ read: 10, total: 100, hitRate: 0.1 }), longLabel)
    const output = await runCommand()
    const truncated = longLabel.slice(0, 28)
    // The 28-char prefix is rendered...
    expect(output).toContain(truncated)
    // ...while the full string must NOT appear (that would mean no truncation).
    expect(output).not.toContain(longLabel)
  })
})
describe('/cache-stats — timestamp rendering', () => {
test('renders each row with full date and time (YYYY-MM-DD HH:MM:SS)', async () => {
recordRequest(supported({ read: 5, total: 10, hitRate: 0.5 }), 'claude-x')
const value = await runCommand()
// Match the full ISO-ish date + time the row uses. We assert the shape,
// not a specific timestamp — real clock is used, so a regex on the
// format is the right assertion.
expect(value).toMatch(/\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}/)
// Bare time-of-day alone (no date) should NOT appear in isolation — it
// must always be preceded by the date. Guards against regression if
// someone shortens the formatter again.
const timeOnlyInRow = /\n\s*#\s*\d+\s+\d{2}:\d{2}:\d{2}\s/.test(value)
expect(timeOnlyInRow).toBe(false)
})
})

View File

@@ -1,74 +0,0 @@
import {
getCacheStatsHistory,
getCurrentTurnCacheMetrics,
getSessionCacheMetrics,
type CacheStatsEntry,
} from '../../services/api/cacheStatsTracker.js'
import {
formatCacheMetricsCompact,
formatCacheMetricsFull,
type CacheMetrics,
} from '../../services/api/cacheMetrics.js'
import type { LocalCommandCall } from '../../types/command.js'
// Cap the per-request breakdown to keep output readable. Users wanting
// the full history can rely on OPENCLAUDE_LOG_TOKEN_USAGE=verbose for
// structured per-request stderr output.
// Display-only limit: the tracker's own history cap is separate.
const MAX_RECENT_ROWS = 20
// Render one history entry as a fixed-width table row.
// Timestamp is `YYYY-MM-DD HH:MM:SS` — long-running sessions can span
// midnight and a bare time-of-day makes the wrong row look "most recent"
// when two entries on different days share the same HH:MM:SS.
function formatRow(entry: CacheStatsEntry, idx: number): string {
  const iso = new Date(entry.timestamp).toISOString()
  const date = iso.slice(0, 10)
  const time = iso.slice(11, 19)
  // Pad-then-slice keeps the label column exactly 28 chars wide whether the
  // model name is short or overlong.
  const label = entry.label.padEnd(28).slice(0, 28)
  const rowNumber = String(idx + 1).padStart(3)
  const metrics = formatCacheMetricsCompact(entry.metrics)
  return ` #${rowNumber} ${date} ${time} ${label} ${metrics}`
}
// One aligned summary line: an 18-char label column followed by the full
// metrics rendering.
function summarize(label: string, m: CacheMetrics): string {
  const paddedLabel = label.padEnd(18)
  return paddedLabel + formatCacheMetricsFull(m)
}
// /cache-stats entry point: renders turn + session aggregates, a capped
// table of recent requests, and (when needed) an honesty footnote for
// providers that don't report cache usage.
export const call: LocalCommandCall = async () => {
  const history = getCacheStatsHistory()
  // Friendly empty state — nothing has been recorded yet this session.
  if (history.length === 0) {
    return {
      type: 'text',
      value:
        'Cache stats\n No API requests yet this session.\n Start a turn and re-run /cache-stats to see results.',
    }
  }
  const session = getSessionCacheMetrics()
  const turn = getCurrentTurnCacheMetrics()
  const recent = history.slice(-MAX_RECENT_ROWS)
  const omitted = history.length - recent.length
  const countSuffix =
    omitted > 0 ? ` of ${history.length}, ${omitted} older omitted` : ''
  const out: string[] = [
    'Cache stats',
    '',
    summarize('Current turn:', turn),
    summarize('Session total:', session),
    '',
    `Recent requests (${recent.length}${countSuffix}):`,
    ` # time model cache`,
  ]
  // Row numbers are absolute positions in the full history, so the first
  // displayed row after truncation is numbered (omitted + 1), not 1.
  recent.forEach((entry, i) => {
    out.push(formatRow(entry, omitted + i))
  })
  // Honesty footnote — providers without cache reporting (vanilla Copilot,
  // Ollama) show [Cache: N/A] rather than a fake 0%. Tell the user so they
  // don't read "N/A" as "broken".
  if (recent.some((e) => !e.metrics.supported)) {
    out.push('')
    out.push(
      ' N/A rows: provider API does not expose cache usage (GitHub Copilot, Ollama).',
    )
    out.push(
      ' The request still ran normally — only the metric is unavailable.',
    )
  }
  return { type: 'text', value: out.join('\n') }
}

View File

@@ -1,24 +0,0 @@
/**
* /cache-stats — per-session cache diagnostics.
*
* Always-on diagnostic command (no toggle) that surfaces the metrics
* tracked in `cacheStatsTracker.ts`. Breaks cache usage down by request
* and also reports the session-wide aggregate — useful when the user
* suspects a cache bust (e.g. after /reload-plugins) and wants to see
* whether recent turns still hit the cache.
*
* Lazy-loaded (implementation in cacheStats.ts) to keep startup time
* minimal — same pattern used by /cost and /cache-probe.
*/
import type { Command } from '../../commands.js'
const cacheStats = {
  name: 'cache-stats',
  type: 'local',
  // Also usable outside the interactive REPL (print / non-interactive runs).
  supportsNonInteractive: true,
  description:
    'Show per-turn and session cache hit/miss stats (works across all providers)',
  // Dynamic import defers loading the implementation until first use.
  load: () => import('./cacheStats.js'),
} satisfies Command
export default cacheStats

View File

@@ -197,7 +197,7 @@ function ClaudeInChromeMenu(t0) {
}
let t6;
if ($[20] === Symbol.for("react.memo_cache_sentinel")) {
t6 = <Text>Claude in Chrome works with the Chrome extension to let you control your browser directly from OpenClaude. Navigate websites, fill forms, capture screenshots, record GIFs, and debug with console logs and network requests.</Text>;
t6 = <Text>Claude in Chrome works with the Chrome extension to let you control your browser directly from Claude Code. Navigate websites, fill forms, capture screenshots, record GIFs, and debug with console logs and network requests.</Text>;
$[20] = t6;
} else {
t6 = $[20];

View File

@@ -48,7 +48,7 @@ export function createMovedToPluginCommand({
text: `This command has been moved to a plugin. Tell the user:
1. To install the plugin, run:
openclaude plugin install ${pluginName}@claude-code-marketplace
claude plugin install ${pluginName}@claude-code-marketplace
2. After installation, use /${pluginName}:${pluginCommand} to run this command

View File

@@ -3,7 +3,7 @@ import { isEnvTruthy } from '../../utils/envUtils.js'
const doctor: Command = {
name: 'doctor',
description: 'Diagnose and verify your OpenClaude installation and settings',
description: 'Diagnose and verify your Claude Code installation and settings',
isEnabled: () => !isEnvTruthy(process.env.DISABLE_DOCTOR_COMMAND),
type: 'local-jsx',
load: () => import('./doctor.js'),

View File

@@ -7,7 +7,7 @@ const feedback = {
aliases: ['bug'],
type: 'local-jsx',
name: 'feedback',
description: `Submit feedback about OpenClaude`,
description: `Submit feedback about Claude Code`,
argumentHint: '[report]',
isEnabled: () =>
!(

View File

@@ -247,7 +247,7 @@ function getSessionMetaDir(): string {
return join(getDataDir(), 'session-meta')
}
const FACET_EXTRACTION_PROMPT = `Analyze this OpenClaude session and extract structured facets.
const FACET_EXTRACTION_PROMPT = `Analyze this Claude Code session and extract structured facets.
CRITICAL GUIDELINES:
@@ -687,7 +687,7 @@ function formatTranscriptForFacets(log: LogOption): string {
return lines.join('\n')
}
const SUMMARIZE_CHUNK_PROMPT = `Summarize this portion of a OpenClaude session transcript. Focus on:
const SUMMARIZE_CHUNK_PROMPT = `Summarize this portion of a Claude Code session transcript. Focus on:
1. What the user asked for
2. What Claude did (tools used, files modified)
3. Any friction or issues
@@ -1156,12 +1156,12 @@ type InsightSection = {
const INSIGHT_SECTIONS: InsightSection[] = [
{
name: 'project_areas',
prompt: `Analyze this OpenClaude usage data and identify project areas.
prompt: `Analyze this Claude Code usage data and identify project areas.
RESPOND WITH ONLY A VALID JSON OBJECT:
{
"areas": [
{"name": "Area name", "session_count": N, "description": "2-3 sentences about what was worked on and how OpenClaude was used."}
{"name": "Area name", "session_count": N, "description": "2-3 sentences about what was worked on and how Claude Code was used."}
]
}
@@ -1170,18 +1170,18 @@ Include 4-5 areas. Skip internal CC operations.`,
},
{
name: 'interaction_style',
prompt: `Analyze this OpenClaude usage data and describe the user's interaction style.
prompt: `Analyze this Claude Code usage data and describe the user's interaction style.
RESPOND WITH ONLY A VALID JSON OBJECT:
{
"narrative": "2-3 paragraphs analyzing HOW the user interacts with OpenClaude. Use second person 'you'. Describe patterns: iterate quickly vs detailed upfront specs? Interrupt often or let Claude run? Include specific examples. Use **bold** for key insights.",
"narrative": "2-3 paragraphs analyzing HOW the user interacts with Claude Code. Use second person 'you'. Describe patterns: iterate quickly vs detailed upfront specs? Interrupt often or let Claude run? Include specific examples. Use **bold** for key insights.",
"key_pattern": "One sentence summary of most distinctive interaction style"
}`,
maxTokens: 8192,
},
{
name: 'what_works',
prompt: `Analyze this OpenClaude usage data and identify what's working well for this user. Use second person ("you").
prompt: `Analyze this Claude Code usage data and identify what's working well for this user. Use second person ("you").
RESPOND WITH ONLY A VALID JSON OBJECT:
{
@@ -1196,7 +1196,7 @@ Include 3 impressive workflows.`,
},
{
name: 'friction_analysis',
prompt: `Analyze this OpenClaude usage data and identify friction points for this user. Use second person ("you").
prompt: `Analyze this Claude Code usage data and identify friction points for this user. Use second person ("you").
RESPOND WITH ONLY A VALID JSON OBJECT:
{
@@ -1211,7 +1211,7 @@ Include 3 friction categories with 2 examples each.`,
},
{
name: 'suggestions',
prompt: `Analyze this OpenClaude usage data and suggest improvements.
prompt: `Analyze this Claude Code usage data and suggest improvements.
## CC FEATURES REFERENCE (pick from these for features_to_try):
1. **MCP Servers**: Connect Claude to external tools, databases, and APIs via Model Context Protocol.
@@ -1254,7 +1254,7 @@ IMPORTANT for features_to_try: Pick 2-3 from the CC FEATURES REFERENCE above. In
},
{
name: 'on_the_horizon',
prompt: `Analyze this OpenClaude usage data and identify future opportunities.
prompt: `Analyze this Claude Code usage data and identify future opportunities.
RESPOND WITH ONLY A VALID JSON OBJECT:
{
@@ -1271,7 +1271,7 @@ Include 3 opportunities. Think BIG - autonomous workflows, parallel agents, iter
? [
{
name: 'cc_team_improvements',
prompt: `Analyze this OpenClaude usage data and suggest product improvements for the CC team.
prompt: `Analyze this Claude Code usage data and suggest product improvements for the CC team.
RESPOND WITH ONLY A VALID JSON OBJECT:
{
@@ -1285,7 +1285,7 @@ Include 2-3 improvements based on friction patterns observed.`,
},
{
name: 'model_behavior_improvements',
prompt: `Analyze this OpenClaude usage data and suggest model behavior improvements.
prompt: `Analyze this Claude Code usage data and suggest model behavior improvements.
RESPOND WITH ONLY A VALID JSON OBJECT:
{
@@ -1301,7 +1301,7 @@ Include 2-3 improvements based on friction patterns observed.`,
: []),
{
name: 'fun_ending',
prompt: `Analyze this OpenClaude usage data and find a memorable moment.
prompt: `Analyze this Claude Code usage data and find a memorable moment.
RESPOND WITH ONLY A VALID JSON OBJECT:
{
@@ -1555,7 +1555,7 @@ async function generateParallelInsights(
.join('\n') || ''
// Now generate "At a Glance" with access to other sections' outputs
const atAGlancePrompt = `You're writing an "At a Glance" summary for a OpenClaude usage insights report for OpenClaude users. The goal is to help them understand their usage and improve how they can use Claude better, especially as models improve.
const atAGlancePrompt = `You're writing an "At a Glance" summary for a Claude Code usage insights report for Claude Code users. The goal is to help them understand their usage and improve how they can use Claude better, especially as models improve.
Use this 4-part structure:
@@ -1563,7 +1563,7 @@ Use this 4-part structure:
2. **What's hindering you** - Split into (a) Claude's fault (misunderstandings, wrong approaches, bugs) and (b) user-side friction (not providing enough context, environment issues -- ideally more general than just one project). Be honest but constructive.
3. **Quick wins to try** - Specific OpenClaude features they could try from the examples below, or a workflow technique if you think it's really compelling. (Avoid stuff like "Ask Claude to confirm before taking actions" or "Type out more context up front" which are less compelling.)
3. **Quick wins to try** - Specific Claude Code features they could try from the examples below, or a workflow technique if you think it's really compelling. (Avoid stuff like "Ask Claude to confirm before taking actions" or "Type out more context up front" which are less compelling.)
4. **Ambitious workflows for better models** - As we move to much more capable models over the next 3-6 months, what should they prepare for? What workflows that seem impossible now will become possible? Draw from the appropriate section below.
@@ -1826,7 +1826,7 @@ function generateHtmlReport(
const interactionStyle = insights.interaction_style
const interactionHtml = interactionStyle?.narrative
? `
<h2 id="section-usage">How You Use OpenClaude</h2>
<h2 id="section-usage">How You Use Claude Code</h2>
<div class="narrative">
${markdownToHtml(interactionStyle.narrative)}
${interactionStyle.key_pattern ? `<div class="key-insight"><strong>Key pattern:</strong> ${escapeHtml(interactionStyle.key_pattern)}</div>` : ''}
@@ -1890,7 +1890,7 @@ function generateHtmlReport(
<h2 id="section-features">Existing CC Features to Try</h2>
<div class="claude-md-section">
<h3>Suggested CLAUDE.md Additions</h3>
<p style="font-size: 12px; color: #64748b; margin-bottom: 12px;">Just copy this into OpenClaude to add it to your CLAUDE.md.</p>
<p style="font-size: 12px; color: #64748b; margin-bottom: 12px;">Just copy this into Claude Code to add it to your CLAUDE.md.</p>
<div class="claude-md-actions">
<button class="copy-all-btn" onclick="copyAllCheckedClaudeMd()">Copy All Checked</button>
</div>
@@ -1915,7 +1915,7 @@ function generateHtmlReport(
${
suggestions.features_to_try && suggestions.features_to_try.length > 0
? `
<p style="font-size: 13px; color: #64748b; margin-bottom: 12px;">Just copy this into OpenClaude and it'll set it up for you.</p>
<p style="font-size: 13px; color: #64748b; margin-bottom: 12px;">Just copy this into Claude Code and it'll set it up for you.</p>
<div class="features-section">
${suggestions.features_to_try
.map(
@@ -1949,8 +1949,8 @@ function generateHtmlReport(
${
suggestions.usage_patterns && suggestions.usage_patterns.length > 0
? `
<h2 id="section-patterns">New Ways to Use OpenClaude</h2>
<p style="font-size: 13px; color: #64748b; margin-bottom: 12px;">Just copy this into OpenClaude and it'll walk you through it.</p>
<h2 id="section-patterns">New Ways to Use Claude Code</h2>
<p style="font-size: 13px; color: #64748b; margin-bottom: 12px;">Just copy this into Claude Code and it'll walk you through it.</p>
<div class="patterns-section">
${suggestions.usage_patterns
.map(
@@ -1963,7 +1963,7 @@ function generateHtmlReport(
pat.copyable_prompt
? `
<div class="copyable-prompt-section">
<div class="prompt-label">Paste into OpenClaude:</div>
<div class="prompt-label">Paste into Claude Code:</div>
<div class="copyable-prompt-row">
<code class="copyable-prompt">${escapeHtml(pat.copyable_prompt)}</code>
<button class="copy-btn" onclick="copyText(this)">Copy</button>
@@ -1998,7 +1998,7 @@ function generateHtmlReport(
<div class="horizon-title">${escapeHtml(opp.title || '')}</div>
<div class="horizon-possible">${escapeHtml(opp.whats_possible || '')}</div>
${opp.how_to_try ? `<div class="horizon-tip"><strong>Getting started:</strong> ${escapeHtml(opp.how_to_try)}</div>` : ''}
${opp.copyable_prompt ? `<div class="pattern-prompt"><div class="prompt-label">Paste into OpenClaude:</div><code>${escapeHtml(opp.copyable_prompt)}</code><button class="copy-btn" onclick="copyText(this)">Copy</button></div>` : ''}
${opp.copyable_prompt ? `<div class="pattern-prompt"><div class="prompt-label">Paste into Claude Code:</div><code>${escapeHtml(opp.copyable_prompt)}</code><button class="copy-btn" onclick="copyText(this)">Copy</button></div>` : ''}
</div>
`,
)
@@ -2305,13 +2305,13 @@ function generateHtmlReport(
<html>
<head>
<meta charset="utf-8">
<title>OpenClaude Insights</title>
<title>Claude Code Insights</title>
<link href="https://fonts.googleapis.com/css2?family=Inter:wght@400;500;600;700&display=swap" rel="stylesheet">
<style>${css}</style>
</head>
<body>
<div class="container">
<h1>OpenClaude Insights</h1>
<h1>Claude Code Insights</h1>
<p class="subtitle">${data.total_messages.toLocaleString()} messages across ${data.total_sessions} sessions${data.total_sessions_scanned && data.total_sessions_scanned > data.total_sessions ? ` (${data.total_sessions_scanned.toLocaleString()} total)` : ''} | ${data.date_range.start} to ${data.date_range.end}</p>
${atAGlanceHtml}
@@ -2377,7 +2377,7 @@ function generateHtmlReport(
data.multi_clauding.overlap_events === 0
? `
<p style="font-size: 14px; color: #64748b; padding: 8px 0;">
No parallel session usage detected. You typically work with one OpenClaude session at a time.
No parallel session usage detected. You typically work with one Claude Code session at a time.
</p>
`
: `
@@ -2396,7 +2396,7 @@ function generateHtmlReport(
</div>
</div>
<p style="font-size: 13px; color: #475569; margin-top: 12px;">
You run multiple OpenClaude sessions simultaneously. Multi-clauding is detected when sessions
You run multiple Claude Code sessions simultaneously. Multi-clauding is detected when sessions
overlap in time, suggesting parallel workflows.
</p>
`
@@ -2836,7 +2836,7 @@ function safeKeys(obj: Record<string, unknown> | undefined | null): string[] {
const usageReport: Command = {
type: 'prompt',
name: 'insights',
description: 'Generate a report analyzing your OpenClaude sessions',
description: 'Generate a report analyzing your Claude Code sessions',
contentLength: 0, // Dynamic content
progressMessage: 'analyzing your sessions',
source: 'builtin',
@@ -2874,7 +2874,7 @@ ${atAGlance.quick_wins ? `**Quick wins to try:** ${atAGlance.quick_wins} See _Fe
${atAGlance.ambitious_workflows ? `**Ambitious workflows:** ${atAGlance.ambitious_workflows} See _On the Horizon_.` : ''}`
: '_No insights generated_'
const header = `# OpenClaude Insights
const header = `# Claude Code Insights
${stats}
${data.date_range.start} to ${data.date_range.end}
@@ -2888,7 +2888,7 @@ Your full shareable insights report is ready: ${reportUrl}${uploadHint}`
return [
{
type: 'text',
text: `The user just ran /insights to generate a usage report analyzing their OpenClaude sessions.
text: `The user just ran /insights to generate a usage report analyzing their Claude Code sessions.
Here is the full insights data:
${jsonStringify(insights, null, 2)}

View File

@@ -210,12 +210,12 @@ function Install({
useEffect(() => {
if (state.type === 'success') {
// Give success message time to render before exiting
setTimeout(onDone, 2000, 'OpenClaude installation completed successfully', {
setTimeout(onDone, 2000, 'Claude Code installation completed successfully', {
display: 'system' as const
});
} else if (state.type === 'error') {
// Give error message time to render before exiting
setTimeout(onDone, 3000, 'OpenClaude installation failed', {
setTimeout(onDone, 3000, 'Claude Code installation failed', {
display: 'system' as const
});
}
@@ -226,7 +226,7 @@ function Install({
{state.type === 'cleaning-npm' && <Text color="warning">Cleaning up old npm installations...</Text>}
{state.type === 'installing' && <Text color="claude">
Installing OpenClaude native build {state.version}...
Installing Claude Code native build {state.version}...
</Text>}
{state.type === 'setting-up' && <Text color="claude">Setting up launcher and shell integration...</Text>}
@@ -237,7 +237,7 @@ function Install({
<Box>
<StatusIcon status="success" withSpace />
<Text color="success" bold>
OpenClaude successfully installed!
Claude Code successfully installed!
</Text>
</Box>
<Box marginLeft={2} flexDirection="column" gap={1}>
@@ -254,7 +254,7 @@ function Install({
<Box marginTop={1}>
<Text dimColor>Next: Run </Text>
<Text color="claude" bold>
openclaude --help
claude --help
</Text>
<Text dimColor> to get started</Text>
</Box>
@@ -279,7 +279,7 @@ function Install({
export const install = {
type: 'local-jsx' as const,
name: 'install',
description: 'Install OpenClaude native build',
description: 'Install Claude Code native build',
argumentHint: '[options]',
async call(onDone: (result: string, options?: {
display?: CommandResultDisplay;

View File

@@ -1,12 +0,0 @@
import type { Command } from '../../commands.js'
// Metadata-only stub for /knowledge; the implementation is lazy-loaded on
// first use via `load`. Uses `satisfies Command` (not a `: Command`
// annotation) for consistency with the sibling command stubs (plugin, stats,
// repomap), which also preserves the literal property types.
const knowledge = {
  type: 'local',
  name: 'knowledge',
  description: 'Manage native Knowledge Graph',
  supportsNonInteractive: true,
  argumentHint: 'enable <yes|no> | clear | status | list',
  load: () => import('./knowledge.js'),
} satisfies Command
export default knowledge

View File

@@ -1,74 +0,0 @@
import { describe, expect, it, beforeEach } from 'bun:test'
import { call as knowledgeCall } from './knowledge.js'
import { getGlobalConfig, saveGlobalConfig } from '../../utils/config.js'
import { getArc, addEntity, resetArc } from '../../utils/conversationArc.js'
import { getGlobalGraph, resetGlobalGraph } from '../../utils/knowledgeGraph.js'
describe('knowledge command', () => {
  // The command paths under test never read the context object.
  const mockContext = {} as any

  /** Invoke the command and return its text output ('' for non-text results). */
  const knowledgeCallWithCapture = async (args: string) => {
    const result = await knowledgeCall(args, mockContext)
    if (result.type === 'text') {
      return result.value
    }
    return ''
  }

  // Consolidated setup — previously registered as two separate beforeEach
  // hooks. Effect order is preserved: reset in-memory state, then re-enable
  // the feature flag so each test starts from a known-enabled baseline.
  beforeEach(() => {
    resetArc()
    resetGlobalGraph()
    // Attempt to reset config - even if mocked, we try to set our key
    try {
      saveGlobalConfig(current => ({
        ...current,
        knowledgeGraphEnabled: true,
      }))
    } catch {
      // Ignore if config is heavily mocked
    }
    // Kept from the original second hook; harmless re-reset after config save.
    resetArc()
  })

  it('enables and disables knowledge graph engine', async () => {
    // Test Disable
    const res1 = await knowledgeCallWithCapture('enable no')
    expect(res1.toLowerCase()).toContain('disabled')
    // Safety check: only verify state if property is actually present (avoid CI mock interference)
    const config1 = getGlobalConfig()
    if (config1 && 'knowledgeGraphEnabled' in config1) {
      expect(config1.knowledgeGraphEnabled).toBe(false)
    }
    // Test Enable
    const res2 = await knowledgeCallWithCapture('enable yes')
    expect(res2.toLowerCase()).toContain('enabled')
    const config2 = getGlobalConfig()
    if (config2 && 'knowledgeGraphEnabled' in config2) {
      expect(config2.knowledgeGraphEnabled).toBe(true)
    }
  })

  it('clears the knowledge graph', async () => {
    // Add a fact first
    addEntity('test', 'fact')
    const graph = getGlobalGraph()
    expect(Object.keys(graph.entities).length).toBe(1)
    // Clear it
    const res = await knowledgeCallWithCapture('clear')
    const graphAfter = getGlobalGraph()
    expect(Object.keys(graphAfter.entities).length).toBe(0)
    expect(res.toLowerCase()).toContain('cleared')
  })

  it('shows error on unknown subcommand', async () => {
    const res = await knowledgeCallWithCapture('invalid')
    expect(res.toLowerCase()).toContain('unknown subcommand')
  })
})

View File

@@ -1,63 +0,0 @@
import type { LocalCommandCall } from '../../types/command.js';
import { getArcSummary, resetArc, getArcStats } from '../../utils/conversationArc.js';
import { getGlobalGraph, resetGlobalGraph } from '../../utils/knowledgeGraph.js';
import { getGlobalConfig, saveGlobalConfig } from '../../utils/config.js';
import chalk from 'chalk';
export const call: LocalCommandCall = async (args, _context) => {
const arg = (args ? String(args) : '').trim().toLowerCase();
const splitArgs = arg.split(/\s+/).filter(Boolean);
const subCommand = splitArgs[0];
if (!subCommand || subCommand === 'status') {
const config = getGlobalConfig();
const stats = getArcStats();
const graph = getGlobalGraph();
const entityCount = Object.keys(graph.entities).length;
const statusText = (config.knowledgeGraphEnabled !== false)
? chalk.green('ENABLED')
: chalk.red('DISABLED');
let output = `${chalk.bold('Knowledge Graph Engine')}: ${statusText}\n`;
if (stats) {
output += `• Stats: ${stats.goalCount} goals, ${stats.milestoneCount} milestones, ${entityCount} technical facts learned`;
}
return { type: 'text', value: output };
}
if (subCommand === 'enable') {
const val = splitArgs[1];
const isEnabled = val === 'yes' || val === 'true';
const isDisabled = val === 'no' || val === 'false';
if (!isEnabled && !isDisabled) {
return { type: 'text', value: 'Usage: /knowledge enable <yes|no>' };
}
saveGlobalConfig(current => ({ ...current, knowledgeGraphEnabled: isEnabled }));
return {
type: 'text',
value: `✨ Knowledge Graph engine ${isEnabled ? chalk.green('enabled') : chalk.red('disabled')}.`
};
}
if (subCommand === 'clear') {
resetArc();
resetGlobalGraph();
return {
type: 'text',
value: '🗑️ Knowledge graph memory has been cleared for this session.'
};
}
if (subCommand === 'list') {
return { type: 'text', value: getArcSummary() };
}
return {
type: 'text',
value: `Unknown subcommand: ${subCommand}. Available: enable, clear, status, list`
};
};

View File

@@ -34,16 +34,16 @@ export function registerMcpAddCommand(mcp: Command): void {
mcp
.command('add <name> <commandOrUrl> [args...]')
.description(
'Add an MCP server to OpenClaude.\n\n' +
'Add an MCP server to Claude Code.\n\n' +
'Examples:\n' +
' # Add HTTP server:\n' +
' openclaude mcp add --transport http sentry https://mcp.sentry.dev/mcp\n\n' +
' claude mcp add --transport http sentry https://mcp.sentry.dev/mcp\n\n' +
' # Add HTTP server with headers:\n' +
' openclaude mcp add --transport http corridor https://app.corridor.dev/api/mcp --header "Authorization: Bearer ..."\n\n' +
' claude mcp add --transport http corridor https://app.corridor.dev/api/mcp --header "Authorization: Bearer ..."\n\n' +
' # Add stdio server with environment variables:\n' +
' openclaude mcp add -e API_KEY=xxx my-server -- npx my-mcp-server\n\n' +
' claude mcp add -e API_KEY=xxx my-server -- npx my-mcp-server\n\n' +
' # Add stdio server with subprocess flags:\n' +
' openclaude mcp add my-server -- my-command --some-flag arg1',
' claude mcp add my-server -- my-command --some-flag arg1',
)
.option(
'-s, --scope <scope>',
@@ -75,7 +75,7 @@ export function registerMcpAddCommand(mcp: Command): void {
.addOption(
new Option(
'--xaa',
"Enable XAA (SEP-990) for this server. Requires 'openclaude mcp xaa setup' first. Also requires --client-id and --client-secret (for the MCP server's AS).",
"Enable XAA (SEP-990) for this server. Requires 'claude mcp xaa setup' first. Also requires --client-id and --client-secret (for the MCP server's AS).",
).hideHelp(!isXaaEnabled()),
)
.action(async (name, commandOrUrl, args, options) => {
@@ -87,12 +87,12 @@ export function registerMcpAddCommand(mcp: Command): void {
if (!name) {
cliError(
'Error: Server name is required.\n' +
'Usage: openclaude mcp add <name> <command> [args...]',
'Usage: claude mcp add <name> <command> [args...]',
)
} else if (!actualCommand) {
cliError(
'Error: Command is required when server name is provided.\n' +
'Usage: openclaude mcp add <name> <command> [args...]',
'Usage: claude mcp add <name> <command> [args...]',
)
}
@@ -113,7 +113,7 @@ export function registerMcpAddCommand(mcp: Command): void {
if (!options.clientSecret) missing.push('--client-secret')
if (!getXaaIdpSettings()) {
missing.push(
"'openclaude mcp xaa setup' (settings.xaaIdp not configured)",
"'claude mcp xaa setup' (settings.xaaIdp not configured)",
)
}
if (missing.length) {
@@ -254,10 +254,10 @@ export function registerMcpAddCommand(mcp: Command): void {
`\nWarning: The command "${actualCommand}" looks like a URL, but is being interpreted as a stdio server as --transport was not specified.\n`,
)
process.stderr.write(
`If this is an HTTP server, use: openclaude mcp add --transport http ${name} ${actualCommand}\n`,
`If this is an HTTP server, use: claude mcp add --transport http ${name} ${actualCommand}\n`,
)
process.stderr.write(
`If this is an SSE server, use: openclaude mcp add --transport sse ${name} ${actualCommand}\n`,
`If this is an SSE server, use: claude mcp add --transport sse ${name} ${actualCommand}\n`,
)
}

View File

@@ -170,7 +170,7 @@ export function registerMcpXaaIdpCommand(mcp: Command): void {
const idp = getXaaIdpSettings()
if (!idp) {
return cliError(
"Error: no XAA IdP connection. Run 'openclaude mcp xaa setup' first.",
"Error: no XAA IdP connection. Run 'claude mcp xaa setup' first.",
)
}
@@ -235,7 +235,7 @@ export function registerMcpXaaIdpCommand(mcp: Command): void {
`Client secret: ${hasSecret ? '(stored in keychain)' : '(not set — PKCE-only)'}\n`,
)
process.stdout.write(
`Logged in: ${hasIdToken ? 'yes (id_token cached)' : "no — run 'openclaude mcp xaa login'"}\n`,
`Logged in: ${hasIdToken ? 'yes (id_token cached)' : "no — run 'claude mcp xaa login'"}\n`,
)
cliOk()
})

View File

@@ -6,7 +6,7 @@ export default {
type: 'local-jsx',
name: 'model',
get description() {
return `Set the AI model for OpenClaude (currently ${renderModelName(getMainLoopModel())})`
return `Set the AI model for Claude Code (currently ${renderModelName(getMainLoopModel())})`
},
argumentHint: '[model]',
get immediate() {

View File

@@ -713,7 +713,7 @@ function EmptyStateMessage(t0) {
{
let t1;
if ($[0] === Symbol.for("react.memo_cache_sentinel")) {
t1 = <><Text dimColor={true}>Git is required to install marketplaces.</Text><Text dimColor={true}>Please install git and restart OpenClaude.</Text></>;
t1 = <><Text dimColor={true}>Git is required to install marketplaces.</Text><Text dimColor={true}>Please install git and restart Claude Code.</Text></>;
$[0] = t1;
} else {
t1 = $[0];

View File

@@ -3,7 +3,7 @@ const plugin = {
type: 'local-jsx',
name: 'plugin',
aliases: ['plugins', 'marketplace'],
description: 'Manage OpenClaude plugins',
description: 'Manage Claude Code plugins',
immediate: true,
load: () => import('./plugin.js')
} satisfies Command;

View File

@@ -11,7 +11,6 @@ import {
buildCodexOAuthProfileEnv,
buildCurrentProviderSummary,
buildProfileSaveMessage,
buildProviderManagerCompletion,
getProviderWizardDefaults,
ProviderWizard,
TextEntryDialog,
@@ -265,32 +264,6 @@ test('wizard step remount prevents a typed API key from leaking into the next fi
expect(output).not.toContain('sk-secret-12345678')
})
test('buildProviderManagerCompletion records provider switch event and model-visible reminder', () => {
const completion = buildProviderManagerCompletion({
action: 'activated',
activeProviderName: 'Sadaf Provider',
activeProviderModel: 'sadaf-model',
message: 'Provider switched to Sadaf Provider (sadaf-model)',
})
expect(completion.message).toBe(
'Provider switched to Sadaf Provider (sadaf-model)',
)
expect(completion.metaMessages).toEqual([
'<system-reminder>Provider switched mid-session to Sadaf Provider using model sadaf-model. Use this provider/model for subsequent requests unless the user switches again.</system-reminder>',
])
})
test('buildProviderManagerCompletion skips provider reminder when manager is cancelled', () => {
const completion = buildProviderManagerCompletion({
action: 'cancelled',
message: 'Provider manager closed',
})
expect(completion.message).toBe('Provider manager closed')
expect(completion.metaMessages).toBeUndefined()
})
test('buildProfileSaveMessage maps provider fields without echoing secrets', () => {
const message = buildProfileSaveMessage(
'openai',

View File

@@ -2,10 +2,7 @@ import * as React from 'react'
import type { LocalJSXCommandCall, LocalJSXCommandOnDone } from '../../types/command.js'
import { COMMON_HELP_ARGS, COMMON_INFO_ARGS } from '../../constants/xml.js'
import {
ProviderManager,
type ProviderManagerResult,
} from '../../components/ProviderManager.js'
import { ProviderManager } from '../../components/ProviderManager.js'
import TextInput from '../../components/TextInput.js'
import {
Select,
@@ -73,29 +70,6 @@ import {
type OllamaGenerationReadiness,
} from '../../utils/providerDiscovery.js'
export function buildProviderManagerCompletion(result?: ProviderManagerResult): {
message: string
metaMessages?: string[]
} {
const message =
result?.message ??
(result?.action === 'saved'
? 'Provider profile updated'
: 'Provider manager closed')
const metaMessages =
result?.action === 'activated' && result.activeProviderName
? [
`<system-reminder>Provider switched mid-session to ${result.activeProviderName}${
result.activeProviderModel
? ` using model ${result.activeProviderModel}`
: ''
}. Use this provider/model for subsequent requests unless the user switches again.</system-reminder>`,
]
: undefined
return { message, metaMessages }
}
function describeOllamaReadinessIssue(
readiness: OllamaGenerationReadiness,
options?: {
@@ -1729,8 +1703,13 @@ export const call: LocalJSXCommandCall = async (onDone, _context, args) => {
<ProviderManager
mode="manage"
onDone={result => {
const { message, metaMessages } = buildProviderManagerCompletion(result)
onDone(message, { display: 'system', metaMessages })
const message =
result?.message ??
(result?.action === 'saved'
? 'Provider profile updated'
: 'Provider manager closed')
onDone(message, { display: 'system' })
}}
/>
)

View File

@@ -6,7 +6,7 @@ const web = {
type: 'local-jsx',
name: 'web-setup',
description:
'Setup OpenClaude on the web (requires connecting your GitHub account)',
'Setup Claude Code on the web (requires connecting your GitHub account)',
availability: ['claude-ai'],
isEnabled: () =>
getFeatureValue_CACHED_MAY_BE_STALE('tengu_cobalt_lantern', false) &&

View File

@@ -1,17 +0,0 @@
/**
* /repomap command - minimal metadata only.
* Implementation is lazy-loaded from repomap.ts to reduce startup time.
*/
import type { Command } from '../../commands.js'
const repomap = {
  type: 'local',
  name: 'repomap',
  description:
    'Show or configure the repository structural map (codebase intelligence)',
  isHidden: false, // explicitly visible in the command list
  supportsNonInteractive: true,
  load: () => import('./repomap.js'), // defers loading repomap.ts until invoked
} satisfies Command
export default repomap

View File

@@ -1,56 +0,0 @@
import { describe, expect, test } from 'bun:test'
import { parseArgs } from './repomap.js'
describe('/repomap argument parsing', () => {
  // Name fixed: the default budget in parseArgs is 2048 tokens (the old name
  // said 1024 while the assertion below has always checked 2048).
  test('defaults to 2048 tokens with no flags', () => {
    const result = parseArgs('')
    expect(result.tokens).toBe(2048)
    expect(result.focus).toEqual([])
    expect(result.invalidate).toBe(false)
    expect(result.stats).toBe(false)
  })

  test('parses --tokens flag', () => {
    const result = parseArgs('--tokens 4096')
    expect(result.tokens).toBe(4096)
  })

  test('rejects --tokens below 256', () => {
    const result = parseArgs('--tokens 100')
    expect(result.tokens).toBe(2048) // falls back to default
  })

  test('rejects --tokens above 16384', () => {
    const result = parseArgs('--tokens 20000')
    expect(result.tokens).toBe(2048) // falls back to default
  })

  test('parses --focus flag', () => {
    const result = parseArgs('--focus src/tools/')
    expect(result.focus).toEqual(['src/tools/'])
  })

  test('parses multiple --focus flags', () => {
    const result = parseArgs('--focus src/tools/ --focus src/context.ts')
    expect(result.focus).toEqual(['src/tools/', 'src/context.ts'])
  })

  test('parses --invalidate flag', () => {
    const result = parseArgs('--invalidate')
    expect(result.invalidate).toBe(true)
    expect(result.stats).toBe(false)
  })

  test('parses --stats flag', () => {
    const result = parseArgs('--stats')
    expect(result.stats).toBe(true)
    expect(result.invalidate).toBe(false)
  })

  test('parses combined flags', () => {
    const result = parseArgs('--tokens 2048 --focus src/tools/ --invalidate')
    expect(result.tokens).toBe(2048)
    expect(result.focus).toEqual(['src/tools/'])
    expect(result.invalidate).toBe(true)
  })
})

View File

@@ -1,93 +0,0 @@
import type { LocalCommandCall } from '../../types/command.js'
import { getCwd } from '../../utils/cwd.js'
/**
 * Parse CLI-style flags out of the raw /repomap argument string.
 *
 * Recognized flags:
 *   --tokens <n>    token budget; only accepted in [256, 16384], otherwise
 *                   the default of 2048 stands
 *   --focus <path>  may be repeated; each occurrence appends a focus path
 *   --invalidate    drop the cache before building
 *   --stats         report cache statistics instead of building a map
 *
 * Unknown words and value-taking flags with no following value are ignored.
 */
export function parseArgs(args: string): {
  tokens: number
  focus: string[]
  invalidate: boolean
  stats: boolean
} {
  const parsed = {
    tokens: 2048,
    focus: [] as string[],
    invalidate: false,
    stats: false,
  }
  const words = args.trim().split(/\s+/).filter(Boolean)

  let i = 0
  while (i < words.length) {
    const word = words[i]
    const next = i + 1 < words.length ? words[i + 1] : undefined
    if (word === '--tokens' && next !== undefined) {
      const budget = parseInt(next, 10)
      // Out-of-range or non-numeric budgets are silently ignored.
      if (!isNaN(budget) && budget >= 256 && budget <= 16384) {
        parsed.tokens = budget
      }
      i += 2
    } else if (word === '--focus' && next !== undefined) {
      parsed.focus.push(next)
      i += 2
    } else {
      if (word === '--invalidate') {
        parsed.invalidate = true
      } else if (word === '--stats') {
        parsed.stats = true
      }
      i += 1
    }
  }
  return parsed
}
/**
 * Execute /repomap: either report cache statistics, or build the repository
 * map (optionally invalidating the cache first) and return it behind a
 * one-line summary header.
 */
export const call: LocalCommandCall = async (args) => {
  const root = getCwd()
  const { tokens, focus, invalidate, stats } = parseArgs(args ?? '')

  // Lazy import to avoid loading tree-sitter at startup
  const { buildRepoMap, invalidateCache, getCacheStats } = await import(
    '../../context/repoMap/index.js'
  )

  // --stats short-circuits: report cache state without building anything.
  if (stats) {
    const cacheStats = getCacheStats(root)
    return {
      type: 'text',
      value: [
        `Repository map cache stats:`,
        ` Cache directory: ${cacheStats.cacheDir}`,
        ` Cache file: ${cacheStats.cacheFile ?? '(none)'}`,
        ` Cached entries: ${cacheStats.entryCount}`,
        ` Cache exists: ${cacheStats.exists}`,
      ].join('\n'),
    }
  }

  // Both remaining paths build the map with identical options; --invalidate
  // just clears the cache first and uses a different summary header.
  if (invalidate) {
    invalidateCache(root)
  }
  const result = await buildRepoMap({
    root,
    maxTokens: tokens,
    focusFiles: focus.length > 0 ? focus : undefined,
  })

  const header = invalidate
    ? [
        `Cache invalidated and rebuilt.`,
        `Files: ${result.fileCount} ranked (${result.totalFileCount} total) | Tokens: ${result.tokenCount} | Time: ${result.buildTimeMs}ms | Cache hit: ${result.cacheHit}`,
      ]
    : [
        `Repository map: ${result.fileCount} files ranked (${result.totalFileCount} total) | Tokens: ${result.tokenCount} | Time: ${result.buildTimeMs}ms | Cache hit: ${result.cacheHit}`,
      ]

  return {
    type: 'text',
    value: [...header, '', result.map].join('\n'),
  }
}

View File

@@ -48,7 +48,7 @@ const review: Command = {
const ultrareview: Command = {
type: 'local-jsx',
name: 'ultrareview',
description: `~10–20 min · Finds and verifies bugs in your branch. Runs in OpenClaude on the web. See ${CCR_TERMS_URL}`,
description: `~10–20 min · Finds and verifies bugs in your branch. Runs in Claude Code on the web. See ${CCR_TERMS_URL}`,
isEnabled: () => isUltrareviewEnabled(),
load: () => import('./review/ultrareviewCommand.js'),
}

View File

@@ -57,7 +57,7 @@ function SessionInfo(t0) {
if (!remoteSessionUrl) {
let t4;
if ($[4] === Symbol.for("react.memo_cache_sentinel")) {
t4 = <Pane><Text color="warning">Not in remote mode. Start with `openclaude --remote` to use this command.</Text><Text dimColor={true}>(press esc to close)</Text></Pane>;
t4 = <Pane><Text color="warning">Not in remote mode. Start with `claude --remote` to use this command.</Text><Text dimColor={true}>(press esc to close)</Text></Pane>;
$[4] = t4;
} else {
t4 = $[4];

View File

@@ -3,7 +3,7 @@ import type { Command } from '../../commands.js'
const stats = {
type: 'local-jsx',
name: 'stats',
description: 'Show your OpenClaude usage statistics and activity',
description: 'Show your Claude Code usage statistics and activity',
load: () => import('./stats.js'),
} satisfies Command

View File

@@ -4,7 +4,7 @@ const status = {
type: 'local-jsx',
name: 'status',
description:
'Show OpenClaude status including version, model, account, API connectivity, and tool statuses',
'Show Claude Code status including version, model, account, API connectivity, and tool statuses',
immediate: true,
load: () => import('./status.js'),
} satisfies Command

View File

@@ -3,7 +3,7 @@ import type { Command } from '../commands.js';
import { AGENT_TOOL_NAME } from '../tools/AgentTool/constants.js';
const statusline = {
type: 'prompt',
description: "Set up OpenClaude's status line UI",
description: "Set up Claude Code's status line UI",
contentLength: 0,
// Dynamic content
aliases: [],

View File

@@ -3,7 +3,7 @@ import type { Command } from '../../commands.js'
const stickers = {
type: 'local',
name: 'stickers',
description: 'Order OpenClaude stickers',
description: 'Order Claude Code stickers',
supportsNonInteractive: false,
load: () => import('./stickers.js'),
} satisfies Command

View File

@@ -4,7 +4,7 @@ import { checkStatsigFeatureGate_CACHED_MAY_BE_STALE } from '../../services/anal
const thinkback = {
type: 'local-jsx',
name: 'think-back',
description: 'Your 2025 OpenClaude Year in Review',
description: 'Your 2025 Claude Code Year in Review',
isEnabled: () =>
checkStatsigFeatureGate_CACHED_MAY_BE_STALE('tengu_thinkback'),
load: () => import('./thinkback.js'),

View File

@@ -115,7 +115,7 @@ function startDetachedPoll(taskId: string, sessionId: string, url: string, getAp
ultraplanSessionUrl: undefined
} : prev);
enqueuePendingNotification({
value: [`Ultraplan approved — executing in OpenClaude on the web. Follow along at: ${url}`, '', 'Results will land as a pull request when the remote session finishes. There is nothing to do here.'].join('\n'),
value: [`Ultraplan approved — executing in Claude Code on the web. Follow along at: ${url}`, '', 'Results will land as a pull request when the remote session finishes. There is nothing to do here.'].join('\n'),
mode: 'task-notification'
});
} else {
@@ -184,10 +184,10 @@ function startDetachedPoll(taskId: string, sessionId: string, url: string, getAp
// multi-second teleportToRemote round-trip.
function buildLaunchMessage(disconnectedBridge?: boolean): string {
const prefix = disconnectedBridge ? `${REMOTE_CONTROL_DISCONNECTED_MSG} ` : '';
return `${DIAMOND_OPEN} ultraplan\n${prefix}Starting OpenClaude on the web…`;
return `${DIAMOND_OPEN} ultraplan\n${prefix}Starting Claude Code on the web…`;
}
function buildSessionReadyMessage(url: string): string {
return `${DIAMOND_OPEN} ultraplan · Monitor progress in OpenClaude on the web ${url}\nYou can continue working — when the ${DIAMOND_OPEN} fills, press ↓ to view results`;
return `${DIAMOND_OPEN} ultraplan · Monitor progress in Claude Code on the web ${url}\nYou can continue working — when the ${DIAMOND_OPEN} fills, press ↓ to view results`;
}
function buildAlreadyActiveMessage(url: string | undefined): string {
return url ? `ultraplan: already polling. Open ${url} to check status, or wait for the plan to land here.` : 'ultraplan: already launching. Please wait for the session to start.';
@@ -272,7 +272,7 @@ export async function launchUltraplan(opts: {
return [
// Rendered via <Markdown>; raw <message> is tokenized as HTML
// and dropped. Backslash-escape the brackets.
'Usage: /ultraplan \\<prompt\\>, or include "ultraplan" anywhere', 'in your prompt', '', 'Advanced multi-agent plan mode with our most powerful model', '(Opus). Runs in OpenClaude on the web. When the plan is ready,', 'you can execute it in the web session or send it back here.', 'Terminal stays free while the remote plans.', 'Requires /login.', '', `Terms: ${CCR_TERMS_URL}`].join('\n');
'Usage: /ultraplan \\<prompt\\>, or include "ultraplan" anywhere', 'in your prompt', '', 'Advanced multi-agent plan mode with our most powerful model', '(Opus). Runs in Claude Code on the web. When the plan is ready,', 'you can execute it in the web session or send it back here.', 'Terminal stays free while the remote plans.', 'Requires /login.', '', `Terms: ${CCR_TERMS_URL}`].join('\n');
}
// Set synchronously before the detached flow to prevent duplicate launches
@@ -461,7 +461,7 @@ const call: LocalJSXCommandCall = async (onDone, context, args) => {
export default {
type: 'local-jsx',
name: 'ultraplan',
description: `~10–30 min · OpenClaude on the web drafts an advanced plan you can edit and approve. See ${CCR_TERMS_URL}`,
description: `~10–30 min · Claude Code on the web drafts an advanced plan you can edit and approve. See ${CCR_TERMS_URL}`,
argumentHint: '<prompt>',
isEnabled: () => "external" === 'ant',
load: () => Promise.resolve({

View File

@@ -4,5 +4,6 @@ export default {
type: 'local-jsx',
name: 'usage',
description: 'Show plan usage limits',
availability: ['claude-ai'],
load: () => import('./usage.js'),
} satisfies Command

View File

@@ -56,7 +56,7 @@ export function ClaudeInChromeOnboarding(t0) {
}
let t5;
if ($[6] !== t4) {
t5 = <Text>Claude in Chrome works with the Chrome extension to let you control your browser directly from OpenClaude. You can navigate websites, fill forms, capture screenshots, record GIFs, and debug with console logs and network requests.{t4}</Text>;
t5 = <Text>Claude in Chrome works with the Chrome extension to let you control your browser directly from Claude Code. You can navigate websites, fill forms, capture screenshots, record GIFs, and debug with console logs and network requests.{t4}</Text>;
$[6] = t4;
$[7] = t5;
} else {

View File

@@ -262,7 +262,7 @@ export function ConsoleOAuthFlow({
state: 'success'
});
void sendNotification({
message: 'OpenClaude login successful',
message: 'Claude Code login successful',
notificationType: 'auth_success'
}, terminal);
}
@@ -384,7 +384,7 @@ function OAuthStatusMessage({
case 'idle': {
const promptText =
startingMessage ||
'OpenClaude can be used with your Claude subscription or billed based on API usage through your Console account.'
'Claude Code can be used with your Claude subscription or billed based on API usage through your Console account.'
const loginOptions = [
{
@@ -512,7 +512,7 @@ function OAuthStatusMessage({
<Box flexDirection="column" gap={1}>
<Box>
<Spinner />
<Text>Creating API key for OpenClaude</Text>
<Text>Creating API key for Claude Code</Text>
</Box>
</Box>
)

View File

@@ -90,7 +90,7 @@ export function DesktopUpsellStartup(t0) {
let t3;
if ($[5] === Symbol.for("react.memo_cache_sentinel")) {
t3 = {
label: "Open in Claude desktop app",
label: "Open in Claude Code Desktop",
value: "try" as const
};
$[5] = t3;
@@ -120,7 +120,7 @@ export function DesktopUpsellStartup(t0) {
const options = t5;
let t6;
if ($[8] === Symbol.for("react.memo_cache_sentinel")) {
t6 = <Box marginBottom={1}><Text>Use OpenClaude in the Claude desktop app for visual diffs, live app preview, parallel sessions, and more.</Text></Box>;
t6 = <Box marginBottom={1}><Text>Same Claude Code with visual diffs, live app preview, parallel sessions, and more.</Text></Box>;
$[8] = t6;
} else {
t6 = $[8];
@@ -135,7 +135,7 @@ export function DesktopUpsellStartup(t0) {
}
let t8;
if ($[11] !== handleSelect || $[12] !== t7) {
t8 = <PermissionDialog title="Try the Claude desktop app"><Box flexDirection="column" paddingX={2} paddingY={1}>{t6}<Select options={options} onChange={handleSelect} onCancel={t7} /></Box></PermissionDialog>;
t8 = <PermissionDialog title="Try Claude Code Desktop"><Box flexDirection="column" paddingX={2} paddingY={1}>{t6}<Select options={options} onChange={handleSelect} onCancel={t7} /></Box></PermissionDialog>;
$[11] = handleSelect;
$[12] = t7;
$[13] = t8;

View File

@@ -138,7 +138,7 @@ export function HelpV2(t0) {
const t5 = insideModal ? undefined : maxHeight;
let t6;
if ($[31] !== tabs) {
t6 = <Tabs title={false ? "/help" : `OpenClaude v${MACRO.VERSION}`} color="professionalBlue" defaultTab="general">{tabs}</Tabs>;
t6 = <Tabs title={false ? "/help" : `Claude Code v${MACRO.VERSION}`} color="professionalBlue" defaultTab="general">{tabs}</Tabs>;
$[31] = tabs;
$[32] = t6;
} else {
@@ -146,7 +146,7 @@ export function HelpV2(t0) {
}
let t7;
if ($[33] === Symbol.for("react.memo_cache_sentinel")) {
t7 = <Box marginTop={1}><Text>For more help:{" "}<Link url="https://github.com/Gitlawb/openclaude" /></Text></Box>;
t7 = <Box marginTop={1}><Text>For more help:{" "}<Link url="https://code.claude.com/docs/en/overview" /></Text></Box>;
$[33] = t7;
} else {
t7 = $[33];

View File

@@ -70,7 +70,7 @@ export function IdeOnboardingDialog(t0) {
}
let t6;
if ($[8] !== ideName) {
t6 = <>{t5}<Text>Welcome to OpenClaude for {ideName}</Text></>;
t6 = <>{t5}<Text>Welcome to Claude Code for {ideName}</Text></>;
$[8] = ideName;
$[9] = t6;
} else {

View File

@@ -135,7 +135,7 @@ export function ChannelsNotice() {
}
let t2;
if ($[24] !== flag) {
t2 = <Text dimColor={true}>Experimental · inbound messages will be pushed into this session, this carries prompt injection risks. Restart OpenClaude without {flag} to disable.</Text>;
t2 = <Text dimColor={true}>Experimental · inbound messages will be pushed into this session, this carries prompt injection risks. Restart Claude Code without {flag} to disable.</Text>;
$[24] = flag;
$[25] = t2;
} else {

View File

@@ -250,8 +250,8 @@ export function LogoV2() {
}
const layoutMode = getLayoutMode(columns);
const userTheme = resolveThemeSetting(getGlobalConfig().theme);
const borderTitle = ` ${color("text", userTheme)("OpenClaude")} ${color("inactive", userTheme)(`v${version}`)} `;
const compactBorderTitle = color("text", userTheme)(" OpenClaude ");
const borderTitle = ` ${color("text", userTheme)("Open Claude")} ${color("inactive", userTheme)(`v${version}`)} `;
const compactBorderTitle = color("text", userTheme)(" Open Claude ");
if (layoutMode === "compact") {
let welcomeMessage = formatWelcomeMessage(username);
if (stringWidth(welcomeMessage) > columns - 4) {

View File

@@ -9,7 +9,7 @@ export function WelcomeV2() {
if (env.terminal === "Apple_Terminal") {
let t0;
if ($[0] !== theme) {
t0 = <AppleTerminalWelcomeV2 theme={theme} welcomeMessage="Welcome to OpenClaude" />;
t0 = <AppleTerminalWelcomeV2 theme={theme} welcomeMessage="Welcome to Claude Code" />;
$[0] = theme;
$[1] = t0;
} else {
@@ -28,7 +28,7 @@ export function WelcomeV2() {
let t7;
let t8;
if ($[2] === Symbol.for("react.memo_cache_sentinel")) {
t0 = <Text><Text color="claude">{"Welcome to OpenClaude"} </Text><Text dimColor={true}>v{MACRO.DISPLAY_VERSION ?? MACRO.VERSION} </Text></Text>;
t0 = <Text><Text color="claude">{"Welcome to Open Claude"} </Text><Text dimColor={true}>v{MACRO.DISPLAY_VERSION ?? MACRO.VERSION} </Text></Text>;
t1 = <Text>{"\u2026\u2026\u2026\u2026\u2026\u2026\u2026\u2026\u2026\u2026\u2026\u2026\u2026\u2026\u2026\u2026\u2026\u2026\u2026\u2026\u2026\u2026\u2026\u2026\u2026\u2026\u2026\u2026\u2026\u2026\u2026\u2026\u2026\u2026\u2026\u2026\u2026\u2026\u2026\u2026\u2026\u2026\u2026\u2026\u2026\u2026\u2026\u2026\u2026\u2026\u2026\u2026\u2026\u2026\u2026\u2026\u2026\u2026"}</Text>;
t2 = <Text>{" "}</Text>;
t3 = <Text>{" "}</Text>;
@@ -113,7 +113,7 @@ export function WelcomeV2() {
let t5;
let t6;
if ($[18] === Symbol.for("react.memo_cache_sentinel")) {
t0 = <Text><Text color="claude">{"Welcome to OpenClaude"} </Text><Text dimColor={true}>v{MACRO.DISPLAY_VERSION ?? MACRO.VERSION} </Text></Text>;
t0 = <Text><Text color="claude">{"Welcome to Open Claude"} </Text><Text dimColor={true}>v{MACRO.DISPLAY_VERSION ?? MACRO.VERSION} </Text></Text>;
t1 = <Text>{"\u2026\u2026\u2026\u2026\u2026\u2026\u2026\u2026\u2026\u2026\u2026\u2026\u2026\u2026\u2026\u2026\u2026\u2026\u2026\u2026\u2026\u2026\u2026\u2026\u2026\u2026\u2026\u2026\u2026\u2026\u2026\u2026\u2026\u2026\u2026\u2026\u2026\u2026\u2026\u2026\u2026\u2026\u2026\u2026\u2026\u2026\u2026\u2026\u2026\u2026\u2026\u2026\u2026\u2026\u2026\u2026\u2026\u2026"}</Text>;
t2 = <Text>{" "}</Text>;
t3 = <Text>{" * \u2588\u2588\u2588\u2588\u2588\u2593\u2593\u2591 "}</Text>;

View File

@@ -41,7 +41,7 @@ export function createWhatsNewFeed(releaseNotes: string[]): FeedConfig {
});
const emptyMessage = "external" === 'ant' ? 'Unable to fetch latest claude-cli-internal commits' : 'Check /release-notes for recent updates';
return {
title: "external" === 'ant' ? "OpenClaude Updates [internal-only: Latest CC commits]" : "OpenClaude Updates",
title: "external" === 'ant' ? "Open Claude Updates [internal-only: Latest CC commits]" : "Open Claude Updates",
lines,
footer: lines.length > 0 ? '/release-notes for more' : undefined,
emptyMessage
@@ -60,7 +60,7 @@ export function createProjectOnboardingFeed(steps: Step[]): FeedConfig {
text: `${checkmark}${text}`
};
});
const warningText = getCwd() === homedir() ? 'Note: You have launched openclaude in your home directory. For the best experience, launch it in a project directory instead.' : undefined;
const warningText = getCwd() === homedir() ? 'Note: You have launched claude in your home directory. For the best experience, launch it in a project directory instead.' : undefined;
if (warningText) {
lines.push({
text: warningText
@@ -73,7 +73,7 @@ export function createProjectOnboardingFeed(steps: Step[]): FeedConfig {
}
export function createGuestPassesFeed(): FeedConfig {
const reward = getCachedReferrerReward();
const subtitle = reward ? `Share OpenClaude and earn ${formatCreditAmount(reward)} of extra usage` : 'Share OpenClaude with friends';
const subtitle = reward ? `Share Open Claude and earn ${formatCreditAmount(reward)} of extra usage` : 'Share Open Claude with friends';
return {
title: '3 guest passes',
lines: [],

View File

@@ -265,7 +265,7 @@ export function ModelPicker(t0) {
} else {
t15 = $[41];
}
const t16 = headerText ?? "Switch between Claude models. Applies to this session and future OpenClaude sessions. For other/previous model names, specify with --model.";
const t16 = headerText ?? "Switch between Claude models. Applies to this session and future Claude Code sessions. For other/previous model names, specify with --model.";
let t17;
if ($[42] !== t16) {
t17 = <Text dimColor={true}>{t16}</Text>;

View File

@@ -146,7 +146,7 @@ export function Onboarding({
steps.push({
id: 'terminal-setup',
component: <Box flexDirection="column" gap={1} paddingLeft={1}>
<Text bold>Use OpenClaude&apos;s terminal setup?</Text>
<Text bold>Use Claude Code&apos;s terminal setup?</Text>
<Box flexDirection="column" width={70} gap={1}>
<Text>
For the optimal coding experience, enable the recommended settings

View File

@@ -80,7 +80,7 @@ export function OutputStylePicker(t0) {
const t6 = !isStandaloneCommand;
let t7;
if ($[5] === Symbol.for("react.memo_cache_sentinel")) {
t7 = <Box marginTop={1}><Text dimColor={true}>This changes how OpenClaude communicates with you</Text></Box>;
t7 = <Box marginTop={1}><Text dimColor={true}>This changes how Claude Code communicates with you</Text></Box>;
$[5] = t7;
} else {
t7 = $[5];

View File

@@ -773,7 +773,7 @@ function PromptInput({
if (feature('ULTRAPLAN') && ultraplanTriggers.length) {
addNotification({
key: 'ultraplan-active',
text: 'This prompt will launch an ultraplan session in OpenClaude on the web',
text: 'This prompt will launch an ultraplan session in Claude Code on the web',
priority: 'immediate',
timeoutMs: 5000
});

View File

@@ -110,7 +110,6 @@ const PRESET_ORDER = [
'Anthropic',
'Atomic Chat',
'Azure OpenAI',
'Bankr',
'Codex OAuth',
'DeepSeek',
'Google Gemini',
@@ -118,15 +117,12 @@ const PRESET_ORDER = [
'LM Studio',
'MiniMax',
'Mistral',
'Moonshot AI - API',
'Moonshot AI - Kimi Code',
'Moonshot AI',
'NVIDIA NIM',
'Ollama',
'OpenAI',
'OpenRouter',
'Together AI',
'xAI',
'Z.AI - GLM Coding Plan',
'Custom',
] as const
@@ -155,7 +151,6 @@ function createDeferred<T>(): {
function mockProviderProfilesModule(options?: {
addProviderProfile?: (...args: unknown[]) => unknown
getActiveProviderProfile?: () => unknown
getProviderProfiles?: () => unknown[]
updateProviderProfile?: (...args: unknown[]) => unknown
setActiveProviderProfile?: (...args: unknown[]) => unknown
@@ -164,7 +159,7 @@ function mockProviderProfilesModule(options?: {
addProviderProfile: options?.addProviderProfile ?? (() => null),
applyActiveProviderProfileFromConfig: () => {},
deleteProviderProfile: () => ({ removed: false, activeProfileId: null }),
getActiveProviderProfile: options?.getActiveProviderProfile ?? (() => null),
getActiveProviderProfile: () => null,
getProviderPresetDefaults: (preset: string) =>
preset === 'ollama'
? {
@@ -194,7 +189,6 @@ function mockProviderManagerDependencies(
addProviderProfile?: (...args: unknown[]) => unknown
applySavedProfileToCurrentSession?: (...args: unknown[]) => Promise<string | null>
clearCodexCredentials?: () => { success: boolean; warning?: string }
getActiveProviderProfile?: () => unknown
getProviderProfiles?: () => unknown[]
probeOllamaGenerationReadiness?: () => Promise<{
state: 'ready' | 'unreachable' | 'no_models' | 'generation_failed'
@@ -234,7 +228,6 @@ function mockProviderManagerDependencies(
): void {
mockProviderProfilesModule({
addProviderProfile: options?.addProviderProfile,
getActiveProviderProfile: options?.getActiveProviderProfile,
getProviderProfiles: options?.getProviderProfiles,
updateProviderProfile: options?.updateProviderProfile,
setActiveProviderProfile: options?.setActiveProviderProfile,
@@ -337,10 +330,6 @@ async function mountProviderManager(
options?: {
mode?: 'first-run' | 'manage'
onDone?: (result?: unknown) => void
onChangeAppState?: (args: {
newState: unknown
oldState: unknown
}) => void
},
): Promise<{
stdin: PassThrough
@@ -355,7 +344,7 @@ async function mountProviderManager(
})
root.render(
<AppStateProvider onChangeAppState={options?.onChangeAppState}>
<AppStateProvider>
<KeybindingSetup>
<ProviderManager
mode={options?.mode ?? 'manage'}
@@ -917,223 +906,6 @@ test('ProviderManager keeps Codex OAuth as next-startup only when activating the
await mounted.dispose()
})
// Verifies that activating a provider profile whose `model` field lists
// multiple models ('gpt-5.4; gpt-5.4-mini') sets the session model to the
// first (primary) entry only — the raw multi-model string must never be
// written into app state.
test('ProviderManager activating a multi-model provider sets the session model to the primary model', async () => {
  // Clear provider-selection env overrides so the manager starts from a
  // clean slate (no simple-mode or GitHub provider forced via environment).
  delete process.env.CLAUDE_CODE_SIMPLE
  delete process.env.CLAUDE_CODE_USE_GITHUB
  delete process.env.GITHUB_TOKEN
  delete process.env.GH_TOKEN
  // Profile under test: two models separated by '; ' — first one is primary.
  const multiModelProfile = {
    id: 'provider_multi_model',
    provider: 'openai',
    name: 'Multi Model Provider',
    baseUrl: 'https://api.openai.com/v1',
    model: 'gpt-5.4; gpt-5.4-mini',
    apiKey: 'sk-test',
  }
  const setActiveProviderProfile = mock(() => multiModelProfile)
  // Collects every app-state transition emitted while the UI is driven.
  const appStateChanges: Array<{ newState: any; oldState: any }> = []
  mockProviderManagerDependencies(
    () => undefined,
    async () => undefined,
    {
      getProviderProfiles: () => [multiModelProfile],
      setActiveProviderProfile,
    },
  )
  // Cache-busting query string forces a fresh module instance so this import
  // picks up the mocks installed above.
  const nonce = `${Date.now()}-${Math.random()}`
  const { ProviderManager } = await import(`./ProviderManager.js?ts=${nonce}`)
  const mounted = await mountProviderManager(ProviderManager, {
    onChangeAppState: args => {
      appStateChanges.push(args as { newState: any; oldState: any })
    },
  })
  // Wait for the main menu, then navigate: 'j' moves down to
  // "Set active provider", Enter selects it.
  await waitForFrameOutput(
    mounted.getOutput,
    frame =>
      frame.includes('Provider manager') &&
      frame.includes('Set active provider'),
  )
  mounted.stdin.write('j')
  await Bun.sleep(25)
  mounted.stdin.write('\r')
  // The provider list should now show the profile; Enter activates it.
  await waitForFrameOutput(
    mounted.getOutput,
    frame =>
      frame.includes('Set active provider') &&
      frame.includes('Multi Model Provider'),
  )
  await Bun.sleep(25)
  mounted.stdin.write('\r')
  await waitForCondition(() => setActiveProviderProfile.mock.calls.length > 0)
  // App state must land on the primary model ('gpt-5.4') via a transition
  // that actually changed the model.
  await waitForCondition(() =>
    appStateChanges.some(
      ({ newState, oldState }) =>
        newState.mainLoopModel === 'gpt-5.4' &&
        oldState.mainLoopModel !== newState.mainLoopModel,
    ),
  )
  expect(setActiveProviderProfile).toHaveBeenCalledWith('provider_multi_model')
  // The session-scoped model override is cleared alongside the switch.
  expect(
    appStateChanges.some(
      ({ newState }) =>
        newState.mainLoopModel === 'gpt-5.4' &&
        newState.mainLoopModelForSession === null,
    ),
  ).toBe(true)
  // The raw multi-model string must never appear as the session model.
  expect(
    appStateChanges.some(
      ({ newState }) => newState.mainLoopModel === 'gpt-5.4; gpt-5.4-mini',
    ),
  ).toBe(false)
  await mounted.dispose()
})
// Verifies that stepping through the edit wizard for the currently-active
// multi-model provider (accepting each field unchanged) keeps app state on
// the primary model — editing must not replace it with the raw
// multi-model string.
test('ProviderManager editing an active multi-model provider keeps app state on the primary model', async () => {
  // Clear provider-selection env overrides so the manager starts cleanly.
  delete process.env.CLAUDE_CODE_SIMPLE
  delete process.env.CLAUDE_CODE_USE_GITHUB
  delete process.env.GITHUB_TOKEN
  delete process.env.GH_TOKEN
  // Profile under test; it is also reported as the ACTIVE profile below, so
  // saving the edit should refresh app state.
  const multiModelProfile = {
    id: 'provider_multi_model',
    provider: 'openai',
    name: 'Multi Model Provider',
    baseUrl: 'https://api.openai.com/v1',
    model: 'gpt-5.4; gpt-5.4-mini',
    apiKey: 'sk-test',
  }
  const updateProviderProfile = mock(() => multiModelProfile)
  // Collects every app-state transition emitted while the UI is driven.
  const appStateChanges: Array<{ newState: any; oldState: any }> = []
  mockProviderManagerDependencies(
    () => undefined,
    async () => undefined,
    {
      getActiveProviderProfile: () => multiModelProfile,
      getProviderProfiles: () => [multiModelProfile],
      updateProviderProfile,
    },
  )
  // Cache-busting import so this test's mocks are picked up.
  const nonce = `${Date.now()}-${Math.random()}`
  const { ProviderManager } = await import(`./ProviderManager.js?ts=${nonce}`)
  const mounted = await mountProviderManager(ProviderManager, {
    onChangeAppState: args => {
      appStateChanges.push(args as { newState: any; oldState: any })
    },
  })
  // Navigate the main menu: two 'j' presses reach "Edit provider", then
  // Enter selects it.
  await waitForFrameOutput(
    mounted.getOutput,
    frame =>
      frame.includes('Provider manager') &&
      frame.includes('Edit provider'),
  )
  mounted.stdin.write('j')
  await Bun.sleep(25)
  mounted.stdin.write('j')
  await Bun.sleep(25)
  mounted.stdin.write('\r')
  // Pick the (only) profile from the edit list.
  await waitForFrameOutput(
    mounted.getOutput,
    frame =>
      frame.includes('Edit provider') &&
      frame.includes('Multi Model Provider'),
  )
  await Bun.sleep(25)
  mounted.stdin.write('\r')
  // Walk the 7-step edit wizard, pressing Enter at each step to keep the
  // existing value unchanged.
  await waitForFrameOutput(
    mounted.getOutput,
    frame =>
      frame.includes('Edit provider profile') &&
      frame.includes('Step 1 of 7'),
  )
  mounted.stdin.write('\r')
  await waitForFrameOutput(
    mounted.getOutput,
    frame => frame.includes('Step 2 of 7'),
  )
  mounted.stdin.write('\r')
  await waitForFrameOutput(
    mounted.getOutput,
    frame => frame.includes('Step 3 of 7'),
  )
  mounted.stdin.write('\r')
  await waitForFrameOutput(
    mounted.getOutput,
    frame => frame.includes('Step 4 of 7'),
  )
  mounted.stdin.write('\r')
  await waitForFrameOutput(
    mounted.getOutput,
    frame => frame.includes('Step 5 of 7'),
  )
  mounted.stdin.write('\r')
  await waitForFrameOutput(
    mounted.getOutput,
    frame => frame.includes('Step 6 of 7'),
  )
  mounted.stdin.write('\r')
  await waitForFrameOutput(
    mounted.getOutput,
    frame => frame.includes('Step 7 of 7'),
  )
  mounted.stdin.write('\r')
  await waitForCondition(() => updateProviderProfile.mock.calls.length > 0)
  // App state must settle on the primary model via an actual transition.
  await waitForCondition(() =>
    appStateChanges.some(
      ({ newState, oldState }) =>
        newState.mainLoopModel === 'gpt-5.4' &&
        oldState.mainLoopModel !== newState.mainLoopModel,
    ),
  )
  // The profile itself keeps the full multi-model list on save…
  expect(updateProviderProfile).toHaveBeenCalledWith(
    'provider_multi_model',
    expect.objectContaining({
      model: 'gpt-5.4; gpt-5.4-mini',
    }),
  )
  // …while app state carries only the primary model and clears the
  // session-scoped override.
  expect(
    appStateChanges.some(
      ({ newState }) =>
        newState.mainLoopModel === 'gpt-5.4' &&
        newState.mainLoopModelForSession === null,
    ),
  ).toBe(true)
  // The raw multi-model string must never appear as the session model.
  expect(
    appStateChanges.some(
      ({ newState }) => newState.mainLoopModel === 'gpt-5.4; gpt-5.4-mini',
    ),
  ).toBe(false)
  await mounted.dispose()
})
test('ProviderManager resolves Codex OAuth state from async storage without sync reads in render flow', async () => {
delete process.env.CLAUDE_CODE_SIMPLE
delete process.env.CLAUDE_CODE_USE_GITHUB

View File

@@ -46,7 +46,6 @@ import {
rankOllamaModels,
recommendOllamaModel,
} from '../utils/providerRecommendation.js'
import { clearStartupProviderOverrides } from '../utils/providerStartupOverrides.js'
import { redactUrlForDisplay } from '../utils/urlRedaction.js'
import { updateSettingsForSource } from '../utils/settings/settings.js'
import {
@@ -58,10 +57,8 @@ import TextInput from './TextInput.js'
import { useCodexOAuthFlow } from './useCodexOAuthFlow.js'
export type ProviderManagerResult = {
action: 'saved' | 'cancelled' | 'activated'
action: 'saved' | 'cancelled'
activeProfileId?: string
activeProviderName?: string
activeProviderModel?: string
message?: string
}
@@ -81,14 +78,7 @@ type Screen =
| 'select-edit'
| 'select-delete'
type DraftField =
| 'name'
| 'baseUrl'
| 'model'
| 'apiKey'
| 'apiFormat'
| 'authHeader'
| 'authHeaderValue'
type DraftField = 'name' | 'baseUrl' | 'model' | 'apiKey'
type ProviderDraft = Record<DraftField, string>
@@ -134,29 +124,8 @@ const FORM_STEPS: Array<{
{
key: 'model',
label: 'Default model',
placeholder: 'e.g. llama3.1:8b or glm-4.7; glm-4.7-flash',
helpText: 'Model name(s) to use. Separate multiple with ";" or ","; first is default.',
},
{
key: 'apiFormat',
label: 'API mode',
placeholder: 'chat_completions',
helpText: 'Choose the OpenAI-compatible API surface for this provider.',
optional: true,
},
{
key: 'authHeader',
label: 'Auth header',
placeholder: 'e.g. api-key or X-API-Key',
helpText: 'Optional. Header name used for a custom provider key.',
optional: true,
},
{
key: 'authHeaderValue',
label: 'Auth header value',
placeholder: 'Leave empty to use the API key value',
helpText: 'Optional. Value sent in the custom auth header.',
optional: true,
placeholder: 'e.g. llama3.1:8b or glm-4.7, glm-4.7-flash',
helpText: 'Model name(s) to use. Separate multiple with commas; first is default.',
},
{
key: 'apiKey',
@@ -182,9 +151,6 @@ function toDraft(profile: ProviderProfile): ProviderDraft {
baseUrl: profile.baseUrl,
model: profile.model,
apiKey: profile.apiKey ?? '',
apiFormat: profile.apiFormat ?? 'chat_completions',
authHeader: profile.authHeader ?? '',
authHeaderValue: profile.authHeaderValue ?? '',
}
}
@@ -195,9 +161,6 @@ function presetToDraft(preset: ProviderPreset): ProviderDraft {
baseUrl: defaults.baseUrl,
model: defaults.model,
apiKey: defaults.apiKey ?? '',
apiFormat: 'chat_completions',
authHeader: '',
authHeaderValue: '',
}
}
@@ -211,15 +174,7 @@ function profileSummary(profile: ProviderProfile, isActive: boolean): string {
models.length <= 3
? models.join(', ')
: `${models[0]}, ${models[1]} + ${models.length - 2} more`
const modeInfo =
profile.provider === 'openai'
? ` · ${profile.apiFormat === 'responses' ? 'responses' : 'chat/completions'}`
: ''
const authInfo =
profile.provider === 'openai' && profile.authHeader
? ` · ${profile.authHeader} auth`
: ''
return `${providerKind} · ${profile.baseUrl} · ${modelDisplay}${modeInfo}${authInfo} · ${keyInfo}${activeSuffix}`
return `${providerKind} · ${profile.baseUrl} · ${modelDisplay} · ${keyInfo}${activeSuffix}`
}
function getGithubCredentialSourceFromEnv(
@@ -498,18 +453,7 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
})
}, [])
const formSteps = React.useMemo(
() =>
draftProvider === 'openai'
? FORM_STEPS
: FORM_STEPS.filter(step =>
step.key !== 'apiFormat' &&
step.key !== 'authHeader' &&
step.key !== 'authHeaderValue'
),
[draftProvider],
)
const currentStep = formSteps[formStepIndex] ?? formSteps[0] ?? FORM_STEPS[0]
const currentStep = FORM_STEPS[formStepIndex] ?? FORM_STEPS[0]
const currentStepKey = currentStep.key
const currentValue = draft[currentStepKey]
@@ -727,7 +671,17 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
}
function clearStartupProviderOverrideFromUserSettings(): string | null {
return clearStartupProviderOverrides()
const { error } = updateSettingsForSource('userSettings', {
env: {
CLAUDE_CODE_USE_OPENAI: undefined as any,
CLAUDE_CODE_USE_GEMINI: undefined as any,
CLAUDE_CODE_USE_GITHUB: undefined as any,
CLAUDE_CODE_USE_BEDROCK: undefined as any,
CLAUDE_CODE_USE_VERTEX: undefined as any,
CLAUDE_CODE_USE_FOUNDRY: undefined as any,
},
})
return error ? error.message : null
}
function buildCodexOAuthActivationMessage(options: {
@@ -814,14 +768,12 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
mainLoopModelForSession: null,
}))
refreshProfiles()
setAppState(prev => ({
...prev,
mainLoopModel: GITHUB_PROVIDER_DEFAULT_MODEL,
}))
setStatusMessage(`Active provider: ${GITHUB_PROVIDER_LABEL}`)
setIsActivating(false)
onDone({
action: 'activated',
activeProviderName: GITHUB_PROVIDER_LABEL,
activeProviderModel: GITHUB_PROVIDER_DEFAULT_MODEL,
message: `Provider switched to ${GITHUB_PROVIDER_LABEL} (${GITHUB_PROVIDER_DEFAULT_MODEL})`,
})
returnToMenu()
return
}
@@ -837,14 +789,19 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
// Update the session model to the new provider's first model.
// persistActiveProviderProfileModel (called by onChangeAppState) will
// not overwrite the multi-model list because it checks if the model
// is already in the provider's configured model list.
// is already in the profile's comma-separated model list.
const newModel = getPrimaryModel(active.model)
setAppState(prev => ({
...prev,
mainLoopModel: newModel,
}))
providerLabel = active.name
setAppState(prev => ({
...prev,
mainLoopModel: active.model,
mainLoopModelForSession: null,
}))
providerLabel = active.name
const settingsOverrideError =
clearStartupProviderOverrideFromUserSettings()
const isActiveCodexOAuth = isCodexOAuthProfile(
@@ -856,29 +813,23 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
: null
refreshProfiles()
const activationMessage = isActiveCodexOAuth
? buildCodexOAuthActivationMessage({
prefix: `Active provider: ${active.name}`,
activationWarning,
warnings: [
setStatusMessage(
isActiveCodexOAuth
? buildCodexOAuthActivationMessage({
prefix: `Active provider: ${active.name}`,
activationWarning,
settingsOverrideError
? `could not clear startup provider override (${settingsOverrideError})`
: null,
].filter((warning): warning is string => Boolean(warning)),
})
: settingsOverrideError
? `Active provider: ${active.name}. Warning: could not clear startup provider override (${settingsOverrideError}).`
: `Active provider: ${active.name}`
setStatusMessage(activationMessage)
warnings: [
activationWarning,
settingsOverrideError
? `could not clear startup provider override (${settingsOverrideError})`
: null,
].filter((warning): warning is string => Boolean(warning)),
})
: settingsOverrideError
? `Active provider: ${active.name}. Warning: could not clear startup provider override (${settingsOverrideError}).`
: `Active provider: ${active.name}`,
)
setIsActivating(false)
onDone({
action: 'activated',
activeProfileId: active.id,
activeProviderName: active.name,
activeProviderModel: newModel,
message: `Provider switched to ${active.name} (${newModel})`,
})
returnToMenu()
} catch (error) {
refreshProfiles()
@@ -993,9 +944,6 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
baseUrl: defaults.baseUrl,
model: defaults.model,
apiKey: defaults.apiKey ?? '',
apiFormat: 'chat_completions',
authHeader: '',
authHeaderValue: '',
}
setEditingProfileId(null)
setDraftProvider(defaults.provider ?? 'openai')
@@ -1042,22 +990,6 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
baseUrl: nextDraft.baseUrl,
model: nextDraft.model,
apiKey: nextDraft.apiKey,
apiFormat:
draftProvider === 'openai' && nextDraft.apiFormat === 'responses'
? 'responses'
: 'chat_completions',
authHeader:
draftProvider === 'openai' && nextDraft.authHeader
? nextDraft.authHeader
: undefined,
authScheme:
draftProvider === 'openai' && nextDraft.authHeader
? (nextDraft.authHeader.toLowerCase() === 'authorization' ? 'bearer' : 'raw')
: undefined,
authHeaderValue:
draftProvider === 'openai' && nextDraft.authHeaderValue
? nextDraft.authHeaderValue
: undefined,
}
const saved = editingProfileId
@@ -1073,7 +1005,7 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
if (isActiveSavedProfile) {
setAppState(prev => ({
...prev,
mainLoopModel: getPrimaryModel(saved.model),
mainLoopModel: saved.model,
mainLoopModelForSession: null,
}))
}
@@ -1280,9 +1212,9 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
setDraft(nextDraft)
setErrorMessage(undefined)
if (formStepIndex < formSteps.length - 1) {
if (formStepIndex < FORM_STEPS.length - 1) {
const nextIndex = formStepIndex + 1
const nextKey = formSteps[nextIndex]?.key ?? 'name'
const nextKey = FORM_STEPS[nextIndex]?.key ?? 'name'
setFormStepIndex(nextIndex)
setCursorOffset(nextDraft[nextKey].length)
return
@@ -1296,7 +1228,7 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
if (formStepIndex > 0) {
const nextIndex = formStepIndex - 1
const nextKey = formSteps[nextIndex]?.key ?? 'name'
const nextKey = FORM_STEPS[nextIndex]?.key ?? 'name'
setFormStepIndex(nextIndex)
setCursorOffset(draft[nextKey].length)
return
@@ -1347,11 +1279,6 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
label: 'Azure OpenAI',
description: 'Azure OpenAI endpoint (model=deployment name)',
},
{
value: 'bankr',
label: 'Bankr',
description: 'Bankr LLM Gateway (OpenAI-compatible)',
},
...(canUseCodexOAuth
? [
{
@@ -1394,13 +1321,8 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
},
{
value: 'moonshotai',
label: 'Moonshot AI - API',
description: 'Moonshot AI - API endpoint',
},
{
value: 'kimi-code',
label: 'Moonshot AI - Kimi Code',
description: 'Moonshot AI - Kimi Code Subscription endpoint',
label: 'Moonshot AI',
description: 'Kimi OpenAI-compatible endpoint',
},
{
value: 'nvidia-nim',
@@ -1427,16 +1349,6 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
label: 'Together AI',
description: 'Together chat/completions endpoint',
},
{
value: 'xai',
label: 'xAI',
description: 'xAI Grok OpenAI-compatible endpoint',
},
{
value: 'zai',
label: 'Z.AI - GLM Coding Plan',
description: 'Z.AI GLM coding subscription endpoint',
},
{
value: 'custom',
label: 'Custom',
@@ -1501,59 +1413,28 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
: 'OpenAI-compatible API'}
</Text>
<Text dimColor>
Step {formStepIndex + 1} of {formSteps.length}: {currentStep.label}
Step {formStepIndex + 1} of {FORM_STEPS.length}: {currentStep.label}
</Text>
{currentStepKey === 'apiFormat' ? (
<Select
options={[
{
value: 'chat_completions',
label: 'Chat Completions',
description: 'Use /chat/completions for broad OpenAI-compatible support',
},
{
value: 'responses',
label: 'Responses',
description: 'Use /responses for providers that support the Responses API',
},
]}
defaultValue={
currentValue === 'responses' ? 'responses' : 'chat_completions'
<Box flexDirection="row" gap={1}>
<Text>{figures.pointer}</Text>
<TextInput
value={currentValue}
onChange={value =>
setDraft(prev => ({
...prev,
[currentStepKey]: value,
}))
}
defaultFocusValue={
currentValue === 'responses' ? 'responses' : 'chat_completions'
}
onChange={value => handleFormSubmit(value)}
onCancel={handleBackFromForm}
visibleOptionCount={2}
onSubmit={handleFormSubmit}
focus={true}
showCursor={true}
placeholder={`${currentStep.placeholder}${figures.ellipsis}`}
mask={currentStepKey === 'apiKey' ? '*' : undefined}
columns={80}
cursorOffset={cursorOffset}
onChangeCursorOffset={setCursorOffset}
/>
) : (
<Box flexDirection="row" gap={1}>
<Text>{figures.pointer}</Text>
<TextInput
value={currentValue}
onChange={value =>
setDraft(prev => ({
...prev,
[currentStepKey]: value,
}))
}
onSubmit={handleFormSubmit}
focus={true}
showCursor={true}
placeholder={`${currentStep.placeholder}${figures.ellipsis}`}
mask={
currentStepKey === 'apiKey' ||
currentStepKey === 'authHeaderValue'
? '*'
: undefined
}
columns={80}
cursorOffset={cursorOffset}
onChangeCursorOffset={setCursorOffset}
/>
</Box>
)}
</Box>
{errorMessage && <Text color="error">{errorMessage}</Text>}
<Text dimColor>
Press Enter to continue. Press Esc to go back.

View File

@@ -119,17 +119,17 @@ export function ResumeTask({
return <Box flexDirection="column" padding={1}>
<Box flexDirection="row">
<Spinner />
<Text bold>Loading OpenClaude sessions</Text>
<Text bold>Loading Claude Code sessions</Text>
</Box>
<Text dimColor>
{retrying ? 'Retrying…' : 'Fetching your OpenClaude sessions…'}
{retrying ? 'Retrying…' : 'Fetching your Claude Code sessions…'}
</Text>
</Box>;
}
if (loadErrorType) {
return <Box flexDirection="column" padding={1}>
<Text bold color="error">
Error loading OpenClaude sessions
Error loading Claude Code sessions
</Text>
{renderErrorSpecificGuidance(loadErrorType)}
@@ -143,7 +143,7 @@ export function ResumeTask({
if (sessions.length === 0) {
return <Box flexDirection="column" padding={1}>
<Text bold>
No OpenClaude sessions found
No Claude Code sessions found
{currentRepo && <Text> for {currentRepo}</Text>}
</Text>
<Box marginTop={1}>
@@ -261,7 +261,7 @@ function renderErrorSpecificGuidance(errorType: LoadErrorType): React.ReactNode
</Box>;
case 'other':
return <Box marginY={1} flexDirection="row">
<Text dimColor>Sorry, OpenClaude encountered an error</Text>
<Text dimColor>Sorry, Claude Code encountered an error</Text>
</Box>;
}
}

View File

@@ -299,26 +299,6 @@ export function Config({
enabled: toolHistoryCompressionEnabled
});
}
}, {
id: 'showCacheStats',
label: 'Cache stats display',
value: globalConfig.showCacheStats,
options: ['off', 'compact', 'full'],
type: 'enum' as const,
onChange(mode: string) {
const showCacheStats = (mode === 'off' || mode === 'compact' || mode === 'full' ? mode : 'compact') as 'off' | 'compact' | 'full';
saveGlobalConfig(current_cs => ({
...current_cs,
showCacheStats
}));
setGlobalConfig({
...getGlobalConfig(),
showCacheStats
});
logEvent('tengu_show_cache_stats_setting_changed', {
mode: showCacheStats as AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS
});
}
}, {
id: 'spinnerTipsEnabled',
label: 'Show tips',

View File

@@ -1,249 +0,0 @@
import * as React from 'react'
import { useEffect, useState } from 'react'
import { useTerminalSize } from '../../hooks/useTerminalSize.js'
import { Box, Text } from '../../ink.js'
import { useKeybinding } from '../../keybindings/useKeybinding.js'
import {
buildMiniMaxUsageRows,
fetchMiniMaxUsage,
type MiniMaxUsageData,
type MiniMaxUsageRow,
} from '../../services/api/minimaxUsage.js'
import { logError } from '../../utils/log.js'
import { ConfigurableShortcutHint } from '../ConfigurableShortcutHint.js'
import { Byline } from '../design-system/Byline.js'
import { ProgressBar } from '../design-system/ProgressBar.js'
const RESET_COUNTDOWN_REFRESH_MS = 30_000
const PROGRESS_BAR_WIDTH = 18
type MiniMaxUsageLimitBarProps = {
label: string
usedPercent: number
resetsAt?: string
extraSubtext?: string
maxWidth: number
nowMs: number
}
/**
 * Formats a millisecond duration as a compact countdown string using the
 * two most significant units: "2d", "1d 3h", "1h 30m", or "5m".
 * Durations round up to whole minutes and never go below one minute.
 */
function formatCountdownDuration(ms: number): string {
  // Round up to whole minutes, with a floor of one minute.
  const minutesTotal = Math.max(1, Math.ceil(ms / 60_000))
  const d = Math.floor(minutesTotal / 1_440)
  const h = Math.floor((minutesTotal % 1_440) / 60)
  const m = minutesTotal % 60
  // Keep at most two adjacent units; drop a trailing zero unit.
  const parts: string[] = []
  if (d > 0) {
    parts.push(`${d}d`)
    if (h > 0) parts.push(`${h}h`)
  } else if (h > 0) {
    parts.push(`${h}h`)
    if (m > 0) parts.push(`${m}m`)
  } else {
    parts.push(`${m}m`)
  }
  return parts.join(' ')
}
/**
 * Builds the "Resets in …" label for a usage limit from its reset timestamp.
 * Returns undefined when no timestamp is given or it fails to parse, and
 * "Resetting now" once the reset moment has passed relative to `nowMs`.
 */
function formatResetCountdown(
  resetsAt: string | undefined,
  nowMs: number,
): string | undefined {
  // Nothing to show without a parseable timestamp.
  if (!resetsAt) return undefined
  const resetTimestamp = Date.parse(resetsAt)
  if (!Number.isFinite(resetTimestamp)) return undefined
  const msRemaining = resetTimestamp - nowMs
  return msRemaining <= 0
    ? 'Resetting now'
    : `Resets in ${formatCountdownDuration(msRemaining)}`
}
/**
 * Renders one usage-limit row: a bold label with an optional reset
 * countdown, followed by a progress bar and a dim "X% used" summary
 * (plus any extra subtext).
 */
function MiniMaxUsageLimitBar({
  label,
  usedPercent,
  resetsAt,
  extraSubtext,
  maxWidth,
  nowMs,
}: MiniMaxUsageLimitBarProps): React.ReactNode {
  // Clamp to [0, 100] so the bar ratio and percentage stay valid.
  const clampedPercent = Math.min(100, Math.max(0, usedPercent))
  const countdown = formatResetCountdown(resetsAt, nowMs)
  const summaryParts = [
    `${Math.floor(clampedPercent)}% used`,
    extraSubtext,
  ].filter((part): part is string => Boolean(part))
  // Bar never exceeds the fixed width, never drops below one column.
  const barWidth = Math.min(PROGRESS_BAR_WIDTH, Math.max(1, maxWidth))
  return (
    <Box flexDirection="column">
      <Text>
        <Text bold>{label}</Text>
        {countdown ? <Text dimColor> · {countdown}</Text> : null}
      </Text>
      <Box flexDirection="row" gap={1}>
        <ProgressBar
          ratio={clampedPercent / 100}
          width={barWidth}
          fillColor="rate_limit_fill"
          emptyColor="rate_limit_empty"
        />
        {summaryParts.length > 0 ? (
          <Text dimColor>{summaryParts.join(' · ')}</Text>
        ) : null}
      </Box>
    </Box>
  )
}
/**
 * A plain text row from the usage payload: bold label, optionally followed
 * by a dimmed " · value" suffix when a value is present.
 */
function MiniMaxUsageTextRow({
  label,
  value,
}: Extract<MiniMaxUsageRow, { kind: 'text' }>): React.ReactNode {
  // With no value the row collapses to just the bold label.
  if (!value) {
    return <Text bold>{label}</Text>
  }
  return (
    <Text>
      <Text bold>{label}</Text>
      <Text dimColor> · {value}</Text>
    </Text>
  )
}
/**
 * Settings panel showing MiniMax account usage: per-window limit bars and
 * free-text rows, with a live "Resets in …" countdown and a retry
 * keybinding when loading fails.
 *
 * NOTE(review): all hooks run before the conditional error/loading returns
 * below — keep that ordering if editing.
 */
export function MiniMaxUsage(): React.ReactNode {
  const [usage, setUsage] = useState<MiniMaxUsageData | null>(null)
  const [error, setError] = useState<string | null>(null)
  const [isLoading, setIsLoading] = useState(true)
  // Wall-clock "now" for countdown text; ticked by the interval effect below.
  const [nowMs, setNowMs] = useState(() => Date.now())
  const { columns } = useTerminalSize()
  const availableWidth = columns - 2
  const maxWidth = Math.min(availableWidth, 80)
  // Fetch usage data; failures are logged and surfaced as a retryable error.
  const loadUsage = React.useCallback(async () => {
    setIsLoading(true)
    setError(null)
    try {
      setUsage(await fetchMiniMaxUsage())
    } catch (err) {
      logError(err as Error)
      setError(
        err instanceof Error ? err.message : 'Failed to load MiniMax usage',
      )
    } finally {
      setIsLoading(false)
    }
  }, [])
  // Initial load on mount.
  useEffect(() => {
    void loadUsage()
  }, [loadUsage])
  // Periodically refresh "now" so countdowns stay current without refetching.
  useEffect(() => {
    const interval = setInterval(() => {
      setNowMs(Date.now())
    }, RESET_COUNTDOWN_REFRESH_MS)
    return () => clearInterval(interval)
  }, [])
  // Retry shortcut is only active once a load has failed and settled.
  useKeybinding(
    'settings:retry',
    () => {
      void loadUsage()
    },
    {
      context: 'Settings',
      isActive: !!error && !isLoading,
    },
  )
  // Error state: message plus retry/cancel shortcut hints.
  if (error) {
    return (
      <Box flexDirection="column" gap={1}>
        <Text color="error">Error: {error}</Text>
        <Text dimColor>
          <Byline>
            <ConfigurableShortcutHint
              action="settings:retry"
              context="Settings"
              fallback="r"
              description="retry"
            />
            <ConfigurableShortcutHint
              action="confirm:no"
              context="Settings"
              fallback="Esc"
              description="cancel"
            />
          </Byline>
        </Text>
      </Box>
    )
  }
  // Loading state: shown until the first fetch resolves.
  if (!usage) {
    return (
      <Box flexDirection="column" gap={1}>
        <Text dimColor>Loading MiniMax usage data</Text>
        <Text dimColor>
          <ConfigurableShortcutHint
            action="confirm:no"
            context="Settings"
            fallback="Esc"
            description="cancel"
          />
        </Text>
      </Box>
    )
  }
  // Only an 'available' payload carries snapshots to turn into rows.
  const rows =
    usage.availability === 'available'
      ? buildMiniMaxUsageRows(usage.snapshots)
      : []
  return (
    <Box flexDirection="column" gap={1} width="100%">
      {usage.planType ? <Text dimColor>Plan: {usage.planType}</Text> : null}
      {usage.availability === 'unknown' ? (
        <Text dimColor>{usage.message}</Text>
      ) : rows.length === 0 ? (
        <Text dimColor>
          No MiniMax usage windows were returned for this account.
        </Text>
      ) : null}
      {rows.map((row, index) =>
        row.kind === 'window' ? (
          <MiniMaxUsageLimitBar
            key={`${row.label}-${index}`}
            label={row.label}
            usedPercent={row.usedPercent}
            resetsAt={row.resetsAt}
            extraSubtext={row.extraSubtext}
            maxWidth={maxWidth}
            nowMs={nowMs}
          />
        ) : (
          <MiniMaxUsageTextRow
            key={`${row.label}-${index}`}
            label={row.label}
            value={row.value}
          />
        ),
      )}
      <Text dimColor>
        <ConfigurableShortcutHint
          action="confirm:no"
          context="Settings"
          fallback="Esc"
          description="cancel"
        />
      </Text>
    </Box>
  )
}

View File

@@ -1,28 +0,0 @@
import * as React from 'react'
import { Box, Text } from '../../ink.js'
import { ConfigurableShortcutHint } from '../ConfigurableShortcutHint.js'
type UnsupportedUsageProps = {
  providerLabel: string
}

/**
 * Placeholder usage panel for providers that expose no usage API.
 * Shows a dimmed explanation plus the configurable cancel shortcut.
 */
export function UnsupportedUsage(props: UnsupportedUsageProps): React.ReactNode {
  const { providerLabel } = props
  return (
    <Box flexDirection="column" gap={1}>
      <Text dimColor>
        Usage details are not currently available for {providerLabel}.
      </Text>
      <Text dimColor>
        <ConfigurableShortcutHint
          action="confirm:no"
          context="Settings"
          fallback="Esc"
          description="cancel"
        />
      </Text>
    </Box>
  )
}

View File

@@ -17,8 +17,6 @@ import { Byline } from '../design-system/Byline.js';
import { ProgressBar } from '../design-system/ProgressBar.js';
import { isEligibleForOverageCreditGrant, OverageCreditUpsell } from '../LogoV2/OverageCreditUpsell.js';
import { CodexUsage } from './CodexUsage.js';
import { MiniMaxUsage } from './MiniMaxUsage.js';
import { UnsupportedUsage } from './UnsupportedUsage.js';
type LimitBarProps = {
title: string;
limit: RateLimit;
@@ -268,26 +266,9 @@ function AnthropicUsage(): React.ReactNode {
</Box>;
}
export function Usage(): React.ReactNode {
const provider = getAPIProvider();
if (provider === 'codex') {
if (getAPIProvider() === 'codex') {
return <CodexUsage />;
}
if (provider === 'minimax') {
return <MiniMaxUsage />;
}
if (provider !== 'firstParty') {
const providerLabel = {
openai: 'this OpenAI-compatible provider',
gemini: 'Google Gemini',
github: 'GitHub Models',
mistral: 'Mistral',
'nvidia-nim': 'NVIDIA NIM',
bedrock: 'AWS Bedrock',
vertex: 'Google Vertex AI',
foundry: 'Microsoft Foundry'
}[provider] ?? 'this provider';
return <UnsupportedUsage providerLabel={providerLabel} />;
}
return <AnthropicUsage />;
}
type ExtraUsageSectionProps = {

View File

@@ -1,257 +0,0 @@
import { afterEach, beforeEach, describe, expect, test } from 'bun:test'
import { detectProvider } from './StartupScreen.js'
// Every env var detectProvider reads; each test starts from a clean slate
// and the originals are restored afterwards so tests cannot leak state.
const ENV_KEYS = [
  'CLAUDE_CODE_USE_OPENAI',
  'CLAUDE_CODE_USE_GEMINI',
  'CLAUDE_CODE_USE_GITHUB',
  'CLAUDE_CODE_USE_BEDROCK',
  'CLAUDE_CODE_USE_VERTEX',
  'CLAUDE_CODE_USE_MISTRAL',
  'OPENAI_BASE_URL',
  'OPENAI_API_KEY',
  'OPENAI_MODEL',
  'GEMINI_MODEL',
  'MISTRAL_MODEL',
  'ANTHROPIC_MODEL',
  'CLAUDE_MODEL',
  'NVIDIA_NIM',
  'MINIMAX_API_KEY',
]
const originalEnv: Record<string, string | undefined> = {}
beforeEach(() => {
  // Snapshot and clear all provider-related env vars before each test.
  for (const key of ENV_KEYS) {
    originalEnv[key] = process.env[key]
    delete process.env[key]
  }
})
afterEach(() => {
  // Restore the pre-test environment exactly (including absent keys).
  for (const key of ENV_KEYS) {
    if (originalEnv[key] === undefined) {
      delete process.env[key]
    } else {
      process.env[key] = originalEnv[key]
    }
  }
})
// Configure the OpenAI-compatible code path with the given endpoint/model.
function setupOpenAIMode(baseUrl: string, model: string): void {
  process.env.CLAUDE_CODE_USE_OPENAI = '1'
  process.env.OPENAI_BASE_URL = baseUrl
  process.env.OPENAI_MODEL = model
  process.env.OPENAI_API_KEY = 'test-key'
}
// --- Issue #855: aggregator URL must win over vendor-prefixed model name ---
describe('detectProvider — aggregator URL authoritative over model-name substring (#855)', () => {
  test('OpenRouter + deepseek/deepseek-chat labels as OpenRouter', () => {
    setupOpenAIMode('https://openrouter.ai/api/v1', 'deepseek/deepseek-chat')
    expect(detectProvider().name).toBe('OpenRouter')
  })
  test('OpenRouter + moonshotai/kimi-k2 labels as OpenRouter', () => {
    setupOpenAIMode('https://openrouter.ai/api/v1', 'moonshotai/kimi-k2')
    expect(detectProvider().name).toBe('OpenRouter')
  })
  test('OpenRouter + mistralai/mistral-large labels as OpenRouter', () => {
    setupOpenAIMode('https://openrouter.ai/api/v1', 'mistralai/mistral-large')
    expect(detectProvider().name).toBe('OpenRouter')
  })
  test('OpenRouter + meta-llama/llama-3.3 labels as OpenRouter', () => {
    setupOpenAIMode('https://openrouter.ai/api/v1', 'meta-llama/llama-3.3-70b-instruct')
    expect(detectProvider().name).toBe('OpenRouter')
  })
  test('Together + deepseek-ai/DeepSeek-V3 labels as Together AI', () => {
    setupOpenAIMode('https://api.together.xyz/v1', 'deepseek-ai/DeepSeek-V3')
    expect(detectProvider().name).toBe('Together AI')
  })
  test('Together + meta-llama/Llama-3.3 labels as Together AI', () => {
    setupOpenAIMode('https://api.together.xyz/v1', 'meta-llama/Llama-3.3-70B-Instruct-Turbo')
    expect(detectProvider().name).toBe('Together AI')
  })
  test('Groq + deepseek-r1-distill-llama-70b labels as Groq', () => {
    setupOpenAIMode('https://api.groq.com/openai/v1', 'deepseek-r1-distill-llama-70b')
    expect(detectProvider().name).toBe('Groq')
  })
  test('Groq + llama-3.3-70b-versatile labels as Groq', () => {
    setupOpenAIMode('https://api.groq.com/openai/v1', 'llama-3.3-70b-versatile')
    expect(detectProvider().name).toBe('Groq')
  })
  test('Azure + any deepseek deployment labels as Azure OpenAI', () => {
    setupOpenAIMode('https://my-resource.openai.azure.com/', 'deepseek-chat')
    expect(detectProvider().name).toBe('Azure OpenAI')
  })
})
// --- Direct vendor endpoints still label correctly (regression) ---
describe('detectProvider — direct vendor endpoints', () => {
  test('api.deepseek.com labels as DeepSeek', () => {
    setupOpenAIMode('https://api.deepseek.com/v1', 'deepseek-chat')
    expect(detectProvider().name).toBe('DeepSeek')
  })
  test('api.kimi.com labels as Moonshot AI - Kimi Code', () => {
    setupOpenAIMode('https://api.kimi.com/coding/v1', 'kimi-for-coding')
    expect(detectProvider().name).toBe('Moonshot AI - Kimi Code')
  })
  test('api.moonshot.cn labels as Moonshot AI - API', () => {
    setupOpenAIMode('https://api.moonshot.cn/v1', 'moonshot-v1-8k')
    expect(detectProvider().name).toBe('Moonshot AI - API')
  })
  test('api.mistral.ai labels as Mistral', () => {
    setupOpenAIMode('https://api.mistral.ai/v1', 'mistral-large-latest')
    expect(detectProvider().name).toBe('Mistral')
  })
  test('api.z.ai labels as Z.AI GLM', () => {
    setupOpenAIMode('https://api.z.ai/api/coding/paas/v4', 'GLM-5.1')
    expect(detectProvider().name).toBe('Z.AI - GLM')
  })
  test('default OpenAI URL + gpt-4o labels as OpenAI', () => {
    setupOpenAIMode('https://api.openai.com/v1', 'gpt-4o')
    expect(detectProvider().name).toBe('OpenAI')
  })
})
// --- rawModel fallback for generic/custom endpoints ---
describe('detectProvider — rawModel fallback when URL is generic', () => {
  test('custom proxy + deepseek-chat falls back to DeepSeek', () => {
    setupOpenAIMode('https://my-proxy.internal/v1', 'deepseek-chat')
    expect(detectProvider().name).toBe('DeepSeek')
  })
  test('custom proxy + kimi-for-coding falls back to Moonshot AI - Kimi Code', () => {
    setupOpenAIMode('https://my-proxy.internal/v1', 'kimi-for-coding')
    expect(detectProvider().name).toBe('Moonshot AI - Kimi Code')
  })
  test('custom proxy + kimi-k2 falls back to Moonshot AI - API', () => {
    setupOpenAIMode('https://my-proxy.internal/v1', 'kimi-k2-instruct')
    expect(detectProvider().name).toBe('Moonshot AI - API')
  })
  test('custom proxy + llama-3.3 falls back to Meta Llama', () => {
    setupOpenAIMode('https://my-proxy.internal/v1', 'llama-3.3-70b')
    expect(detectProvider().name).toBe('Meta Llama')
  })
  test('custom proxy + mistral-large falls back to Mistral', () => {
    setupOpenAIMode('https://my-proxy.internal/v1', 'mistral-large-latest')
    expect(detectProvider().name).toBe('Mistral')
  })
  test('custom proxy + exact uppercase GLM ID falls back to Z.AI GLM', () => {
    setupOpenAIMode('https://my-proxy.internal/v1', 'GLM-5.1')
    expect(detectProvider().name).toBe('Z.AI - GLM')
  })
  // Lowercase GLM ids are deliberately NOT matched — see the two tests below.
  test('custom proxy + lowercase glm ID stays generic OpenAI', () => {
    setupOpenAIMode('https://my-proxy.internal/v1', 'glm-5.1')
    expect(detectProvider().name).toBe('OpenAI')
  })
  test('DashScope lowercase glm ID is not mislabeled as Z.AI', () => {
    setupOpenAIMode('https://dashscope.aliyuncs.com/compatible-mode/v1', 'glm-5.1')
    expect(detectProvider().name).toBe('OpenAI')
  })
})
// --- Explicit env flags win over URL heuristics ---
describe('detectProvider — explicit dedicated-provider env flags', () => {
  test('NVIDIA_NIM=1 overrides aggregator URL', () => {
    setupOpenAIMode('https://openrouter.ai/api/v1', 'some-nim-model')
    process.env.NVIDIA_NIM = '1'
    expect(detectProvider().name).toBe('NVIDIA NIM')
  })
  test('MINIMAX_API_KEY overrides aggregator URL', () => {
    setupOpenAIMode('https://openrouter.ai/api/v1', 'any-model')
    process.env.MINIMAX_API_KEY = 'test-key'
    expect(detectProvider().name).toBe('MiniMax')
  })
})
// --- modelOverride from --model flag ---
describe('detectProvider — modelOverride from --model flag', () => {
  test('modelOverride overrides default Anthropic model', () => {
    const result = detectProvider('claude-opus-4-6')
    expect(result.name).toBe('Anthropic')
    expect(result.model).toContain('opus')
  })
  test('modelOverride alias is resolved for Anthropic', () => {
    const result = detectProvider('opus')
    expect(result.name).toBe('Anthropic')
    expect(result.model).toContain('opus')
  })
  test('modelOverride takes priority over ANTHROPIC_MODEL env var', () => {
    process.env.ANTHROPIC_MODEL = 'claude-haiku-4-5-20251001'
    const result = detectProvider('claude-opus-4-6')
    expect(result.name).toBe('Anthropic')
    expect(result.model).toContain('opus')
  })
  test('modelOverride takes priority over CLAUDE_MODEL env var', () => {
    process.env.CLAUDE_MODEL = 'claude-haiku-4-5-20251001'
    const result = detectProvider('claude-opus-4-6')
    expect(result.name).toBe('Anthropic')
    expect(result.model).toContain('opus')
  })
  test('modelOverride works for OpenAI provider', () => {
    process.env.CLAUDE_CODE_USE_OPENAI = '1'
    process.env.OPENAI_API_KEY = 'test-key'
    process.env.OPENAI_MODEL = 'gpt-4o'
    const result = detectProvider('gpt-4-turbo')
    expect(result.model).toContain('gpt-4-turbo')
  })
  test('modelOverride works for Gemini provider', () => {
    process.env.CLAUDE_CODE_USE_GEMINI = '1'
    const result = detectProvider('gemini-2.5-pro')
    expect(result.model).toBe('gemini-2.5-pro')
  })
  test('modelOverride works for Mistral provider', () => {
    process.env.CLAUDE_CODE_USE_MISTRAL = '1'
    const result = detectProvider('mistral-large-latest')
    expect(result.model).toBe('mistral-large-latest')
  })
  test('modelOverride works for GitHub provider', () => {
    process.env.CLAUDE_CODE_USE_GITHUB = '1'
    const result = detectProvider('gpt-4o')
    expect(result.model).toContain('gpt-4o')
  })
  // No-override calls must keep the Anthropic/sonnet default behavior.
  test('undefined modelOverride preserves default behavior', () => {
    const result = detectProvider(undefined)
    expect(result.name).toBe('Anthropic')
    expect(result.model).toContain('sonnet')
  })
  test('no argument preserves default behavior', () => {
    const result = detectProvider()
    expect(result.name).toBe('Anthropic')
    expect(result.model).toContain('sonnet')
  })
})

View File

@@ -9,7 +9,6 @@ import { isLocalProviderUrl, resolveProviderRequest } from '../services/api/prov
import { getLocalOpenAICompatibleProviderLabel } from '../utils/providerDiscovery.js'
import { getSettings_DEPRECATED } from '../utils/settings/settings.js'
import { parseUserSpecifiedModel } from '../utils/model/model.js'
import { containsExactZaiGlmModelId, isZaiBaseUrl } from '../utils/zaiProvider.js'
declare const MACRO: { VERSION: string; DISPLAY_VERSION?: string }
@@ -84,33 +83,33 @@ const LOGO_CLAUDE = [
// ─── Provider detection ───────────────────────────────────────────────────────
export function detectProvider(modelOverride?: string): { name: string; model: string; baseUrl: string; isLocal: boolean } {
function detectProvider(): { name: string; model: string; baseUrl: string; isLocal: boolean } {
const useGemini = process.env.CLAUDE_CODE_USE_GEMINI === '1' || process.env.CLAUDE_CODE_USE_GEMINI === 'true'
const useGithub = process.env.CLAUDE_CODE_USE_GITHUB === '1' || process.env.CLAUDE_CODE_USE_GITHUB === 'true'
const useOpenAI = process.env.CLAUDE_CODE_USE_OPENAI === '1' || process.env.CLAUDE_CODE_USE_OPENAI === 'true'
const useMistral = process.env.CLAUDE_CODE_USE_MISTRAL === '1' || process.env.CLAUDE_CODE_USE_MISTRAL === 'true'
if (useGemini) {
const model = modelOverride || process.env.GEMINI_MODEL || 'gemini-2.0-flash'
const model = process.env.GEMINI_MODEL || 'gemini-2.0-flash'
const baseUrl = process.env.GEMINI_BASE_URL || 'https://generativelanguage.googleapis.com/v1beta/openai'
return { name: 'Google Gemini', model, baseUrl, isLocal: false }
}
if (useMistral) {
const model = modelOverride || process.env.MISTRAL_MODEL || 'devstral-latest'
const model = process.env.MISTRAL_MODEL || 'devstral-latest'
const baseUrl = process.env.MISTRAL_BASE_URL || 'https://api.mistral.ai/v1'
return { name: 'Mistral', model, baseUrl, isLocal: false }
}
if (useGithub) {
const model = modelOverride || process.env.OPENAI_MODEL || 'github:copilot'
const model = process.env.OPENAI_MODEL || 'github:copilot'
const baseUrl =
process.env.OPENAI_BASE_URL || 'https://api.githubcopilot.com'
return { name: 'GitHub Copilot', model, baseUrl, isLocal: false }
}
if (useOpenAI) {
const rawModel = modelOverride || process.env.OPENAI_MODEL || 'gpt-4o'
const rawModel = process.env.OPENAI_MODEL || 'gpt-4o'
const resolvedRequest = resolveProviderRequest({
model: rawModel,
baseUrl: process.env.OPENAI_BASE_URL,
@@ -118,44 +117,30 @@ export function detectProvider(modelOverride?: string): { name: string; model: s
const baseUrl = resolvedRequest.baseUrl
const isLocal = isLocalProviderUrl(baseUrl)
let name = 'OpenAI'
// Explicit dedicated-provider env flags win.
if (process.env.NVIDIA_NIM) name = 'NVIDIA NIM'
else if (process.env.MINIMAX_API_KEY) name = 'MiniMax'
else if (
resolvedRequest.transport === 'codex_responses' ||
baseUrl.includes('chatgpt.com/backend-api/codex')
)
if (/nvidia/i.test(baseUrl) || /nvidia/i.test(rawModel) || process.env.NVIDIA_NIM)
name = 'NVIDIA NIM'
else if (/minimax/i.test(baseUrl) || /minimax/i.test(rawModel) || process.env.MINIMAX_API_KEY)
name = 'MiniMax'
else if (resolvedRequest.transport === 'codex_responses' || baseUrl.includes('chatgpt.com/backend-api/codex'))
name = 'Codex'
// Base URL is authoritative — must precede rawModel checks so aggregators
// (OpenRouter/Together/Groq) aren't mislabelled as DeepSeek/Kimi/etc.
// when routed to models whose IDs contain a vendor prefix. See issue #855.
else if (/openrouter/i.test(baseUrl)) name = 'OpenRouter'
else if (/together/i.test(baseUrl)) name = 'Together AI'
else if (/groq/i.test(baseUrl)) name = 'Groq'
else if (/azure/i.test(baseUrl)) name = 'Azure OpenAI'
else if (/nvidia/i.test(baseUrl)) name = 'NVIDIA NIM'
else if (/minimax/i.test(baseUrl)) name = 'MiniMax'
else if (/api\.kimi\.com/i.test(baseUrl)) name = 'Moonshot AI - Kimi Code'
else if (/moonshot/i.test(baseUrl)) name = 'Moonshot AI - API'
else if (/deepseek/i.test(baseUrl)) name = 'DeepSeek'
else if (/x\.ai/i.test(baseUrl)) name = 'xAI'
else if (isZaiBaseUrl(baseUrl)) name = 'Z.AI - GLM'
else if (/mistral/i.test(baseUrl)) name = 'Mistral'
// rawModel fallback — fires only when base URL is generic/custom.
else if (/nvidia/i.test(rawModel)) name = 'NVIDIA NIM'
else if (/minimax/i.test(rawModel)) name = 'MiniMax'
else if (/\bkimi-for-coding\b/i.test(rawModel))
name = 'Moonshot AI - Kimi Code'
else if (/\bkimi-k/i.test(rawModel) || /moonshot/i.test(rawModel))
name = 'Moonshot AI - API'
else if (/deepseek/i.test(rawModel)) name = 'DeepSeek'
else if (/grok/i.test(rawModel)) name = 'xAI'
else if (containsExactZaiGlmModelId(rawModel)) name = 'Z.AI - GLM'
else if (/mistral/i.test(rawModel)) name = 'Mistral'
else if (/llama/i.test(rawModel)) name = 'Meta Llama'
else if (/bankr/i.test(baseUrl)) name = 'Bankr'
else if (/bankr/i.test(rawModel)) name = 'Bankr'
else if (isLocal) name = getLocalOpenAICompatibleProviderLabel(baseUrl)
else if (/moonshot/i.test(baseUrl) || /kimi/i.test(rawModel))
name = 'Moonshot (Kimi)'
else if (/deepseek/i.test(baseUrl) || /deepseek/i.test(rawModel))
name = 'DeepSeek'
else if (/openrouter/i.test(baseUrl))
name = 'OpenRouter'
else if (/together/i.test(baseUrl))
name = 'Together AI'
else if (/groq/i.test(baseUrl))
name = 'Groq'
else if (/mistral/i.test(baseUrl) || /mistral/i.test(rawModel))
name = 'Mistral'
else if (/azure/i.test(baseUrl))
name = 'Azure OpenAI'
else if (/llama/i.test(rawModel))
name = 'Meta Llama'
else if (isLocal)
name = getLocalOpenAICompatibleProviderLabel(baseUrl)
// Resolve model alias to actual model name + reasoning effort
let displayModel = resolvedRequest.resolvedModel
@@ -168,7 +153,7 @@ export function detectProvider(modelOverride?: string): { name: string; model: s
// Default: Anthropic - check settings.model first, then env vars
const settings = getSettings_DEPRECATED() || {}
const modelSetting = modelOverride || settings.model || process.env.ANTHROPIC_MODEL || process.env.CLAUDE_MODEL || 'claude-sonnet-4-6'
const modelSetting = settings.model || process.env.ANTHROPIC_MODEL || process.env.CLAUDE_MODEL || 'claude-sonnet-4-6'
const resolvedModel = parseUserSpecifiedModel(modelSetting)
const baseUrl = process.env.ANTHROPIC_BASE_URL ?? 'https://api.anthropic.com'
const isLocal = isLocalProviderUrl(baseUrl)
@@ -184,11 +169,11 @@ function boxRow(content: string, width: number, rawLen: number): string {
// ─── Main ─────────────────────────────────────────────────────────────────────
export function printStartupScreen(modelOverride?: string): void {
export function printStartupScreen(): void {
// Skip in non-interactive / CI / print mode
if (process.env.CI || !process.stdout.isTTY) return
const p = detectProvider(modelOverride)
const p = detectProvider()
const W = 62
const out: string[] = []

View File

@@ -94,7 +94,7 @@ export function Stats(t0) {
const allTimePromise = t1;
let t2;
if ($[1] === Symbol.for("react.memo_cache_sentinel")) {
t2 = <Box marginTop={1}><Spinner /><Text> Loading your OpenClaude stats</Text></Box>;
t2 = <Box marginTop={1}><Spinner /><Text> Loading your Claude Code stats</Text></Box>;
$[1] = t2;
} else {
t2 = $[1];
@@ -242,7 +242,7 @@ function StatsContent(t0) {
if (allTimeResult.type === "empty") {
let t7;
if ($[15] === Symbol.for("react.memo_cache_sentinel")) {
t7 = <Box marginTop={1}><Text color="warning">No stats available yet. Start using OpenClaude!</Text></Box>;
t7 = <Box marginTop={1}><Text color="warning">No stats available yet. Start using Claude Code!</Text></Box>;
$[15] = t7;
} else {
t7 = $[15];

View File

@@ -73,7 +73,7 @@ export function TeleportRepoMismatchDialog(t0) {
const options = t2;
let t3;
if ($[8] !== availablePaths.length || $[9] !== errorMessage || $[10] !== handleChange || $[11] !== options || $[12] !== targetRepo || $[13] !== validating) {
t3 = availablePaths.length > 0 ? <><Box flexDirection="column" gap={1}>{errorMessage && <Text color="error">{errorMessage}</Text>}<Text>Open OpenClaude in <Text bold={true}>{targetRepo}</Text>:</Text></Box>{validating ? <Box><Spinner /><Text> Validating repository</Text></Box> : <Select options={options} onChange={value_0 => void handleChange(value_0)} />}</> : <Box flexDirection="column" gap={1}>{errorMessage && <Text color="error">{errorMessage}</Text>}<Text dimColor={true}>Run openclaude --teleport from a checkout of {targetRepo}</Text></Box>;
t3 = availablePaths.length > 0 ? <><Box flexDirection="column" gap={1}>{errorMessage && <Text color="error">{errorMessage}</Text>}<Text>Open Claude Code in <Text bold={true}>{targetRepo}</Text>:</Text></Box>{validating ? <Box><Spinner /><Text> Validating repository</Text></Box> : <Select options={options} onChange={value_0 => void handleChange(value_0)} />}</> : <Box flexDirection="column" gap={1}>{errorMessage && <Text color="error">{errorMessage}</Text>}<Text dimColor={true}>Run claude --teleport from a checkout of {targetRepo}</Text></Box>;
$[8] = availablePaths.length;
$[9] = errorMessage;
$[10] = handleChange;

View File

@@ -206,7 +206,7 @@ export function TrustDialog(t0) {
if ($[20] === Symbol.for("react.memo_cache_sentinel")) {
t16 = <Text bold={true}>{getFsImplementation().cwd()}</Text>;
t17 = <Text>Quick safety check: Is this a project you created or one you trust? (Like your own code, a well-known open source project, or work from your team). If not, take a moment to review what{"'"}s in this folder first.</Text>;
t18 = <Text>OpenClaude{"'"}ll be able to read, edit, and execute files here.</Text>;
t18 = <Text>Claude Code{"'"}ll be able to read, edit, and execute files here.</Text>;
$[20] = t16;
$[21] = t17;
$[22] = t18;

View File

@@ -254,7 +254,7 @@ function ElicitationFormDialog({
// Text fields are always in edit mode when focused — no Enter-to-edit step.
const isEditingTextField = currentFieldIsText && !focusedButton;
useRegisterOverlay('elicitation');
useNotifyAfterTimeout('OpenClaude needs your input', 'elicitation_dialog');
useNotifyAfterTimeout('Claude Code needs your input', 'elicitation_dialog');
// Sync textInputValue when the focused field changes
const syncTextInput = useCallback((fieldIndex: number | undefined) => {
@@ -1004,7 +1004,7 @@ function ElicitationURLDialog({
const phaseRef = useRef<'prompt' | 'waiting'>('prompt');
const [focusedButton, setFocusedButton] = useState<'accept' | 'decline' | 'open' | 'action' | 'cancel'>('accept');
const showCancel = waitingState?.showCancel ?? false;
useNotifyAfterTimeout('OpenClaude needs your input', 'elicitation_url_dialog');
useNotifyAfterTimeout('Claude Code needs your input', 'elicitation_url_dialog');
useRegisterOverlay('elicitation-url');
// Keep refs in sync for use in abort handler (avoids re-registering listener)

View File

@@ -102,9 +102,9 @@ export function MCPRemoteServerMenu({
if (success) {
onComplete?.(`Authentication successful. Connected to ${server.name}.`);
} else if (result.client.type === 'needs-auth') {
onComplete?.('Authentication successful, but server still requires authentication. You may need to manually restart OpenClaude.');
onComplete?.('Authentication successful, but server still requires authentication. You may need to manually restart Claude Code.');
} else {
onComplete?.('Authentication successful, but server reconnection failed. You may need to manually restart OpenClaude for the changes to take effect.');
onComplete?.('Authentication successful, but server reconnection failed. You may need to manually restart Claude Code for the changes to take effect.');
}
} catch (err) {
logEvent('tengu_claudeai_mcp_auth_completed', {
@@ -281,11 +281,11 @@ export function MCPRemoteServerMenu({
const message = isEffectivelyAuthenticated ? `Authentication successful. Reconnected to ${server.name}.` : `Authentication successful. Connected to ${server.name}.`;
onComplete?.(message);
} else if (result_0.client.type === 'needs-auth') {
onComplete?.('Authentication successful, but server still requires authentication. You may need to manually restart OpenClaude.');
onComplete?.('Authentication successful, but server still requires authentication. You may need to manually restart Claude Code.');
} else {
// result.client.type === 'failed'
logMCPDebug(server.name, `Reconnection failed after authentication`);
onComplete?.('Authentication successful, but server reconnection failed. You may need to manually restart OpenClaude for the changes to take effect.');
onComplete?.('Authentication successful, but server reconnection failed. You may need to manually restart Claude Code for the changes to take effect.');
}
}
} catch (err_1) {

View File

@@ -147,7 +147,7 @@ export function MCPSettings(t0) {
return;
}
if (servers.length === 0 && agentMcpServers.length === 0) {
onComplete("No MCP servers configured. Please run /doctor if this is unexpected. Otherwise, run `openclaude mcp --help` or visit https://github.com/Gitlawb/openclaude to learn more.");
onComplete("No MCP servers configured. Please run /doctor if this is unexpected. Otherwise, run `claude mcp --help` or visit https://code.claude.com/docs/en/mcp to learn more.");
}
};
t8 = [servers.length, filteredClients.length, agentMcpServers.length, onComplete];

View File

@@ -161,7 +161,7 @@ function ComputerUseTccPanel(t0) {
}
let t7;
if ($[15] === Symbol.for("react.memo_cache_sentinel")) {
t7 = <Text dimColor={true}>Grant the missing permissions in System Settings, then select "Try again". macOS may require you to restart OpenClaude after granting Screen Recording.</Text>;
t7 = <Text dimColor={true}>Grant the missing permissions in System Settings, then select "Try again". macOS may require you to restart Claude Code after granting Screen Recording.</Text>;
$[15] = t7;
} else {
t7 = $[15];

View File

@@ -730,7 +730,7 @@ export function buildPlanApprovalOptions({
});
if (showUltraplan) {
options.push({
label: 'No, refine with Ultraplan on OpenClaude on the web',
label: 'No, refine with Ultraplan on Claude Code on the web',
value: 'ultraplan'
});
}

View File

@@ -128,18 +128,18 @@ export type ToolUseConfirm<Input extends AnyObject = AnyObject> = {
function getNotificationMessage(toolUseConfirm: ToolUseConfirm): string {
const toolName = toolUseConfirm.tool.userFacingName(toolUseConfirm.input as never);
if (toolUseConfirm.tool === ExitPlanModeV2Tool) {
return 'OpenClaude needs your approval for the plan';
return 'Claude Code needs your approval for the plan';
}
if (toolUseConfirm.tool === EnterPlanModeTool) {
return 'OpenClaude wants to enter plan mode';
return 'Claude Code wants to enter plan mode';
}
if (feature('REVIEW_ARTIFACT') && toolUseConfirm.tool === ReviewArtifactTool) {
return 'OpenClaude needs your approval for a review artifact';
return 'Claude needs your approval for a review artifact';
}
if (!toolName || toolName.trim() === '') {
return 'OpenClaude needs your attention';
return 'Claude Code needs your attention';
}
return `OpenClaude needs your permission to use ${toolName}`;
return `Claude needs your permission to use ${toolName}`;
}
// TODO: Move this to Tool.renderPermissionRequest

View File

@@ -40,7 +40,7 @@ function PermissionDescription() {
const $ = _c(1);
let t0;
if ($[0] === Symbol.for("react.memo_cache_sentinel")) {
t0 = <Text dimColor={true}>OpenClaude will be able to read files in this directory and make edits when auto-accept edits is on.</Text>;
t0 = <Text dimColor={true}>Claude Code will be able to read files in this directory and make edits when auto-accept edits is on.</Text>;
$[0] = t0;
} else {
t0 = $[0];

View File

@@ -388,9 +388,9 @@ function PermissionRulesTab(t0) {
let t8;
if ($[10] === Symbol.for("react.memo_cache_sentinel")) {
t8 = {
allow: "OpenClaude won't ask before using allowed tools.",
ask: "OpenClaude will always ask for confirmation before using these tools.",
deny: "OpenClaude will always reject requests to use denied tools."
allow: "Claude Code won't ask before using allowed tools.",
ask: "Claude Code will always ask for confirmation before using these tools.",
deny: "Claude Code will always reject requests to use denied tools."
};
$[10] = t8;
} else {
@@ -1098,7 +1098,7 @@ export function PermissionRuleList(t0) {
}
let t28;
if ($[89] === Symbol.for("react.memo_cache_sentinel")) {
t28 = <Text>OpenClaude can read files in the workspace, and make edits when auto-accept edits is on.</Text>;
t28 = <Text>Claude Code can read files in the workspace, and make edits when auto-accept edits is on.</Text>;
$[89] = t28;
} else {
t28 = $[89];

View File

@@ -68,7 +68,7 @@ export function RemoveWorkspaceDirectory(t0) {
}
let t4;
if ($[10] === Symbol.for("react.memo_cache_sentinel")) {
t4 = <Text>OpenClaude will no longer have access to files in this directory.</Text>;
t4 = <Text>Claude Code will no longer have access to files in this directory.</Text>;
$[10] = t4;
} else {
t4 = $[10];

View File

@@ -44,7 +44,7 @@ type Props = {
export function formatToolUseSummary(name: string, input: unknown): string {
// plan_ready phase is only reached via ExitPlanMode tool
if (name === EXIT_PLAN_MODE_V2_TOOL_NAME) {
return 'Review the plan in OpenClaude on the web';
return 'Review the plan in Claude Code on the web';
}
if (!input || typeof input !== 'object') return name;
// AskUserQuestion: show the question text as a CTA, not the tool name.
@@ -168,7 +168,7 @@ function UltraplanSessionDetail(t0) {
}
let t7;
if ($[12] === Symbol.for("react.memo_cache_sentinel")) {
t7 = <Text dimColor={true}>This will terminate the OpenClaude on the web session.</Text>;
t7 = <Text dimColor={true}>This will terminate the Claude Code on the web session.</Text>;
$[12] = t7;
} else {
t7 = $[12];
@@ -311,7 +311,7 @@ function UltraplanSessionDetail(t0) {
let t19;
if ($[47] === Symbol.for("react.memo_cache_sentinel")) {
t19 = {
label: "Review in OpenClaude on the web",
label: "Review in Claude Code on the web",
value: "open" as const
};
$[47] = t19;
@@ -595,13 +595,13 @@ function ReviewSessionDetail(t0) {
let t3;
if ($[11] !== completed || $[12] !== onKill || $[13] !== running) {
t3 = completed ? [{
label: "Open in OpenClaude on the web",
label: "Open in Claude Code on the web",
value: "open"
}, {
label: "Dismiss",
value: "dismiss"
}] : [{
label: "Open in OpenClaude on the web",
label: "Open in Claude Code on the web",
value: "open"
}, ...(onKill && running ? [{
label: "Stop ultrareview",

View File

@@ -11,7 +11,6 @@ import { afterEach, expect, test } from 'bun:test'
NATIVE_PACKAGE_URL: undefined,
}
import { clearSystemPromptSections } from './systemPromptSections.js'
import { getSystemPrompt, DEFAULT_AGENT_PROMPT } from './prompts.js'
import { CLI_SYSPROMPT_PREFIXES, getCLISyspromptPrefix } from './system.js'
import { CLAUDE_CODE_GUIDE_AGENT } from '../tools/AgentTool/built-in/claudeCodeGuideAgent.js'
@@ -24,7 +23,6 @@ const originalSimpleEnv = process.env.CLAUDE_CODE_SIMPLE
afterEach(() => {
process.env.CLAUDE_CODE_SIMPLE = originalSimpleEnv
clearSystemPromptSections()
})
test('CLI identity prefixes describe OpenClaude instead of Claude Code', () => {
@@ -49,21 +47,6 @@ test('simple mode identity describes OpenClaude instead of Claude Code', async (
expect(prompt[0]).not.toContain("Anthropic's official CLI for Claude")
})
// Verifies the model-identity line in the system prompt tracks the active
// model instead of being frozen at session start.
test('system prompt model identity updates when model changes mid-session', async () => {
  delete process.env.CLAUDE_CODE_SIMPLE
  clearSystemPromptSections()
  const before = (await getSystemPrompt([], 'old-test-model')).join('\n')
  const after = (await getSystemPrompt([], 'new-test-model')).join('\n')
  expect(before).toContain('You are powered by the model old-test-model.')
  expect(after).toContain('You are powered by the model new-test-model.')
  expect(after).not.toContain('You are powered by the model old-test-model.')
})
test('built-in agent prompts describe OpenClaude instead of Claude Code', () => {
expect(DEFAULT_AGENT_PROMPT).toContain('OpenClaude')
expect(DEFAULT_AGENT_PROMPT).not.toContain('Claude Code')

View File

@@ -496,7 +496,7 @@ ${CYBER_RISK_INSTRUCTION}`,
systemPromptSection('ant_model_override', () =>
getAntModelOverrideSection(),
),
systemPromptSection(`env_info_simple:${model}`, () =>
systemPromptSection('env_info_simple', () =>
computeSimpleEnvInfo(model, additionalWorkingDirectories),
),
systemPromptSection('language', () =>
@@ -519,7 +519,7 @@ ${CYBER_RISK_INSTRUCTION}`,
'MCP servers connect/disconnect between turns',
),
systemPromptSection('scratchpad', () => getScratchpadInstructions()),
systemPromptSection(`frc:${model}`, () => getFunctionResultClearingSection(model)),
systemPromptSection('frc', () => getFunctionResultClearingSection(model)),
systemPromptSection(
'summarize_tool_results',
() => SUMMARIZE_TOOL_RESULTS_SECTION,

View File

@@ -1,84 +0,0 @@
import { describe, expect, test } from 'bun:test'
// The feature() function from bun:bundle is shimmed at build time.
// In tests, it's not available, so we test the getRepoMapContext logic
// by importing and calling it directly — the function checks feature('REPO_MAP')
// which in the test environment (no bun:bundle shim) will throw or return false.
// We test the actual logic paths through integration-style tests.
describe('getRepoMapContext', () => {
  // feature() from bun:bundle is shimmed at build time and is not available
  // under bun:test, so these are structural/integration checks rather than
  // direct feature-flag tests.
  test('returns null when REPO_MAP flag is off (default)', async () => {
    // With no bun:bundle shim, feature('REPO_MAP') cannot be truthy, so in a
    // default build (flag false in scripts/build.ts) getRepoMapContext yields
    // null. Here we only assert the module surface is intact.
    const mod = await import('./context.js')
    expect(typeof mod.getRepoMapContext).toBe('function')
  })

  test('buildRepoMap produces valid output for context injection', async () => {
    const fs = await import('fs')
    const os = await import('os')
    const path = await import('path')
    const { buildRepoMap } = await import('./context/repoMap/index.js')
    const workDir = fs.mkdtempSync(path.join(os.tmpdir(), 'repomap-ctx-'))
    try {
      fs.writeFileSync(
        path.join(workDir, 'main.ts'),
        'export function main(): void { console.log("hello") }\n',
      )
      fs.writeFileSync(
        path.join(workDir, 'utils.ts'),
        'import { main } from "./main"\nexport function helper(): void { main() }\n',
      )
      const built = await buildRepoMap({
        root: workDir,
        maxTokens: 1024,
      })
      // A non-empty map within the token budget is what gets injected.
      expect(built.map.length).toBeGreaterThan(0)
      expect(built.tokenCount).toBeGreaterThan(0)
      expect(built.tokenCount).toBeLessThanOrEqual(1024)
      expect(typeof built.cacheHit).toBe('boolean')
    } finally {
      fs.rmSync(workDir, { recursive: true, force: true })
      const { invalidateCache } = await import('./context/repoMap/index.js')
      invalidateCache(workDir)
    }
  })

  test('getSystemContext does not include repoMap key when flag is off', async () => {
    // bun:bundle's feature() cannot be mocked here, so the contract is checked
    // indirectly: buildRepoMap works standalone, while its injection into
    // getSystemContext stays gated behind feature('REPO_MAP') — off by
    // default, as the flag test below verifies.
    const { buildRepoMap } = await import('./context/repoMap/index.js')
    const standalone = await buildRepoMap({ maxTokens: 256 })
    expect(typeof standalone.map).toBe('string')
  })
})
describe('REPO_MAP feature flag', () => {
  test('flag defaults to false in build config', async () => {
    // Read the build script directly: the compile-time default must stay off.
    const { readFileSync } = await import('fs')
    const source = readFileSync('scripts/build.ts', 'utf-8')
    expect(source).toContain('REPO_MAP: false')
  })
})

View File

@@ -31,7 +31,6 @@ export function setSystemPromptInjection(value: string | null): void {
// Clear context caches immediately when injection changes
getUserContext.cache.clear?.()
getSystemContext.cache.clear?.()
getRepoMapContext.cache.clear?.()
}
export const getGitStatus = memoize(async (): Promise<string | null> => {
@@ -111,37 +110,6 @@ export const getGitStatus = memoize(async (): Promise<string | null> => {
}
})
export const getRepoMapContext = memoize(
  async (): Promise<string | null> => {
    // Gated by the compile-time REPO_MAP feature flag OR the REPO_MAP env
    // var, so users can opt in at runtime without rebuilding.
    const enabledAtRuntime = isEnvTruthy(process.env.REPO_MAP)
    if (!feature('REPO_MAP') && !enabledAtRuntime) return null
    // Skipped in bare mode and in remote sessions.
    if (isBareMode() || isEnvTruthy(process.env.CLAUDE_CODE_REMOTE)) return null
    try {
      const begin = Date.now()
      logForDiagnosticsNoPII('info', 'repo_map_started')
      const { buildRepoMap } = await import('./context/repoMap/index.js')
      const built = await buildRepoMap({ maxTokens: 1024 })
      logForDiagnosticsNoPII('info', 'repo_map_completed', {
        duration_ms: Date.now() - begin,
        token_count: built.tokenCount,
        file_count: built.fileCount,
        cache_hit: built.cacheHit,
      })
      // An empty map is treated the same as the feature being off.
      if (!built.map || built.map.length === 0) return null
      return `This is a structural map of the repository, ranked by importance. Use it to understand the codebase architecture.\n\n${built.map}`
    } catch (err) {
      // Best-effort: a failed map build must never break context assembly.
      logForDiagnosticsNoPII('warn', 'repo_map_failed', {
        error: String(err),
      })
      return null
    }
  },
)
/**
* This context is prepended to each conversation, and cached for the duration of the conversation.
*/
@@ -159,9 +127,6 @@ export const getSystemContext = memoize(
? null
: await getGitStatus()
// Build repo map in parallel with other context (memoized, so cheap on repeat)
const repoMap = await getRepoMapContext()
// Include system prompt injection if set (for cache breaking, internal-only)
const injection = feature('BREAK_CACHE_COMMAND')
? getSystemPromptInjection()
@@ -170,13 +135,11 @@ export const getSystemContext = memoize(
logForDiagnosticsNoPII('info', 'system_context_completed', {
duration_ms: Date.now() - startTime,
has_git_status: gitStatus !== null,
has_repo_map: repoMap !== null,
has_injection: injection !== null,
})
return {
...(gitStatus && { gitStatus }),
...(repoMap && { repoMap }),
...(feature('BREAK_CACHE_COMMAND') && injection
? {
cacheBreaker: `[CACHE_BREAKER: ${injection}]`,

View File

@@ -1,29 +0,0 @@
// fileA — imports from fileB and fileC
import { CacheLayer, buildCache } from './fileB'
import { createStore, type StoreConfig } from './fileC'
// Application entry object: owns a cache layer and seeds it with a store.
export class AppController {
  private cache: CacheLayer
  private config: StoreConfig

  constructor(config: StoreConfig) {
    this.config = config
    this.cache = buildCache()
  }

  // Create the backing store and register it under the 'primary' key.
  initialize(): void {
    this.cache.cacheSet('primary', createStore())
  }

  // Look up a previously cached value by key.
  getFromCache(key: string): unknown {
    return this.cache.cacheGet(key)
  }
}
export function startApp(config: StoreConfig): AppController {
const app = new AppController(config)
app.initialize()
return app
}

View File

@@ -1,23 +0,0 @@
// fileB — imports from fileC
import { DataStore, createStore } from './fileC'
// Thin key/value facade over a DataStore instance.
export class CacheLayer {
  private store: DataStore

  constructor() {
    this.store = createStore()
  }

  // Returns the stored value, or undefined when the key is unknown.
  cacheGet(key: string): unknown | undefined {
    return this.store.lookup(key)
  }

  // Inserts or overwrites the value for a key.
  cacheSet(key: string, value: unknown): void {
    this.store.add(key, value)
  }
}
// Factory helper so callers need not construct CacheLayer directly.
export function buildCache(): CacheLayer {
  return new CacheLayer()
}

Some files were not shown because too many files have changed in this diff. Show More