Compare commits


3 Commits

| Author | SHA1 | Message | Date |
|------------|------------|-----------------------------------------------------|----------------------------|
| gnanam1990 | 1137b9a037 | test: fix Windows clipboard temp path fixture | 2026-04-05 17:39:42 +05:30 |
| gnanam1990 | 54e6df58eb | fix: avoid Windows clipboard stdin codepage issues | 2026-04-05 17:17:30 +05:30 |
| gnanam1990 | 7f432fe87d | fix: preserve unicode in Windows clipboard fallback | 2026-04-05 16:59:06 +05:30 |
63 changed files with 598 additions and 5025 deletions

.gitignore vendored
View File

@@ -6,7 +6,4 @@ dist/
!.env.example !.env.example
.openclaude-profile.json .openclaude-profile.json
reports/ reports/
GEMINI.md
package-lock.json
/.claude
coverage/ coverage/

View File

@@ -185,41 +185,6 @@ With Firecrawl enabled:
Free tier at [firecrawl.dev](https://firecrawl.dev) includes 500 credits. The key is optional. Free tier at [firecrawl.dev](https://firecrawl.dev) includes 500 credits. The key is optional.
---
## Headless gRPC Server
OpenClaude can be run as a headless gRPC service, allowing you to integrate its agentic capabilities (tools, bash, file editing) into other applications, CI/CD pipelines, or custom user interfaces. The server uses bidirectional streaming to deliver real-time text chunks and tool calls, and to request permission for sensitive commands.
### 1. Start the gRPC Server
Start the core engine as a gRPC service on `localhost:50051`:
```bash
npm run dev:grpc
```
#### Configuration
| Variable | Default | Description |
|-----------|-------------|------------------------------------------------|
| `GRPC_PORT` | `50051` | Port the gRPC server listens on |
| `GRPC_HOST` | `localhost` | Bind address. Use `0.0.0.0` to expose on all interfaces (not recommended without authentication) |
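For example, to run the server on a non-default port bound to all interfaces (again, only do this behind authentication or network isolation):
```bash
# Override the defaults read by the gRPC server at startup
GRPC_HOST=0.0.0.0 GRPC_PORT=50052 npm run dev:grpc
```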
### 2. Run the Test CLI Client
We provide a lightweight CLI client that communicates exclusively over gRPC. It acts just like the main interactive CLI, rendering colors, streaming tokens, and prompting you for tool permissions (y/n) via the gRPC `action_required` event.
In a separate terminal, run:
```bash
npm run dev:grpc:cli
```
*Note: The gRPC definitions are located in `src/proto/openclaude.proto`. You can use this file to generate clients in Python, Go, Rust, or any other language.*
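As a sketch, generating Python stubs from that proto with the standard `grpcio-tools` package (not part of this repository, and assuming `pip install grpcio-tools` has been run) might look like:
```bash
# Emit openclaude_pb2.py and openclaude_pb2_grpc.py into ./gen
mkdir -p gen
python -m grpc_tools.protoc \
  -I src/proto \
  --python_out=gen \
  --grpc_python_out=gen \
  src/proto/openclaude.proto
```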
---
## Source Build And Local Development ## Source Build And Local Development
```bash ```bash

bun.lock
View File

@@ -13,8 +13,6 @@
"@anthropic-ai/vertex-sdk": "0.14.4", "@anthropic-ai/vertex-sdk": "0.14.4",
"@commander-js/extra-typings": "12.1.0", "@commander-js/extra-typings": "12.1.0",
"@growthbook/growthbook": "1.6.5", "@growthbook/growthbook": "1.6.5",
"@grpc/grpc-js": "^1.14.3",
"@grpc/proto-loader": "^0.8.0",
"@mendable/firecrawl-js": "4.18.1", "@mendable/firecrawl-js": "4.18.1",
"@modelcontextprotocol/sdk": "1.29.0", "@modelcontextprotocol/sdk": "1.29.0",
"@opentelemetry/api": "1.9.1", "@opentelemetry/api": "1.9.1",
@@ -53,7 +51,7 @@
"ignore": "7.0.5", "ignore": "7.0.5",
"indent-string": "5.0.0", "indent-string": "5.0.0",
"jsonc-parser": "3.3.1", "jsonc-parser": "3.3.1",
"lodash-es": "4.18.1", "lodash-es": "4.18.0",
"lru-cache": "11.2.7", "lru-cache": "11.2.7",
"marked": "15.0.12", "marked": "15.0.12",
"p-map": "7.0.4", "p-map": "7.0.4",
@@ -86,14 +84,10 @@
"@types/bun": "1.3.11", "@types/bun": "1.3.11",
"@types/node": "25.5.0", "@types/node": "25.5.0",
"@types/react": "19.2.14", "@types/react": "19.2.14",
"tsx": "^4.21.0",
"typescript": "5.9.3", "typescript": "5.9.3",
}, },
}, },
}, },
"overrides": {
"lodash-es": "4.18.1",
},
"packages": { "packages": {
"@alcalzone/ansi-tokenize": ["@alcalzone/ansi-tokenize@0.3.0", "", { "dependencies": { "ansi-styles": "^6.2.1", "is-fullwidth-code-point": "^5.0.0" } }, "sha512-p+CMKJ93HFmLkjXKlXiVGlMQEuRb6H0MokBSwUsX+S6BRX8eV5naFZpQJFfJHjRZY0Hmnqy1/r6UWl3x+19zYA=="], "@alcalzone/ansi-tokenize": ["@alcalzone/ansi-tokenize@0.3.0", "", { "dependencies": { "ansi-styles": "^6.2.1", "is-fullwidth-code-point": "^5.0.0" } }, "sha512-p+CMKJ93HFmLkjXKlXiVGlMQEuRb6H0MokBSwUsX+S6BRX8eV5naFZpQJFfJHjRZY0Hmnqy1/r6UWl3x+19zYA=="],
@@ -187,58 +181,6 @@
"@emnapi/runtime": ["@emnapi/runtime@1.9.2", "", { "dependencies": { "tslib": "^2.4.0" } }, "sha512-3U4+MIWHImeyu1wnmVygh5WlgfYDtyf0k8AbLhMFxOipihf6nrWC4syIm/SwEeec0mNSafiiNnMJwbza/Is6Lw=="], "@emnapi/runtime": ["@emnapi/runtime@1.9.2", "", { "dependencies": { "tslib": "^2.4.0" } }, "sha512-3U4+MIWHImeyu1wnmVygh5WlgfYDtyf0k8AbLhMFxOipihf6nrWC4syIm/SwEeec0mNSafiiNnMJwbza/Is6Lw=="],
"@esbuild/aix-ppc64": ["@esbuild/aix-ppc64@0.27.7", "", { "os": "aix", "cpu": "ppc64" }, "sha512-EKX3Qwmhz1eMdEJokhALr0YiD0lhQNwDqkPYyPhiSwKrh7/4KRjQc04sZ8db+5DVVnZ1LmbNDI1uAMPEUBnQPg=="],
"@esbuild/android-arm": ["@esbuild/android-arm@0.27.7", "", { "os": "android", "cpu": "arm" }, "sha512-jbPXvB4Yj2yBV7HUfE2KHe4GJX51QplCN1pGbYjvsyCZbQmies29EoJbkEc+vYuU5o45AfQn37vZlyXy4YJ8RQ=="],
"@esbuild/android-arm64": ["@esbuild/android-arm64@0.27.7", "", { "os": "android", "cpu": "arm64" }, "sha512-62dPZHpIXzvChfvfLJow3q5dDtiNMkwiRzPylSCfriLvZeq0a1bWChrGx/BbUbPwOrsWKMn8idSllklzBy+dgQ=="],
"@esbuild/android-x64": ["@esbuild/android-x64@0.27.7", "", { "os": "android", "cpu": "x64" }, "sha512-x5VpMODneVDb70PYV2VQOmIUUiBtY3D3mPBG8NxVk5CogneYhkR7MmM3yR/uMdITLrC1ml/NV1rj4bMJuy9MCg=="],
"@esbuild/darwin-arm64": ["@esbuild/darwin-arm64@0.27.7", "", { "os": "darwin", "cpu": "arm64" }, "sha512-5lckdqeuBPlKUwvoCXIgI2D9/ABmPq3Rdp7IfL70393YgaASt7tbju3Ac+ePVi3KDH6N2RqePfHnXkaDtY9fkw=="],
"@esbuild/darwin-x64": ["@esbuild/darwin-x64@0.27.7", "", { "os": "darwin", "cpu": "x64" }, "sha512-rYnXrKcXuT7Z+WL5K980jVFdvVKhCHhUwid+dDYQpH+qu+TefcomiMAJpIiC2EM3Rjtq0sO3StMV/+3w3MyyqQ=="],
"@esbuild/freebsd-arm64": ["@esbuild/freebsd-arm64@0.27.7", "", { "os": "freebsd", "cpu": "arm64" }, "sha512-B48PqeCsEgOtzME2GbNM2roU29AMTuOIN91dsMO30t+Ydis3z/3Ngoj5hhnsOSSwNzS+6JppqWsuhTp6E82l2w=="],
"@esbuild/freebsd-x64": ["@esbuild/freebsd-x64@0.27.7", "", { "os": "freebsd", "cpu": "x64" }, "sha512-jOBDK5XEjA4m5IJK3bpAQF9/Lelu/Z9ZcdhTRLf4cajlB+8VEhFFRjWgfy3M1O4rO2GQ/b2dLwCUGpiF/eATNQ=="],
"@esbuild/linux-arm": ["@esbuild/linux-arm@0.27.7", "", { "os": "linux", "cpu": "arm" }, "sha512-RkT/YXYBTSULo3+af8Ib0ykH8u2MBh57o7q/DAs3lTJlyVQkgQvlrPTnjIzzRPQyavxtPtfg0EopvDyIt0j1rA=="],
"@esbuild/linux-arm64": ["@esbuild/linux-arm64@0.27.7", "", { "os": "linux", "cpu": "arm64" }, "sha512-RZPHBoxXuNnPQO9rvjh5jdkRmVizktkT7TCDkDmQ0W2SwHInKCAV95GRuvdSvA7w4VMwfCjUiPwDi0ZO6Nfe9A=="],
"@esbuild/linux-ia32": ["@esbuild/linux-ia32@0.27.7", "", { "os": "linux", "cpu": "ia32" }, "sha512-GA48aKNkyQDbd3KtkplYWT102C5sn/EZTY4XROkxONgruHPU72l+gW+FfF8tf2cFjeHaRbWpOYa/uRBz/Xq1Pg=="],
"@esbuild/linux-loong64": ["@esbuild/linux-loong64@0.27.7", "", { "os": "linux", "cpu": "none" }, "sha512-a4POruNM2oWsD4WKvBSEKGIiWQF8fZOAsycHOt6JBpZ+JN2n2JH9WAv56SOyu9X5IqAjqSIPTaJkqN8F7XOQ5Q=="],
"@esbuild/linux-mips64el": ["@esbuild/linux-mips64el@0.27.7", "", { "os": "linux", "cpu": "none" }, "sha512-KabT5I6StirGfIz0FMgl1I+R1H73Gp0ofL9A3nG3i/cYFJzKHhouBV5VWK1CSgKvVaG4q1RNpCTR2LuTVB3fIw=="],
"@esbuild/linux-ppc64": ["@esbuild/linux-ppc64@0.27.7", "", { "os": "linux", "cpu": "ppc64" }, "sha512-gRsL4x6wsGHGRqhtI+ifpN/vpOFTQtnbsupUF5R5YTAg+y/lKelYR1hXbnBdzDjGbMYjVJLJTd2OFmMewAgwlQ=="],
"@esbuild/linux-riscv64": ["@esbuild/linux-riscv64@0.27.7", "", { "os": "linux", "cpu": "none" }, "sha512-hL25LbxO1QOngGzu2U5xeXtxXcW+/GvMN3ejANqXkxZ/opySAZMrc+9LY/WyjAan41unrR3YrmtTsUpwT66InQ=="],
"@esbuild/linux-s390x": ["@esbuild/linux-s390x@0.27.7", "", { "os": "linux", "cpu": "s390x" }, "sha512-2k8go8Ycu1Kb46vEelhu1vqEP+UeRVj2zY1pSuPdgvbd5ykAw82Lrro28vXUrRmzEsUV0NzCf54yARIK8r0fdw=="],
"@esbuild/linux-x64": ["@esbuild/linux-x64@0.27.7", "", { "os": "linux", "cpu": "x64" }, "sha512-hzznmADPt+OmsYzw1EE33ccA+HPdIqiCRq7cQeL1Jlq2gb1+OyWBkMCrYGBJ+sxVzve2ZJEVeePbLM2iEIZSxA=="],
"@esbuild/netbsd-arm64": ["@esbuild/netbsd-arm64@0.27.7", "", { "os": "none", "cpu": "arm64" }, "sha512-b6pqtrQdigZBwZxAn1UpazEisvwaIDvdbMbmrly7cDTMFnw/+3lVxxCTGOrkPVnsYIosJJXAsILG9XcQS+Yu6w=="],
"@esbuild/netbsd-x64": ["@esbuild/netbsd-x64@0.27.7", "", { "os": "none", "cpu": "x64" }, "sha512-OfatkLojr6U+WN5EDYuoQhtM+1xco+/6FSzJJnuWiUw5eVcicbyK3dq5EeV/QHT1uy6GoDhGbFpprUiHUYggrw=="],
"@esbuild/openbsd-arm64": ["@esbuild/openbsd-arm64@0.27.7", "", { "os": "openbsd", "cpu": "arm64" }, "sha512-AFuojMQTxAz75Fo8idVcqoQWEHIXFRbOc1TrVcFSgCZtQfSdc1RXgB3tjOn/krRHENUB4j00bfGjyl2mJrU37A=="],
"@esbuild/openbsd-x64": ["@esbuild/openbsd-x64@0.27.7", "", { "os": "openbsd", "cpu": "x64" }, "sha512-+A1NJmfM8WNDv5CLVQYJ5PshuRm/4cI6WMZRg1by1GwPIQPCTs1GLEUHwiiQGT5zDdyLiRM/l1G0Pv54gvtKIg=="],
"@esbuild/openharmony-arm64": ["@esbuild/openharmony-arm64@0.27.7", "", { "os": "none", "cpu": "arm64" }, "sha512-+KrvYb/C8zA9CU/g0sR6w2RBw7IGc5J2BPnc3dYc5VJxHCSF1yNMxTV5LQ7GuKteQXZtspjFbiuW5/dOj7H4Yw=="],
"@esbuild/sunos-x64": ["@esbuild/sunos-x64@0.27.7", "", { "os": "sunos", "cpu": "x64" }, "sha512-ikktIhFBzQNt/QDyOL580ti9+5mL/YZeUPKU2ivGtGjdTYoqz6jObj6nOMfhASpS4GU4Q/Clh1QtxWAvcYKamA=="],
"@esbuild/win32-arm64": ["@esbuild/win32-arm64@0.27.7", "", { "os": "win32", "cpu": "arm64" }, "sha512-7yRhbHvPqSpRUV7Q20VuDwbjW5kIMwTHpptuUzV+AA46kiPze5Z7qgt6CLCK3pWFrHeNfDd1VKgyP4O+ng17CA=="],
"@esbuild/win32-ia32": ["@esbuild/win32-ia32@0.27.7", "", { "os": "win32", "cpu": "ia32" }, "sha512-SmwKXe6VHIyZYbBLJrhOoCJRB/Z1tckzmgTLfFYOfpMAx63BJEaL9ExI8x7v0oAO3Zh6D/Oi1gVxEYr5oUCFhw=="],
"@esbuild/win32-x64": ["@esbuild/win32-x64@0.27.7", "", { "os": "win32", "cpu": "x64" }, "sha512-56hiAJPhwQ1R4i+21FVF7V8kSD5zZTdHcVuRFMW0hn753vVfQN8xlx4uOPT4xoGH0Z/oVATuR82AiqSTDIpaHg=="],
"@growthbook/growthbook": ["@growthbook/growthbook@1.6.5", "", { "dependencies": { "dom-mutator": "^0.6.0" } }, "sha512-mUaMsgeUTpRIUOTn33EUXHRK6j7pxBjwqH4WpQyq+pukjd1AIzWlEa6w7i6bInJUcweGgP2beXZmaP6b6UPn7A=="], "@growthbook/growthbook": ["@growthbook/growthbook@1.6.5", "", { "dependencies": { "dom-mutator": "^0.6.0" } }, "sha512-mUaMsgeUTpRIUOTn33EUXHRK6j7pxBjwqH4WpQyq+pukjd1AIzWlEa6w7i6bInJUcweGgP2beXZmaP6b6UPn7A=="],
"@grpc/grpc-js": ["@grpc/grpc-js@1.14.3", "", { "dependencies": { "@grpc/proto-loader": "^0.8.0", "@js-sdsl/ordered-map": "^4.4.2" } }, "sha512-Iq8QQQ/7X3Sac15oB6p0FmUg/klxQvXLeileoqrTRGJYLV+/9tubbr9ipz0GKHjmXVsgFPo/+W+2cA8eNcR+XA=="], "@grpc/grpc-js": ["@grpc/grpc-js@1.14.3", "", { "dependencies": { "@grpc/proto-loader": "^0.8.0", "@js-sdsl/ordered-map": "^4.4.2" } }, "sha512-Iq8QQQ/7X3Sac15oB6p0FmUg/klxQvXLeileoqrTRGJYLV+/9tubbr9ipz0GKHjmXVsgFPo/+W+2cA8eNcR+XA=="],
@@ -511,7 +453,7 @@
"cli-highlight": ["cli-highlight@2.1.11", "", { "dependencies": { "chalk": "^4.0.0", "highlight.js": "^10.7.1", "mz": "^2.4.0", "parse5": "^5.1.1", "parse5-htmlparser2-tree-adapter": "^6.0.0", "yargs": "^16.0.0" }, "bin": { "highlight": "bin/highlight" } }, "sha512-9KDcoEVwyUXrjcJNvHD0NFc/hiwe/WPVYIleQh2O1N2Zro5gWJZ/K+3DGn8w8P/F6FxOgzyC5bxDyHIgCSPhGg=="], "cli-highlight": ["cli-highlight@2.1.11", "", { "dependencies": { "chalk": "^4.0.0", "highlight.js": "^10.7.1", "mz": "^2.4.0", "parse5": "^5.1.1", "parse5-htmlparser2-tree-adapter": "^6.0.0", "yargs": "^16.0.0" }, "bin": { "highlight": "bin/highlight" } }, "sha512-9KDcoEVwyUXrjcJNvHD0NFc/hiwe/WPVYIleQh2O1N2Zro5gWJZ/K+3DGn8w8P/F6FxOgzyC5bxDyHIgCSPhGg=="],
"cliui": ["cliui@8.0.1", "", { "dependencies": { "string-width": "^4.2.0", "strip-ansi": "^6.0.1", "wrap-ansi": "^7.0.0" } }, "sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ=="], "cliui": ["cliui@7.0.4", "", { "dependencies": { "string-width": "^4.2.0", "strip-ansi": "^6.0.0", "wrap-ansi": "^7.0.0" } }, "sha512-OcRE68cOsVMXp1Yvonl/fzkQOyjLSu/8bhPDfQt0e0/Eb283TKP20Fs2MqoPsr9SwA595rRCA+QMzYc9nBP+JQ=="],
"code-excerpt": ["code-excerpt@4.0.0", "", { "dependencies": { "convert-to-spaces": "^2.0.1" } }, "sha512-xxodCmBen3iy2i0WtAK8FlFNrRzjUqjRsMfho58xT/wvZU1YTM3fCnRjcy1gJPMepaRlgm/0e6w8SpWHpn3/cA=="], "code-excerpt": ["code-excerpt@4.0.0", "", { "dependencies": { "convert-to-spaces": "^2.0.1" } }, "sha512-xxodCmBen3iy2i0WtAK8FlFNrRzjUqjRsMfho58xT/wvZU1YTM3fCnRjcy1gJPMepaRlgm/0e6w8SpWHpn3/cA=="],
@@ -579,8 +521,6 @@
"es-set-tostringtag": ["es-set-tostringtag@2.1.0", "", { "dependencies": { "es-errors": "^1.3.0", "get-intrinsic": "^1.2.6", "has-tostringtag": "^1.0.2", "hasown": "^2.0.2" } }, "sha512-j6vWzfrGVfyXxge+O0x5sh6cvxAog0a/4Rdd2K36zCMV5eJ+/+tOAngRO8cODMNWbVRdVlmGZQL2YS3yR8bIUA=="], "es-set-tostringtag": ["es-set-tostringtag@2.1.0", "", { "dependencies": { "es-errors": "^1.3.0", "get-intrinsic": "^1.2.6", "has-tostringtag": "^1.0.2", "hasown": "^2.0.2" } }, "sha512-j6vWzfrGVfyXxge+O0x5sh6cvxAog0a/4Rdd2K36zCMV5eJ+/+tOAngRO8cODMNWbVRdVlmGZQL2YS3yR8bIUA=="],
"esbuild": ["esbuild@0.27.7", "", { "optionalDependencies": { "@esbuild/aix-ppc64": "0.27.7", "@esbuild/android-arm": "0.27.7", "@esbuild/android-arm64": "0.27.7", "@esbuild/android-x64": "0.27.7", "@esbuild/darwin-arm64": "0.27.7", "@esbuild/darwin-x64": "0.27.7", "@esbuild/freebsd-arm64": "0.27.7", "@esbuild/freebsd-x64": "0.27.7", "@esbuild/linux-arm": "0.27.7", "@esbuild/linux-arm64": "0.27.7", "@esbuild/linux-ia32": "0.27.7", "@esbuild/linux-loong64": "0.27.7", "@esbuild/linux-mips64el": "0.27.7", "@esbuild/linux-ppc64": "0.27.7", "@esbuild/linux-riscv64": "0.27.7", "@esbuild/linux-s390x": "0.27.7", "@esbuild/linux-x64": "0.27.7", "@esbuild/netbsd-arm64": "0.27.7", "@esbuild/netbsd-x64": "0.27.7", "@esbuild/openbsd-arm64": "0.27.7", "@esbuild/openbsd-x64": "0.27.7", "@esbuild/openharmony-arm64": "0.27.7", "@esbuild/sunos-x64": "0.27.7", "@esbuild/win32-arm64": "0.27.7", "@esbuild/win32-ia32": "0.27.7", "@esbuild/win32-x64": "0.27.7" }, "bin": { "esbuild": "bin/esbuild" } }, "sha512-IxpibTjyVnmrIQo5aqNpCgoACA/dTKLTlhMHihVHhdkxKyPO1uBBthumT0rdHmcsk9uMonIWS0m4FljWzILh3w=="],
"escalade": ["escalade@3.2.0", "", {}, "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA=="], "escalade": ["escalade@3.2.0", "", {}, "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA=="],
"escape-html": ["escape-html@1.0.3", "", {}, "sha512-NiSupZ4OeuGwr68lGIeym/ksIZMJodUGOSCZ/FSnTxcrekbvqrgdUxlJOMpijaKZVjAJrWrGs/6Jy8OMuyj9ow=="], "escape-html": ["escape-html@1.0.3", "", {}, "sha512-NiSupZ4OeuGwr68lGIeym/ksIZMJodUGOSCZ/FSnTxcrekbvqrgdUxlJOMpijaKZVjAJrWrGs/6Jy8OMuyj9ow=="],
@@ -627,8 +567,6 @@
"fresh": ["fresh@2.0.0", "", {}, "sha512-Rx/WycZ60HOaqLKAi6cHRKKI7zxWbJ31MhntmtwMoaTeF7XFH9hhBp8vITaMidfljRQ6eYWCKkaTK+ykVJHP2A=="], "fresh": ["fresh@2.0.0", "", {}, "sha512-Rx/WycZ60HOaqLKAi6cHRKKI7zxWbJ31MhntmtwMoaTeF7XFH9hhBp8vITaMidfljRQ6eYWCKkaTK+ykVJHP2A=="],
"fsevents": ["fsevents@2.3.3", "", { "os": "darwin" }, "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw=="],
"function-bind": ["function-bind@1.1.2", "", {}, "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA=="], "function-bind": ["function-bind@1.1.2", "", {}, "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA=="],
"fuse.js": ["fuse.js@7.1.0", "", {}, "sha512-trLf4SzuuUxfusZADLINj+dE8clK1frKdmqiJNb1Es75fmI5oY6X2mxLVUciLLjxqw/xr72Dhy+lER6dGd02FQ=="], "fuse.js": ["fuse.js@7.1.0", "", {}, "sha512-trLf4SzuuUxfusZADLINj+dE8clK1frKdmqiJNb1Es75fmI5oY6X2mxLVUciLLjxqw/xr72Dhy+lER6dGd02FQ=="],
@@ -647,8 +585,6 @@
"get-stream": ["get-stream@9.0.1", "", { "dependencies": { "@sec-ant/readable-stream": "^0.4.1", "is-stream": "^4.0.1" } }, "sha512-kVCxPF3vQM/N0B1PmoqVUqgHP+EeVjmZSQn+1oCRPxd2P21P2F19lIgbR3HBosbB1PUhOAoctJnfEn2GbN2eZA=="], "get-stream": ["get-stream@9.0.1", "", { "dependencies": { "@sec-ant/readable-stream": "^0.4.1", "is-stream": "^4.0.1" } }, "sha512-kVCxPF3vQM/N0B1PmoqVUqgHP+EeVjmZSQn+1oCRPxd2P21P2F19lIgbR3HBosbB1PUhOAoctJnfEn2GbN2eZA=="],
"get-tsconfig": ["get-tsconfig@4.13.7", "", { "dependencies": { "resolve-pkg-maps": "^1.0.0" } }, "sha512-7tN6rFgBlMgpBML5j8typ92BKFi2sFQvIdpAqLA2beia5avZDrMs0FLZiM5etShWq5irVyGcGMEA1jcDaK7A/Q=="],
"google-auth-library": ["google-auth-library@9.15.1", "", { "dependencies": { "base64-js": "^1.3.0", "ecdsa-sig-formatter": "^1.0.11", "gaxios": "^6.1.1", "gcp-metadata": "^6.1.0", "gtoken": "^7.0.0", "jws": "^4.0.0" } }, "sha512-Jb6Z0+nvECVz+2lzSMt9u98UsoakXxA2HGHMCxh+so3n90XgYWkq5dur19JAJV7ONiJY22yBTyJB1TSkvPq9Ng=="], "google-auth-library": ["google-auth-library@9.15.1", "", { "dependencies": { "base64-js": "^1.3.0", "ecdsa-sig-formatter": "^1.0.11", "gaxios": "^6.1.1", "gcp-metadata": "^6.1.0", "gtoken": "^7.0.0", "jws": "^4.0.0" } }, "sha512-Jb6Z0+nvECVz+2lzSMt9u98UsoakXxA2HGHMCxh+so3n90XgYWkq5dur19JAJV7ONiJY22yBTyJB1TSkvPq9Ng=="],
"google-logging-utils": ["google-logging-utils@0.0.2", "", {}, "sha512-NEgUnEcBiP5HrPzufUkBzJOD/Sxsco3rLNo1F1TNf7ieU8ryUzBhqba8r756CjLX7rn3fHl6iLEwPYuqpoKgQQ=="], "google-logging-utils": ["google-logging-utils@0.0.2", "", {}, "sha512-NEgUnEcBiP5HrPzufUkBzJOD/Sxsco3rLNo1F1TNf7ieU8ryUzBhqba8r756CjLX7rn3fHl6iLEwPYuqpoKgQQ=="],
@@ -721,7 +657,7 @@
"locate-path": ["locate-path@5.0.0", "", { "dependencies": { "p-locate": "^4.1.0" } }, "sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g=="], "locate-path": ["locate-path@5.0.0", "", { "dependencies": { "p-locate": "^4.1.0" } }, "sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g=="],
"lodash-es": ["lodash-es@4.18.1", "", {}, "sha512-J8xewKD/Gk22OZbhpOVSwcs60zhd95ESDwezOFuA3/099925PdHJ7OFHNTGtajL3AlZkykD32HykiMo+BIBI8A=="], "lodash-es": ["lodash-es@4.18.0", "", {}, "sha512-koAgswPPA+UTaPN64Etp+PGP+WT6oqOS2NMi5yDkMaiGw9qY4VxQbQF0mtKMyr4BlTznWyzePV5UpECTJQmSUA=="],
"lodash.camelcase": ["lodash.camelcase@4.3.0", "", {}, "sha512-TwuEnCnxbc3rAvhf/LbG7tJUDzhqXyFnv3dtzLOPgCG/hODL7WFnsbwktkD7yUV0RrreP/l1PALq/YSg6VvjlA=="], "lodash.camelcase": ["lodash.camelcase@4.3.0", "", {}, "sha512-TwuEnCnxbc3rAvhf/LbG7tJUDzhqXyFnv3dtzLOPgCG/hODL7WFnsbwktkD7yUV0RrreP/l1PALq/YSg6VvjlA=="],
@@ -825,8 +761,6 @@
"require-main-filename": ["require-main-filename@2.0.0", "", {}, "sha512-NKN5kMDylKuldxYLSUfrbo5Tuzh4hd+2E8NPPX02mZtn1VuREQToYe/ZdlJy+J3uCpfaiGF05e7B8W0iXbQHmg=="], "require-main-filename": ["require-main-filename@2.0.0", "", {}, "sha512-NKN5kMDylKuldxYLSUfrbo5Tuzh4hd+2E8NPPX02mZtn1VuREQToYe/ZdlJy+J3uCpfaiGF05e7B8W0iXbQHmg=="],
"resolve-pkg-maps": ["resolve-pkg-maps@1.0.0", "", {}, "sha512-seS2Tj26TBVOC2NIc2rOe2y2ZO7efxITtLZcGSOnHHNOQ7CkiUBfw0Iw2ck6xkIhPwLhKNLS8BO+hEpngQlqzw=="],
"retry": ["retry@0.12.0", "", {}, "sha512-9LkiTwjUh6rT555DtE9rTX+BKByPfrMzEAtnlEtdEwr3Nkffwiihqe2bWADg+OQRjt9gl6ICdmB/ZFDCGAtSow=="], "retry": ["retry@0.12.0", "", {}, "sha512-9LkiTwjUh6rT555DtE9rTX+BKByPfrMzEAtnlEtdEwr3Nkffwiihqe2bWADg+OQRjt9gl6ICdmB/ZFDCGAtSow=="],
"router": ["router@2.2.0", "", { "dependencies": { "debug": "^4.4.0", "depd": "^2.0.0", "is-promise": "^4.0.0", "parseurl": "^1.3.3", "path-to-regexp": "^8.0.0" } }, "sha512-nLTrUKm2UyiL7rlhapu/Zl45FwNgkZGaCpZbIHajDYgwlJCOzLSk+cIPAnsEqV955GjILJnKbdQC1nVPz+gAYQ=="], "router": ["router@2.2.0", "", { "dependencies": { "debug": "^4.4.0", "depd": "^2.0.0", "is-promise": "^4.0.0", "parseurl": "^1.3.3", "path-to-regexp": "^8.0.0" } }, "sha512-nLTrUKm2UyiL7rlhapu/Zl45FwNgkZGaCpZbIHajDYgwlJCOzLSk+cIPAnsEqV955GjILJnKbdQC1nVPz+gAYQ=="],
@@ -897,8 +831,6 @@
"tslib": ["tslib@1.14.1", "", {}, "sha512-Xni35NKzjgMrwevysHTCArtLDpPvye8zV/0E4EyYn43P7/7qvQwPh9BGkHewbMulVntbigmcT7rdX3BNo9wRJg=="], "tslib": ["tslib@1.14.1", "", {}, "sha512-Xni35NKzjgMrwevysHTCArtLDpPvye8zV/0E4EyYn43P7/7qvQwPh9BGkHewbMulVntbigmcT7rdX3BNo9wRJg=="],
"tsx": ["tsx@4.21.0", "", { "dependencies": { "esbuild": "~0.27.0", "get-tsconfig": "^4.7.5" }, "optionalDependencies": { "fsevents": "~2.3.3" }, "bin": { "tsx": "dist/cli.mjs" } }, "sha512-5C1sg4USs1lfG0GFb2RLXsdpXqBSEhAaA/0kPL01wxzpMqLILNxIxIOKiILz+cdg/pLnOUxFYOR5yhHU666wbw=="],
"turndown": ["turndown@7.2.2", "", { "dependencies": { "@mixmark-io/domino": "^2.2.0" } }, "sha512-1F7db8BiExOKxjSMU2b7if62D/XOyQyZbPKq/nUwopfgnHlqXHqQ0lvfUTeUIr1lZJzOPFn43dODyMSIfvWRKQ=="], "turndown": ["turndown@7.2.2", "", { "dependencies": { "@mixmark-io/domino": "^2.2.0" } }, "sha512-1F7db8BiExOKxjSMU2b7if62D/XOyQyZbPKq/nUwopfgnHlqXHqQ0lvfUTeUIr1lZJzOPFn43dODyMSIfvWRKQ=="],
"type-fest": ["type-fest@4.41.0", "", {}, "sha512-TeTSQ6H5YHvpqVwBRcnLDCBnDOHWYu7IvGbHT6N8AOymcr9PJGjc1GTtiWZTYg0NCgYwvnYWEkVChQAr9bjfwA=="], "type-fest": ["type-fest@4.41.0", "", {}, "sha512-TeTSQ6H5YHvpqVwBRcnLDCBnDOHWYu7IvGbHT6N8AOymcr9PJGjc1GTtiWZTYg0NCgYwvnYWEkVChQAr9bjfwA=="],
@@ -949,9 +881,9 @@
"yaml": ["yaml@2.8.3", "", { "bin": { "yaml": "bin.mjs" } }, "sha512-AvbaCLOO2Otw/lW5bmh9d/WEdcDFdQp2Z2ZUH3pX9U2ihyUY0nvLv7J6TrWowklRGPYbB/IuIMfYgxaCPg5Bpg=="], "yaml": ["yaml@2.8.3", "", { "bin": { "yaml": "bin.mjs" } }, "sha512-AvbaCLOO2Otw/lW5bmh9d/WEdcDFdQp2Z2ZUH3pX9U2ihyUY0nvLv7J6TrWowklRGPYbB/IuIMfYgxaCPg5Bpg=="],
"yargs": ["yargs@17.7.2", "", { "dependencies": { "cliui": "^8.0.1", "escalade": "^3.1.1", "get-caller-file": "^2.0.5", "require-directory": "^2.1.1", "string-width": "^4.2.3", "y18n": "^5.0.5", "yargs-parser": "^21.1.1" } }, "sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w=="], "yargs": ["yargs@16.2.0", "", { "dependencies": { "cliui": "^7.0.2", "escalade": "^3.1.1", "get-caller-file": "^2.0.5", "require-directory": "^2.1.1", "string-width": "^4.2.0", "y18n": "^5.0.5", "yargs-parser": "^20.2.2" } }, "sha512-D1mvvtDG0L5ft/jGWkLpG1+m0eQxOfaBvTNELraWj22wSVUMWxZUvYgJYcKh6jGGIkJFhH4IZPQhR4TKpc8mBw=="],
"yargs-parser": ["yargs-parser@21.1.1", "", {}, "sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw=="], "yargs-parser": ["yargs-parser@20.2.9", "", {}, "sha512-y11nGElTIV+CT3Zv9t7VKl+Q3hTQoT9a1Qzezhhl6Rp21gJ/IVTW7Z3y9EWXhuUBC2Shnf+DX0antecpAwSP8w=="],
"yoctocolors": ["yoctocolors@2.1.2", "", {}, "sha512-CzhO+pFNo8ajLM2d2IW/R93ipy99LWjtwblvC1RsoSUMZgyLbYFr221TnSNT7GjGdYui6P459mw9JH/g/zW2ug=="], "yoctocolors": ["yoctocolors@2.1.2", "", {}, "sha512-CzhO+pFNo8ajLM2d2IW/R93ipy99LWjtwblvC1RsoSUMZgyLbYFr221TnSNT7GjGdYui6P459mw9JH/g/zW2ug=="],
@@ -959,6 +891,8 @@
"zod-to-json-schema": ["zod-to-json-schema@3.25.2", "", { "peerDependencies": { "zod": "^3.25.28 || ^4" } }, "sha512-O/PgfnpT1xKSDeQYSCfRI5Gy3hPf91mKVDuYLUHZJMiDFptvP41MSnWofm8dnCm0256ZNfZIM7DSzuSMAFnjHA=="], "zod-to-json-schema": ["zod-to-json-schema@3.25.2", "", { "peerDependencies": { "zod": "^3.25.28 || ^4" } }, "sha512-O/PgfnpT1xKSDeQYSCfRI5Gy3hPf91mKVDuYLUHZJMiDFptvP41MSnWofm8dnCm0256ZNfZIM7DSzuSMAFnjHA=="],
"@anthropic-ai/sandbox-runtime/lodash-es": ["lodash-es@4.17.23", "", {}, "sha512-kVI48u3PZr38HdYz98UmfPnXl2DXrpdctLrFLCd3kOx1xUkOmpFPx7gCWWM5MPkL/fD8zb+Ph0QzjGFs4+hHWg=="],
"@aws-crypto/crc32/@aws-crypto/util": ["@aws-crypto/util@5.2.0", "", { "dependencies": { "@aws-sdk/types": "^3.222.0", "@smithy/util-utf8": "^2.0.0", "tslib": "^2.6.2" } }, "sha512-4RkU9EsI6ZpBve5fseQlGNUWKMa1RLPQ1dnjnQoe07ldfIzcsGb5hC5W0Dm7u423KWzawlrpbjXBrXCEv9zazQ=="], "@aws-crypto/crc32/@aws-crypto/util": ["@aws-crypto/util@5.2.0", "", { "dependencies": { "@aws-sdk/types": "^3.222.0", "@smithy/util-utf8": "^2.0.0", "tslib": "^2.6.2" } }, "sha512-4RkU9EsI6ZpBve5fseQlGNUWKMa1RLPQ1dnjnQoe07ldfIzcsGb5hC5W0Dm7u423KWzawlrpbjXBrXCEv9zazQ=="],
"@aws-crypto/crc32/tslib": ["tslib@2.8.1", "", {}, "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w=="], "@aws-crypto/crc32/tslib": ["tslib@2.8.1", "", {}, "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w=="],
@@ -1151,6 +1085,8 @@
"@emnapi/runtime/tslib": ["tslib@2.8.1", "", {}, "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w=="], "@emnapi/runtime/tslib": ["tslib@2.8.1", "", {}, "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w=="],
"@grpc/proto-loader/yargs": ["yargs@17.7.2", "", { "dependencies": { "cliui": "^8.0.1", "escalade": "^3.1.1", "get-caller-file": "^2.0.5", "require-directory": "^2.1.1", "string-width": "^4.2.3", "y18n": "^5.0.5", "yargs-parser": "^21.1.1" } }, "sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w=="],
"@opentelemetry/exporter-trace-otlp-grpc/@opentelemetry/core": ["@opentelemetry/core@1.30.1", "", { "dependencies": { "@opentelemetry/semantic-conventions": "1.28.0" }, "peerDependencies": { "@opentelemetry/api": ">=1.0.0 <1.10.0" } }, "sha512-OOCM2C/QIURhJMuKaekP3TRBxBKxG/TWWA0TL2J6nXUtDnuCtccy49LUJF8xPFXMX+0LMcxFpCo8M9cGY1W6rQ=="], "@opentelemetry/exporter-trace-otlp-grpc/@opentelemetry/core": ["@opentelemetry/core@1.30.1", "", { "dependencies": { "@opentelemetry/semantic-conventions": "1.28.0" }, "peerDependencies": { "@opentelemetry/api": ">=1.0.0 <1.10.0" } }, "sha512-OOCM2C/QIURhJMuKaekP3TRBxBKxG/TWWA0TL2J6nXUtDnuCtccy49LUJF8xPFXMX+0LMcxFpCo8M9cGY1W6rQ=="],
"@opentelemetry/exporter-trace-otlp-grpc/@opentelemetry/otlp-exporter-base": ["@opentelemetry/otlp-exporter-base@0.57.2", "", { "dependencies": { "@opentelemetry/core": "1.30.1", "@opentelemetry/otlp-transformer": "0.57.2" }, "peerDependencies": { "@opentelemetry/api": "^1.3.0" } }, "sha512-XdxEzL23Urhidyebg5E6jZoaiW5ygP/mRjxLHixogbqwDy2Faduzb5N0o/Oi+XTIJu+iyxXdVORjXax+Qgfxag=="], "@opentelemetry/exporter-trace-otlp-grpc/@opentelemetry/otlp-exporter-base": ["@opentelemetry/otlp-exporter-base@0.57.2", "", { "dependencies": { "@opentelemetry/core": "1.30.1", "@opentelemetry/otlp-transformer": "0.57.2" }, "peerDependencies": { "@opentelemetry/api": "^1.3.0" } }, "sha512-XdxEzL23Urhidyebg5E6jZoaiW5ygP/mRjxLHixogbqwDy2Faduzb5N0o/Oi+XTIJu+iyxXdVORjXax+Qgfxag=="],
@@ -1369,8 +1305,6 @@
"cli-highlight/chalk": ["chalk@4.1.2", "", { "dependencies": { "ansi-styles": "^4.1.0", "supports-color": "^7.1.0" } }, "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA=="], "cli-highlight/chalk": ["chalk@4.1.2", "", { "dependencies": { "ansi-styles": "^4.1.0", "supports-color": "^7.1.0" } }, "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA=="],
"cli-highlight/yargs": ["yargs@16.2.0", "", { "dependencies": { "cliui": "^7.0.2", "escalade": "^3.1.1", "get-caller-file": "^2.0.5", "require-directory": "^2.1.1", "string-width": "^4.2.0", "y18n": "^5.0.5", "yargs-parser": "^20.2.2" } }, "sha512-D1mvvtDG0L5ft/jGWkLpG1+m0eQxOfaBvTNELraWj22wSVUMWxZUvYgJYcKh6jGGIkJFhH4IZPQhR4TKpc8mBw=="],
"cliui/string-width": ["string-width@4.2.3", "", { "dependencies": { "emoji-regex": "^8.0.0", "is-fullwidth-code-point": "^3.0.0", "strip-ansi": "^6.0.1" } }, "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g=="], "cliui/string-width": ["string-width@4.2.3", "", { "dependencies": { "emoji-regex": "^8.0.0", "is-fullwidth-code-point": "^3.0.0", "strip-ansi": "^6.0.1" } }, "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g=="],
"cliui/strip-ansi": ["strip-ansi@6.0.1", "", { "dependencies": { "ansi-regex": "^5.0.1" } }, "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A=="], "cliui/strip-ansi": ["strip-ansi@6.0.1", "", { "dependencies": { "ansi-regex": "^5.0.1" } }, "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A=="],
@@ -1425,6 +1359,12 @@
"@aws-sdk/nested-clients/@smithy/util-base64/@smithy/util-buffer-from": ["@smithy/util-buffer-from@4.2.2", "", { "dependencies": { "@smithy/is-array-buffer": "^4.2.2", "tslib": "^2.6.2" } }, "sha512-FDXD7cvUoFWwN6vtQfEta540Y/YBe5JneK3SoZg9bThSoOAC/eGeYEua6RkBgKjGa/sz6Y+DuBZj3+YEY21y4Q=="], "@aws-sdk/nested-clients/@smithy/util-base64/@smithy/util-buffer-from": ["@smithy/util-buffer-from@4.2.2", "", { "dependencies": { "@smithy/is-array-buffer": "^4.2.2", "tslib": "^2.6.2" } }, "sha512-FDXD7cvUoFWwN6vtQfEta540Y/YBe5JneK3SoZg9bThSoOAC/eGeYEua6RkBgKjGa/sz6Y+DuBZj3+YEY21y4Q=="],
"@grpc/proto-loader/yargs/cliui": ["cliui@8.0.1", "", { "dependencies": { "string-width": "^4.2.0", "strip-ansi": "^6.0.1", "wrap-ansi": "^7.0.0" } }, "sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ=="],
"@grpc/proto-loader/yargs/string-width": ["string-width@4.2.3", "", { "dependencies": { "emoji-regex": "^8.0.0", "is-fullwidth-code-point": "^3.0.0", "strip-ansi": "^6.0.1" } }, "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g=="],
"@grpc/proto-loader/yargs/yargs-parser": ["yargs-parser@21.1.1", "", {}, "sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw=="],
"@opentelemetry/exporter-trace-otlp-grpc/@opentelemetry/core/@opentelemetry/semantic-conventions": ["@opentelemetry/semantic-conventions@1.28.0", "", {}, "sha512-lp4qAiMTD4sNWW4DbKLBkfiMZ4jbAboJIGOQr5DvciMRI494OapieI9qiODpOt0XBr1LjIDy1xAGAnVs5supTA=="], "@opentelemetry/exporter-trace-otlp-grpc/@opentelemetry/core/@opentelemetry/semantic-conventions": ["@opentelemetry/semantic-conventions@1.28.0", "", {}, "sha512-lp4qAiMTD4sNWW4DbKLBkfiMZ4jbAboJIGOQr5DvciMRI494OapieI9qiODpOt0XBr1LjIDy1xAGAnVs5supTA=="],
"@opentelemetry/exporter-trace-otlp-grpc/@opentelemetry/otlp-transformer/@opentelemetry/api-logs": ["@opentelemetry/api-logs@0.57.2", "", { "dependencies": { "@opentelemetry/api": "^1.3.0" } }, "sha512-uIX52NnTM0iBh84MShlpouI7UKqkZ7MrUszTmaypHBu4r7NofznSnQRfJ+uUeDtQDj6w8eFGg5KBLDAwAPz1+A=="], "@opentelemetry/exporter-trace-otlp-grpc/@opentelemetry/otlp-transformer/@opentelemetry/api-logs": ["@opentelemetry/api-logs@0.57.2", "", { "dependencies": { "@opentelemetry/api": "^1.3.0" } }, "sha512-uIX52NnTM0iBh84MShlpouI7UKqkZ7MrUszTmaypHBu4r7NofznSnQRfJ+uUeDtQDj6w8eFGg5KBLDAwAPz1+A=="],
@@ -1491,12 +1431,6 @@
"cli-highlight/chalk/ansi-styles": ["ansi-styles@4.3.0", "", { "dependencies": { "color-convert": "^2.0.1" } }, "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg=="], "cli-highlight/chalk/ansi-styles": ["ansi-styles@4.3.0", "", { "dependencies": { "color-convert": "^2.0.1" } }, "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg=="],
"cli-highlight/yargs/cliui": ["cliui@7.0.4", "", { "dependencies": { "string-width": "^4.2.0", "strip-ansi": "^6.0.0", "wrap-ansi": "^7.0.0" } }, "sha512-OcRE68cOsVMXp1Yvonl/fzkQOyjLSu/8bhPDfQt0e0/Eb283TKP20Fs2MqoPsr9SwA595rRCA+QMzYc9nBP+JQ=="],
"cli-highlight/yargs/string-width": ["string-width@4.2.3", "", { "dependencies": { "emoji-regex": "^8.0.0", "is-fullwidth-code-point": "^3.0.0", "strip-ansi": "^6.0.1" } }, "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g=="],
"cli-highlight/yargs/yargs-parser": ["yargs-parser@20.2.9", "", {}, "sha512-y11nGElTIV+CT3Zv9t7VKl+Q3hTQoT9a1Qzezhhl6Rp21gJ/IVTW7Z3y9EWXhuUBC2Shnf+DX0antecpAwSP8w=="],
"cliui/string-width/emoji-regex": ["emoji-regex@8.0.0", "", {}, "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A=="], "cliui/string-width/emoji-regex": ["emoji-regex@8.0.0", "", {}, "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A=="],
"cliui/string-width/is-fullwidth-code-point": ["is-fullwidth-code-point@3.0.0", "", {}, "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg=="], "cliui/string-width/is-fullwidth-code-point": ["is-fullwidth-code-point@3.0.0", "", {}, "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg=="],
@@ -1537,6 +1471,16 @@
"@aws-sdk/nested-clients/@smithy/util-base64/@smithy/util-buffer-from/@smithy/is-array-buffer": ["@smithy/is-array-buffer@4.2.2", "", { "dependencies": { "tslib": "^2.6.2" } }, "sha512-n6rQ4N8Jj4YTQO3YFrlgZuwKodf4zUFs7EJIWH86pSCWBaAtAGBFfCM7Wx6D2bBJ2xqFNxGBSrUWswT3M0VJow=="], "@aws-sdk/nested-clients/@smithy/util-base64/@smithy/util-buffer-from/@smithy/is-array-buffer": ["@smithy/is-array-buffer@4.2.2", "", { "dependencies": { "tslib": "^2.6.2" } }, "sha512-n6rQ4N8Jj4YTQO3YFrlgZuwKodf4zUFs7EJIWH86pSCWBaAtAGBFfCM7Wx6D2bBJ2xqFNxGBSrUWswT3M0VJow=="],
"@grpc/proto-loader/yargs/cliui/strip-ansi": ["strip-ansi@6.0.1", "", { "dependencies": { "ansi-regex": "^5.0.1" } }, "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A=="],
"@grpc/proto-loader/yargs/cliui/wrap-ansi": ["wrap-ansi@7.0.0", "", { "dependencies": { "ansi-styles": "^4.0.0", "string-width": "^4.1.0", "strip-ansi": "^6.0.0" } }, "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q=="],
"@grpc/proto-loader/yargs/string-width/emoji-regex": ["emoji-regex@8.0.0", "", {}, "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A=="],
"@grpc/proto-loader/yargs/string-width/is-fullwidth-code-point": ["is-fullwidth-code-point@3.0.0", "", {}, "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg=="],
"@grpc/proto-loader/yargs/string-width/strip-ansi": ["strip-ansi@6.0.1", "", { "dependencies": { "ansi-regex": "^5.0.1" } }, "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A=="],
"@opentelemetry/otlp-grpc-exporter-base/@opentelemetry/otlp-transformer/@opentelemetry/resources/@opentelemetry/semantic-conventions": ["@opentelemetry/semantic-conventions@1.28.0", "", {}, "sha512-lp4qAiMTD4sNWW4DbKLBkfiMZ4jbAboJIGOQr5DvciMRI494OapieI9qiODpOt0XBr1LjIDy1xAGAnVs5supTA=="], "@opentelemetry/otlp-grpc-exporter-base/@opentelemetry/otlp-transformer/@opentelemetry/resources/@opentelemetry/semantic-conventions": ["@opentelemetry/semantic-conventions@1.28.0", "", {}, "sha512-lp4qAiMTD4sNWW4DbKLBkfiMZ4jbAboJIGOQr5DvciMRI494OapieI9qiODpOt0XBr1LjIDy1xAGAnVs5supTA=="],
"@opentelemetry/otlp-grpc-exporter-base/@opentelemetry/otlp-transformer/@opentelemetry/sdk-trace-base/@opentelemetry/semantic-conventions": ["@opentelemetry/semantic-conventions@1.28.0", "", {}, "sha512-lp4qAiMTD4sNWW4DbKLBkfiMZ4jbAboJIGOQr5DvciMRI494OapieI9qiODpOt0XBr1LjIDy1xAGAnVs5supTA=="], "@opentelemetry/otlp-grpc-exporter-base/@opentelemetry/otlp-transformer/@opentelemetry/sdk-trace-base/@opentelemetry/semantic-conventions": ["@opentelemetry/semantic-conventions@1.28.0", "", {}, "sha512-lp4qAiMTD4sNWW4DbKLBkfiMZ4jbAboJIGOQr5DvciMRI494OapieI9qiODpOt0XBr1LjIDy1xAGAnVs5supTA=="],
@@ -1557,16 +1501,6 @@
"@smithy/smithy-client/@smithy/util-stream/@smithy/node-http-handler/@smithy/querystring-builder": ["@smithy/querystring-builder@2.2.0", "", { "dependencies": { "@smithy/types": "^2.12.0", "@smithy/util-uri-escape": "^2.2.0", "tslib": "^2.6.2" } }, "sha512-L1kSeviUWL+emq3CUVSgdogoM/D9QMFaqxL/dd0X7PCNWmPXqt+ExtrBjqT0V7HLN03Vs9SuiLrG3zy3JGnE5A=="], "@smithy/smithy-client/@smithy/util-stream/@smithy/node-http-handler/@smithy/querystring-builder": ["@smithy/querystring-builder@2.2.0", "", { "dependencies": { "@smithy/types": "^2.12.0", "@smithy/util-uri-escape": "^2.2.0", "tslib": "^2.6.2" } }, "sha512-L1kSeviUWL+emq3CUVSgdogoM/D9QMFaqxL/dd0X7PCNWmPXqt+ExtrBjqT0V7HLN03Vs9SuiLrG3zy3JGnE5A=="],
"cli-highlight/yargs/cliui/strip-ansi": ["strip-ansi@6.0.1", "", { "dependencies": { "ansi-regex": "^5.0.1" } }, "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A=="],
"cli-highlight/yargs/cliui/wrap-ansi": ["wrap-ansi@7.0.0", "", { "dependencies": { "ansi-styles": "^4.0.0", "string-width": "^4.1.0", "strip-ansi": "^6.0.0" } }, "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q=="],
"cli-highlight/yargs/string-width/emoji-regex": ["emoji-regex@8.0.0", "", {}, "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A=="],
"cli-highlight/yargs/string-width/is-fullwidth-code-point": ["is-fullwidth-code-point@3.0.0", "", {}, "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg=="],
"cli-highlight/yargs/string-width/strip-ansi": ["strip-ansi@6.0.1", "", { "dependencies": { "ansi-regex": "^5.0.1" } }, "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A=="],
"qrcode/yargs/cliui/strip-ansi": ["strip-ansi@6.0.1", "", { "dependencies": { "ansi-regex": "^5.0.1" } }, "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A=="], "qrcode/yargs/cliui/strip-ansi": ["strip-ansi@6.0.1", "", { "dependencies": { "ansi-regex": "^5.0.1" } }, "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A=="],
"qrcode/yargs/cliui/wrap-ansi": ["wrap-ansi@6.2.0", "", { "dependencies": { "ansi-styles": "^4.0.0", "string-width": "^4.1.0", "strip-ansi": "^6.0.0" } }, "sha512-r6lPcBGxZXlIcymEu7InxDMhdW0KDxpLgoFLcguasxCaJ/SOIZwINatK9KY/tf+ZrlywOKU0UDj3ATXUBfxJXA=="], "qrcode/yargs/cliui/wrap-ansi": ["wrap-ansi@6.2.0", "", { "dependencies": { "ansi-styles": "^4.0.0", "string-width": "^4.1.0", "strip-ansi": "^6.0.0" } }, "sha512-r6lPcBGxZXlIcymEu7InxDMhdW0KDxpLgoFLcguasxCaJ/SOIZwINatK9KY/tf+ZrlywOKU0UDj3ATXUBfxJXA=="],
@@ -1579,16 +1513,16 @@
"yargs/string-width/strip-ansi/ansi-regex": ["ansi-regex@5.0.1", "", {}, "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ=="], "yargs/string-width/strip-ansi/ansi-regex": ["ansi-regex@5.0.1", "", {}, "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ=="],
"@grpc/proto-loader/yargs/cliui/strip-ansi/ansi-regex": ["ansi-regex@5.0.1", "", {}, "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ=="],
"@grpc/proto-loader/yargs/cliui/wrap-ansi/ansi-styles": ["ansi-styles@4.3.0", "", { "dependencies": { "color-convert": "^2.0.1" } }, "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg=="],
"@grpc/proto-loader/yargs/string-width/strip-ansi/ansi-regex": ["ansi-regex@5.0.1", "", {}, "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ=="],
"@smithy/smithy-client/@smithy/util-stream/@smithy/fetch-http-handler/@smithy/querystring-builder/@smithy/util-uri-escape": ["@smithy/util-uri-escape@2.2.0", "", { "dependencies": { "tslib": "^2.6.2" } }, "sha512-jtmJMyt1xMD/d8OtbVJ2gFZOSKc+ueYJZPW20ULW1GOp/q/YIM0wNh+u8ZFao9UaIGz4WoPW8hC64qlWLIfoDA=="], "@smithy/smithy-client/@smithy/util-stream/@smithy/fetch-http-handler/@smithy/querystring-builder/@smithy/util-uri-escape": ["@smithy/util-uri-escape@2.2.0", "", { "dependencies": { "tslib": "^2.6.2" } }, "sha512-jtmJMyt1xMD/d8OtbVJ2gFZOSKc+ueYJZPW20ULW1GOp/q/YIM0wNh+u8ZFao9UaIGz4WoPW8hC64qlWLIfoDA=="],
"@smithy/smithy-client/@smithy/util-stream/@smithy/node-http-handler/@smithy/querystring-builder/@smithy/util-uri-escape": ["@smithy/util-uri-escape@2.2.0", "", { "dependencies": { "tslib": "^2.6.2" } }, "sha512-jtmJMyt1xMD/d8OtbVJ2gFZOSKc+ueYJZPW20ULW1GOp/q/YIM0wNh+u8ZFao9UaIGz4WoPW8hC64qlWLIfoDA=="], "@smithy/smithy-client/@smithy/util-stream/@smithy/node-http-handler/@smithy/querystring-builder/@smithy/util-uri-escape": ["@smithy/util-uri-escape@2.2.0", "", { "dependencies": { "tslib": "^2.6.2" } }, "sha512-jtmJMyt1xMD/d8OtbVJ2gFZOSKc+ueYJZPW20ULW1GOp/q/YIM0wNh+u8ZFao9UaIGz4WoPW8hC64qlWLIfoDA=="],
"cli-highlight/yargs/cliui/strip-ansi/ansi-regex": ["ansi-regex@5.0.1", "", {}, "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ=="],
"cli-highlight/yargs/cliui/wrap-ansi/ansi-styles": ["ansi-styles@4.3.0", "", { "dependencies": { "color-convert": "^2.0.1" } }, "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg=="],
"cli-highlight/yargs/string-width/strip-ansi/ansi-regex": ["ansi-regex@5.0.1", "", {}, "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ=="],
"qrcode/yargs/cliui/strip-ansi/ansi-regex": ["ansi-regex@5.0.1", "", {}, "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ=="], "qrcode/yargs/cliui/strip-ansi/ansi-regex": ["ansi-regex@5.0.1", "", {}, "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ=="],
"qrcode/yargs/cliui/wrap-ansi/ansi-styles": ["ansi-styles@4.3.0", "", { "dependencies": { "color-convert": "^2.0.1" } }, "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg=="], "qrcode/yargs/cliui/wrap-ansi/ansi-styles": ["ansi-styles@4.3.0", "", { "dependencies": { "color-convert": "^2.0.1" } }, "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg=="],

View File

@@ -1,144 +0,0 @@
# LiteLLM Setup
OpenClaude can connect to LiteLLM through its OpenAI-compatible proxy.
## Overview
LiteLLM is an open-source LLM gateway that provides a unified API to 100+ model providers. By running the LiteLLM Proxy, you can route OpenClaude requests through LiteLLM to access any of its supported providers — all while using OpenClaude's existing OpenAI-compatible provider path.
## Prerequisites
- LiteLLM installed (`pip install litellm[proxy]`)
- A `litellm_config.yaml` or equivalent LiteLLM configuration
- LiteLLM Proxy running on a local or remote port
## 1. Start the LiteLLM Proxy
### Basic installation
```bash
pip install litellm[proxy]
```
### Configure LiteLLM
Create a `litellm_config.yaml` with your desired model aliases:
```yaml
model_list:
  - model_name: gpt-4o
    litellm_params:
      model: openai/gpt-4o
      api_key: os.environ/OPENAI_API_KEY
  - model_name: claude-sonnet-4
    litellm_params:
      model: anthropic/claude-sonnet-4-5-20250929
      api_key: os.environ/ANTHROPIC_API_KEY
  - model_name: gemini-2.5-flash
    litellm_params:
      model: gemini/gemini-2.5-flash
      api_key: os.environ/GEMINI_API_KEY
  - model_name: llama-3.3-70b
    litellm_params:
      model: together_ai/meta-llama/Llama-3.3-70B-Instruct-Turbo
      api_key: os.environ/TOGETHER_API_KEY
```
### Run the proxy
```bash
litellm --config litellm_config.yaml --port 4000
```
The proxy will start at `http://localhost:4000` by default.
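A quick way to confirm the proxy is reachable is to list its models through the OpenAI-compatible endpoint (add an `Authorization: Bearer <master-key>` header if you started the proxy with one):
```bash
# Should return the aliases from litellm_config.yaml (e.g. gpt-4o, claude-sonnet-4)
curl http://localhost:4000/v1/models
```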
## 2. Point OpenClaude to LiteLLM
### Option A: Environment Variables
```bash
export CLAUDE_CODE_USE_OPENAI=1
export OPENAI_BASE_URL=http://localhost:4000
export OPENAI_API_KEY=<your-master-key-or-placeholder>
export OPENAI_MODEL=<your-litellm-model-alias>
openclaude
```
Replace `<your-litellm-model-alias>` with a model name from your `litellm_config.yaml` (e.g., `gpt-4o`, `claude-sonnet-4`, `gemini-2.5-flash`).
### Option B: Using /provider
1. Run `openclaude`
2. Type `/provider` to open the provider setup flow
3. Choose the **OpenAI-compatible** option
4. When prompted for the API key, enter the key required by your LiteLLM proxy
If your local LiteLLM setup does not enforce auth, you may still need to enter a placeholder value.
5. When prompted for the base URL, enter `http://localhost:4000`
6. When prompted for the model, enter the LiteLLM model name or alias you configured
7. Save the provider configuration
## 3. Example LiteLLM Configs
### Multi-provider routing with spend tracking
```yaml
model_list:
  - model_name: gpt-4o
    litellm_params:
      model: openai/gpt-4o
      api_key: os.environ/OPENAI_API_KEY
  - model_name: claude-sonnet-4
    litellm_params:
      model: anthropic/claude-sonnet-4-5-20250929
      api_key: os.environ/ANTHROPIC_API_KEY
  - model_name: deepseek-chat
    litellm_params:
      model: deepseek/deepseek-chat
      api_key: os.environ/DEEPSEEK_API_KEY
litellm_settings:
  set_verbose: false
  num_retries: 3
```
### With a master key for auth
```bash
# Start proxy with a master key
litellm --config litellm_config.yaml --port 4000 --master_key sk-my-master-key
# Connect OpenClaude
export CLAUDE_CODE_USE_OPENAI=1
export OPENAI_BASE_URL=http://localhost:4000
export OPENAI_API_KEY=sk-my-master-key
export OPENAI_MODEL=gpt-4o
openclaude
```
## 4. Notes
- `OPENAI_MODEL` must match the **LiteLLM model alias** defined in your config, not the upstream raw provider model name.
- If your proxy requires authentication, use the proxy key (or `master_key`) in `OPENAI_API_KEY`.
- LiteLLM's OpenAI-compatible endpoint accepts the same request format as OpenAI, so OpenClaude works without any code changes.
- You can switch between any provider configured in LiteLLM by simply changing the `OPENAI_MODEL` value; there is no need to reconfigure OpenClaude (see the example below).
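For example, switching the same OpenClaude setup from the `gpt-4o` alias to the `claude-sonnet-4` alias defined above is just:
```bash
# Only the model alias changes; the proxy, base URL, and key stay the same
export OPENAI_MODEL=claude-sonnet-4
openclaude
```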
## 5. Troubleshooting
| Issue | Likely Cause | Fix |
|-------|--------------|-----|
| 404 or Model Not Found | Model alias doesn't exist in LiteLLM config | Verify the `model_name` in `litellm_config.yaml` matches `OPENAI_MODEL` |
| Connection Refused | LiteLLM proxy isn't running | Start the proxy with `litellm --config litellm_config.yaml --port 4000` |
| Auth Failed | Missing or wrong `master_key` | Set the correct key in `OPENAI_API_KEY` |
| Upstream provider error | The backend provider key is missing or invalid | Ensure the upstream API key (e.g., `OPENAI_API_KEY`) is set in your LiteLLM proxy process environment |
| Tools fail but chat works | The selected model has weak function/tool calling support | Switch to a model with strong tool support (e.g., GPT-4o, Claude Sonnet) |
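If a request fails, it can help to call the proxy's OpenAI-compatible chat endpoint directly with the same alias and key OpenClaude would use; if this also fails, the problem is in the LiteLLM or upstream provider layer rather than in OpenClaude. A minimal check, assuming the master key and alias from the examples above:
```bash
# Talk to the proxy directly, bypassing OpenClaude
curl http://localhost:4000/v1/chat/completions \
  -H "Authorization: Bearer sk-my-master-key" \
  -H "Content-Type: application/json" \
  -d '{"model": "gpt-4o", "messages": [{"role": "user", "content": "ping"}]}'
```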
## 6. Resources
- [LiteLLM Proxy Docs](https://docs.litellm.ai/docs/proxy/quick_start)
- [LiteLLM Provider List](https://docs.litellm.ai/docs/providers)
- [LiteLLM OpenAI-Compatible Endpoints](https://docs.litellm.ai/docs/proxy/openai_compatible_proxy)

View File

@@ -1,6 +1,6 @@
{ {
"name": "@gitlawb/openclaude", "name": "@gitlawb/openclaude",
"version": "0.1.8", "version": "0.1.7",
"description": "Claude Code opened to any LLM — OpenAI, Gemini, DeepSeek, Ollama, and 200+ models", "description": "Claude Code opened to any LLM — OpenAI, Gemini, DeepSeek, Ollama, and 200+ models",
"type": "module", "type": "module",
"bin": { "bin": {
@@ -30,8 +30,6 @@
"profile:code": "bun run profile:init -- --provider ollama --model qwen2.5-coder:7b", "profile:code": "bun run profile:init -- --provider ollama --model qwen2.5-coder:7b",
"dev:fast": "bun run profile:fast && bun run dev:ollama:fast", "dev:fast": "bun run profile:fast && bun run dev:ollama:fast",
"dev:code": "bun run profile:code && bun run dev:profile", "dev:code": "bun run profile:code && bun run dev:profile",
"dev:grpc": "bun run scripts/start-grpc.ts",
"dev:grpc:cli": "bun run scripts/grpc-cli.ts",
"start": "node dist/cli.mjs", "start": "node dist/cli.mjs",
"test": "bun test", "test": "bun test",
"test:coverage": "bun test --coverage --coverage-reporter=lcov --coverage-dir=coverage --max-concurrency=1 && bun run scripts/render-coverage-heatmap.ts", "test:coverage": "bun test --coverage --coverage-reporter=lcov --coverage-dir=coverage --max-concurrency=1 && bun run scripts/render-coverage-heatmap.ts",
@@ -59,8 +57,6 @@
"@anthropic-ai/vertex-sdk": "0.14.4", "@anthropic-ai/vertex-sdk": "0.14.4",
"@commander-js/extra-typings": "12.1.0", "@commander-js/extra-typings": "12.1.0",
"@growthbook/growthbook": "1.6.5", "@growthbook/growthbook": "1.6.5",
"@grpc/grpc-js": "^1.14.3",
"@grpc/proto-loader": "^0.8.0",
"@mendable/firecrawl-js": "4.18.1", "@mendable/firecrawl-js": "4.18.1",
"@modelcontextprotocol/sdk": "1.29.0", "@modelcontextprotocol/sdk": "1.29.0",
"@opentelemetry/api": "1.9.1", "@opentelemetry/api": "1.9.1",
@@ -99,7 +95,7 @@
"ignore": "7.0.5", "ignore": "7.0.5",
"indent-string": "5.0.0", "indent-string": "5.0.0",
"jsonc-parser": "3.3.1", "jsonc-parser": "3.3.1",
"lodash-es": "4.18.1", "lodash-es": "4.18.0",
"lru-cache": "11.2.7", "lru-cache": "11.2.7",
"marked": "15.0.12", "marked": "15.0.12",
"p-map": "7.0.4", "p-map": "7.0.4",
@@ -132,7 +128,6 @@
"@types/bun": "1.3.11", "@types/bun": "1.3.11",
"@types/node": "25.5.0", "@types/node": "25.5.0",
"@types/react": "19.2.14", "@types/react": "19.2.14",
"tsx": "^4.21.0",
"typescript": "5.9.3" "typescript": "5.9.3"
}, },
"engines": { "engines": {
@@ -155,8 +150,5 @@
"license": "SEE LICENSE FILE", "license": "SEE LICENSE FILE",
"publishConfig": { "publishConfig": {
"access": "public" "access": "public"
},
"overrides": {
"lodash-es": "4.18.1"
} }
} }

View File

@@ -1,121 +0,0 @@
import * as grpc from '@grpc/grpc-js'
import * as protoLoader from '@grpc/proto-loader'
import path from 'path'
import * as readline from 'readline'
const PROTO_PATH = path.resolve(import.meta.dirname, '../src/proto/openclaude.proto')
const packageDefinition = protoLoader.loadSync(PROTO_PATH, {
keepCase: true,
longs: String,
enums: String,
defaults: true,
oneofs: true,
})
const protoDescriptor = grpc.loadPackageDefinition(packageDefinition) as any
const openclaudeProto = protoDescriptor.openclaude.v1
const rl = readline.createInterface({
input: process.stdin,
output: process.stdout
})
function askQuestion(query: string): Promise<string> {
return new Promise(resolve => {
rl.question(query, resolve)
})
}
async function main() {
const host = process.env.GRPC_HOST || 'localhost'
const port = process.env.GRPC_PORT || '50051'
const client = new openclaudeProto.AgentService(
`${host}:${port}`,
grpc.credentials.createInsecure()
)
let call: grpc.ClientDuplexStream<any, any> | null = null
const startStream = () => {
call = client.Chat()
let textStreamed = false
call.on('data', async (serverMessage: any) => {
if (serverMessage.text_chunk) {
process.stdout.write(serverMessage.text_chunk.text)
textStreamed = true
} else if (serverMessage.tool_start) {
console.log(`\n\x1b[36m[Tool Call]\x1b[0m \x1b[1m${serverMessage.tool_start.tool_name}\x1b[0m`)
console.log(`\x1b[90m${serverMessage.tool_start.arguments_json}\x1b[0m\n`)
} else if (serverMessage.tool_result) {
console.log(`\n\x1b[32m[Tool Result]\x1b[0m \x1b[1m${serverMessage.tool_result.tool_name}\x1b[0m`)
const out = serverMessage.tool_result.output
if (out.length > 500) {
console.log(`\x1b[90m${out.substring(0, 500)}...\n(Output truncated, total length: ${out.length})\x1b[0m`)
} else {
console.log(`\x1b[90m${out}\x1b[0m`)
}
} else if (serverMessage.action_required) {
const action = serverMessage.action_required
console.log(`\n\x1b[33m[Action Required]\x1b[0m`)
const reply = await askQuestion(`\x1b[1m${action.question}\x1b[0m (y/n) > `)
call?.write({
input: {
prompt_id: action.prompt_id,
reply: reply.trim()
}
})
} else if (serverMessage.done) {
if (!textStreamed && serverMessage.done.full_text) {
process.stdout.write(serverMessage.done.full_text)
}
textStreamed = false
console.log('\n\x1b[32m[Generation Complete]\x1b[0m')
promptUser()
} else if (serverMessage.error) {
console.error(`\n\x1b[31m[Server Error]\x1b[0m ${serverMessage.error.message}`)
promptUser()
}
})
call.on('end', () => {
console.log('\n\x1b[90m[Stream closed by server]\x1b[0m')
// Don't prompt user here, let 'done' or 'error' handlers do it
})
call.on('error', (err: Error) => {
console.error('\n\x1b[31m[Stream Error]\x1b[0m', err.message)
promptUser()
})
}
const promptUser = async () => {
const message = await askQuestion('\n\x1b[35m> \x1b[0m')
if (message.trim().toLowerCase() === '/exit' || message.trim().toLowerCase() === '/quit') {
console.log('Bye!')
rl.close()
process.exit(0)
}
if (!call || call.destroyed) {
startStream()
}
call!.write({
request: {
session_id: 'cli-session-1',
message: message,
working_directory: process.cwd()
}
})
}
console.log('\x1b[32mOpenClaude gRPC CLI\x1b[0m')
console.log('\x1b[90mType /exit to quit.\x1b[0m')
promptUser()
}
main()

View File

@@ -1,50 +0,0 @@
import { GrpcServer } from '../src/grpc/server.ts'
import { init } from '../src/entrypoints/init.ts'
// Polyfill MACRO which is normally injected by the bundler
Object.assign(globalThis, {
MACRO: {
VERSION: '0.1.7',
DISPLAY_VERSION: '0.1.7',
PACKAGE_URL: '@gitlawb/openclaude',
}
})
async function main() {
console.log('Starting OpenClaude gRPC Server...')
await init()
// Mirror CLI bootstrap: hydrate secure tokens and resolve provider profile
const { enableConfigs } = await import('../src/utils/config.js')
enableConfigs()
const { applySafeConfigEnvironmentVariables } = await import('../src/utils/managedEnv.js')
applySafeConfigEnvironmentVariables()
const { hydrateGeminiAccessTokenFromSecureStorage } = await import('../src/utils/geminiCredentials.js')
hydrateGeminiAccessTokenFromSecureStorage()
const { hydrateGithubModelsTokenFromSecureStorage } = await import('../src/utils/githubModelsCredentials.js')
hydrateGithubModelsTokenFromSecureStorage()
const { buildStartupEnvFromProfile, applyProfileEnvToProcessEnv } = await import('../src/utils/providerProfile.js')
const { getProviderValidationError, validateProviderEnvOrExit } = await import('../src/utils/providerValidation.js')
const startupEnv = await buildStartupEnvFromProfile({ processEnv: process.env })
if (startupEnv !== process.env) {
const startupProfileError = await getProviderValidationError(startupEnv)
if (startupProfileError) {
console.warn(`Warning: ignoring saved provider profile. ${startupProfileError}`)
} else {
applyProfileEnvToProcessEnv(process.env, startupEnv)
}
}
await validateProviderEnvOrExit()
const port = process.env.GRPC_PORT ? parseInt(process.env.GRPC_PORT, 10) : 50051
const host = process.env.GRPC_HOST || 'localhost'
const server = new GrpcServer()
server.start(port, host)
}
main().catch((err) => {
console.error('Fatal error starting gRPC server:', err)
process.exit(1)
})

View File

@@ -1,42 +0,0 @@
import { afterEach, expect, mock, test } from 'bun:test'
const originalEnv = {
CLAUDE_CODE_USE_OPENAI: process.env.CLAUDE_CODE_USE_OPENAI,
OPENAI_BASE_URL: process.env.OPENAI_BASE_URL,
OPENAI_MODEL: process.env.OPENAI_MODEL,
}
afterEach(() => {
mock.restore()
process.env.CLAUDE_CODE_USE_OPENAI = originalEnv.CLAUDE_CODE_USE_OPENAI
process.env.OPENAI_BASE_URL = originalEnv.OPENAI_BASE_URL
process.env.OPENAI_MODEL = originalEnv.OPENAI_MODEL
})
test('opens the model picker without awaiting local model discovery refresh', async () => {
process.env.CLAUDE_CODE_USE_OPENAI = '1'
process.env.OPENAI_BASE_URL = 'http://127.0.0.1:8080/v1'
process.env.OPENAI_MODEL = 'qwen2.5-coder-7b-instruct'
let resolveDiscovery: (() => void) | undefined
const discoverOpenAICompatibleModelOptions = mock(
() =>
new Promise<void>(resolve => {
resolveDiscovery = resolve
}),
)
mock.module('../../utils/model/openaiModelDiscovery.js', () => ({
discoverOpenAICompatibleModelOptions,
}))
const { call } = await import(`./model.js?ts=${Date.now()}-${Math.random()}`)
const result = await Promise.race([
call(() => {}, {} as never, ''),
new Promise(resolve => setTimeout(() => resolve('timeout'), 50)),
])
resolveDiscovery?.()
expect(result).not.toBe('timeout')
})

View File

@@ -4,7 +4,6 @@ import * as React from 'react';
import type { CommandResultDisplay } from '../../commands.js'; import type { CommandResultDisplay } from '../../commands.js';
import { ModelPicker } from '../../components/ModelPicker.js'; import { ModelPicker } from '../../components/ModelPicker.js';
import { COMMON_HELP_ARGS, COMMON_INFO_ARGS } from '../../constants/xml.js'; import { COMMON_HELP_ARGS, COMMON_INFO_ARGS } from '../../constants/xml.js';
import { fetchBootstrapData } from '../../services/api/bootstrap.js';
import { type AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS, logEvent } from '../../services/analytics/index.js'; import { type AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS, logEvent } from '../../services/analytics/index.js';
import { useAppState, useSetAppState } from '../../state/AppState.js'; import { useAppState, useSetAppState } from '../../state/AppState.js';
import type { LocalJSXCommandCall } from '../../types/command.js'; import type { LocalJSXCommandCall } from '../../types/command.js';
@@ -20,7 +19,6 @@ import { getActiveOpenAIModelOptionsCache, setActiveOpenAIModelOptionsCache } fr
import { getDefaultMainLoopModelSetting, isOpus1mMergeEnabled, renderDefaultModelSetting } from '../../utils/model/model.js'; import { getDefaultMainLoopModelSetting, isOpus1mMergeEnabled, renderDefaultModelSetting } from '../../utils/model/model.js';
import { isModelAllowed } from '../../utils/model/modelAllowlist.js'; import { isModelAllowed } from '../../utils/model/modelAllowlist.js';
import { validateModel } from '../../utils/model/validateModel.js'; import { validateModel } from '../../utils/model/validateModel.js';
import { getAdditionalModelOptionsCacheScope } from '../../services/api/providerConfig.js';
function ModelPickerWrapper(t0) { function ModelPickerWrapper(t0) {
const $ = _c(17); const $ = _c(17);
const { const {
@@ -321,9 +319,7 @@ export const call: LocalJSXCommandCall = async (onDone, _context, args) => {
}); });
return <SetModelAndClose args={args} onDone={onDone} />; return <SetModelAndClose args={args} onDone={onDone} />;
} }
if (getAdditionalModelOptionsCacheScope()?.startsWith('openai:')) { await refreshOpenAIModelOptionsCache();
void refreshOpenAIModelOptionsCache();
}
return <ModelPickerWrapper onDone={onDone} />; return <ModelPickerWrapper onDone={onDone} />;
}; };
function renderModelLabel(model: string | null): string { function renderModelLabel(model: string | null): string {

View File

@@ -2,7 +2,6 @@ import type { Command } from '../../commands.js'
const onboardGithub: Command = { const onboardGithub: Command = {
name: 'onboard-github', name: 'onboard-github',
aliases: ['onboarding-github', 'onboardgithub', 'onboardinggithub'],
description: description:
'Interactive setup for GitHub Models: device login or PAT, saved to secure storage', 'Interactive setup for GitHub Models: device login or PAT, saved to secure storage',
type: 'local-jsx', type: 'local-jsx',

View File

@@ -1,148 +0,0 @@
import { describe, expect, test } from 'bun:test'
import {
activateGithubOnboardingMode,
applyGithubOnboardingProcessEnv,
buildGithubOnboardingSettingsEnv,
hasExistingGithubModelsLoginToken,
shouldForceGithubRelogin,
} from './onboard-github.js'
describe('shouldForceGithubRelogin', () => {
test.each(['force', '--force', 'relogin', '--relogin', 'reauth', '--reauth'])(
'treats %s as force re-login',
arg => {
expect(shouldForceGithubRelogin(arg)).toBe(true)
},
)
test('returns false for empty or unknown args', () => {
expect(shouldForceGithubRelogin('')).toBe(false)
expect(shouldForceGithubRelogin(undefined)).toBe(false)
expect(shouldForceGithubRelogin('something-else')).toBe(false)
})
test('treats force flags as present in multi-word args', () => {
expect(shouldForceGithubRelogin('--force extra')).toBe(true)
expect(shouldForceGithubRelogin('foo --relogin bar')).toBe(true)
expect(shouldForceGithubRelogin('abc reauth xyz')).toBe(true)
})
})
describe('hasExistingGithubModelsLoginToken', () => {
test('returns true when GITHUB_TOKEN is present', () => {
expect(
hasExistingGithubModelsLoginToken({ GITHUB_TOKEN: 'token' }, ''),
).toBe(true)
})
test('returns true when GH_TOKEN is present', () => {
expect(
hasExistingGithubModelsLoginToken({ GH_TOKEN: 'token' }, ''),
).toBe(true)
})
test('returns true when stored token exists', () => {
expect(hasExistingGithubModelsLoginToken({}, 'stored-token')).toBe(true)
})
test('returns false when both env and stored token are missing', () => {
expect(hasExistingGithubModelsLoginToken({}, '')).toBe(false)
})
})
describe('onboarding auth precedence cleanup', () => {
test('clears preexisting OpenAI auth when switching to GitHub', () => {
const env: NodeJS.ProcessEnv = {
CLAUDE_CODE_USE_OPENAI: '1',
OPENAI_MODEL: 'gpt-4o',
OPENAI_API_KEY: 'sk-stale-openai-key',
OPENAI_ORG: 'org-old',
OPENAI_PROJECT: 'project-old',
OPENAI_ORGANIZATION: 'org-legacy',
OPENAI_BASE_URL: 'https://api.openai.com/v1',
OPENAI_API_BASE: 'https://api.openai.com/v1',
CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED: '1',
CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED_ID: 'profile_old',
}
applyGithubOnboardingProcessEnv('github:copilot', env)
expect(env.CLAUDE_CODE_USE_GITHUB).toBe('1')
expect(env.OPENAI_MODEL).toBe('github:copilot')
expect(env.OPENAI_API_KEY).toBeUndefined()
expect(env.OPENAI_ORG).toBeUndefined()
expect(env.OPENAI_PROJECT).toBeUndefined()
expect(env.OPENAI_ORGANIZATION).toBeUndefined()
expect(env.OPENAI_BASE_URL).toBeUndefined()
expect(env.OPENAI_API_BASE).toBeUndefined()
expect(env.CLAUDE_CODE_USE_OPENAI).toBeUndefined()
expect(env.CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED).toBeUndefined()
expect(env.CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED_ID).toBeUndefined()
const settingsEnv = buildGithubOnboardingSettingsEnv('github:copilot')
expect(settingsEnv.CLAUDE_CODE_USE_GITHUB).toBe('1')
expect(settingsEnv.OPENAI_MODEL).toBe('github:copilot')
expect(settingsEnv.OPENAI_API_KEY).toBeUndefined()
expect(settingsEnv.OPENAI_ORG).toBeUndefined()
expect(settingsEnv.OPENAI_PROJECT).toBeUndefined()
expect(settingsEnv.OPENAI_ORGANIZATION).toBeUndefined()
})
})
describe('activateGithubOnboardingMode', () => {
test('activates settings/env/hydration in order when merge succeeds', () => {
const calls: string[] = []
const result = activateGithubOnboardingMode(' github:copilot ', {
mergeSettingsEnv: model => {
calls.push(`merge:${model}`)
return { ok: true }
},
applyProcessEnv: model => {
calls.push(`apply:${model}`)
},
hydrateToken: () => {
calls.push('hydrate')
},
onChangeAPIKey: () => {
calls.push('onChangeAPIKey')
},
})
expect(result).toEqual({ ok: true })
expect(calls).toEqual([
'merge:github:copilot',
'apply:github:copilot',
'hydrate',
'onChangeAPIKey',
])
})
test('stops activation when settings merge fails', () => {
const calls: string[] = []
const result = activateGithubOnboardingMode(DEFAULT_MODEL_FOR_TESTS, {
mergeSettingsEnv: () => {
calls.push('merge')
return { ok: false, detail: 'settings write failed' }
},
applyProcessEnv: () => {
calls.push('apply')
},
hydrateToken: () => {
calls.push('hydrate')
},
onChangeAPIKey: () => {
calls.push('onChangeAPIKey')
},
})
expect(result).toEqual({ ok: false, detail: 'settings write failed' })
expect(calls).toEqual(['merge'])
})
})
const DEFAULT_MODEL_FOR_TESTS = 'github:copilot'
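
The `activateGithubOnboardingMode` tests above assert a strict ordering: the settings merge runs first, and only if it succeeds do the process-env update, token hydration, and API-key callback follow. A minimal sketch of that orchestration, with assumed helper shapes mirroring the test doubles rather than the real implementation:

```typescript
// Sketch only: dependency shapes are assumed from the tests above.
type MergeResult = { ok: boolean; detail?: string }

function activate(
  model: string,
  deps: {
    mergeSettingsEnv: (model: string) => MergeResult
    applyProcessEnv: (model: string) => void
    hydrateToken: () => void
    onChangeAPIKey?: () => void
  },
): MergeResult {
  const merged = deps.mergeSettingsEnv(model)
  if (!merged.ok) {
    return merged // settings write failed: leave env and token untouched
  }
  deps.applyProcessEnv(model)
  deps.hydrateToken()
  deps.onChangeAPIKey?.()
  return { ok: true }
}
```

Injecting the four steps as dependencies is what lets the tests check both the call order (merge, apply, hydrate, onChangeAPIKey) and the short-circuit on a failed merge without touching real settings files.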

View File

@@ -12,20 +12,11 @@ import {
import type { LocalJSXCommandCall } from '../../types/command.js' import type { LocalJSXCommandCall } from '../../types/command.js'
import { import {
hydrateGithubModelsTokenFromSecureStorage, hydrateGithubModelsTokenFromSecureStorage,
readGithubModelsToken,
saveGithubModelsToken, saveGithubModelsToken,
} from '../../utils/githubModelsCredentials.js' } from '../../utils/githubModelsCredentials.js'
import { updateSettingsForSource } from '../../utils/settings/settings.js' import { updateSettingsForSource } from '../../utils/settings/settings.js'
const DEFAULT_MODEL = 'github:copilot' const DEFAULT_MODEL = 'github:copilot'
const FORCE_RELOGIN_ARGS = new Set([
'force',
'--force',
'relogin',
'--relogin',
'reauth',
'--reauth',
])
type Step = type Step =
| 'menu' | 'menu'
@@ -33,72 +24,17 @@ type Step =
| 'pat' | 'pat'
| 'error' | 'error'
export function shouldForceGithubRelogin(args?: string): boolean {
const normalized = (args ?? '').trim().toLowerCase()
if (!normalized) {
return false
}
return normalized.split(/\s+/).some(arg => FORCE_RELOGIN_ARGS.has(arg))
}
export function hasExistingGithubModelsLoginToken(
env: NodeJS.ProcessEnv = process.env,
storedToken?: string,
): boolean {
const envToken = env.GITHUB_TOKEN?.trim() || env.GH_TOKEN?.trim()
if (envToken) {
return true
}
const persisted = (storedToken ?? readGithubModelsToken())?.trim()
return Boolean(persisted)
}
export function buildGithubOnboardingSettingsEnv(
model: string,
): Record<string, string | undefined> {
return {
CLAUDE_CODE_USE_GITHUB: '1',
OPENAI_MODEL: model,
OPENAI_API_KEY: undefined,
OPENAI_ORG: undefined,
OPENAI_PROJECT: undefined,
OPENAI_ORGANIZATION: undefined,
OPENAI_BASE_URL: undefined,
OPENAI_API_BASE: undefined,
CLAUDE_CODE_USE_OPENAI: undefined,
CLAUDE_CODE_USE_GEMINI: undefined,
CLAUDE_CODE_USE_BEDROCK: undefined,
CLAUDE_CODE_USE_VERTEX: undefined,
CLAUDE_CODE_USE_FOUNDRY: undefined,
}
}
export function applyGithubOnboardingProcessEnv(
model: string,
env: NodeJS.ProcessEnv = process.env,
): void {
env.CLAUDE_CODE_USE_GITHUB = '1'
env.OPENAI_MODEL = model
delete env.OPENAI_API_KEY
delete env.OPENAI_ORG
delete env.OPENAI_PROJECT
delete env.OPENAI_ORGANIZATION
delete env.OPENAI_BASE_URL
delete env.OPENAI_API_BASE
delete env.CLAUDE_CODE_USE_OPENAI
delete env.CLAUDE_CODE_USE_GEMINI
delete env.CLAUDE_CODE_USE_BEDROCK
delete env.CLAUDE_CODE_USE_VERTEX
delete env.CLAUDE_CODE_USE_FOUNDRY
delete env.CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED
delete env.CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED_ID
}
function mergeUserSettingsEnv(model: string): { ok: boolean; detail?: string } { function mergeUserSettingsEnv(model: string): { ok: boolean; detail?: string } {
const { error } = updateSettingsForSource('userSettings', { const { error } = updateSettingsForSource('userSettings', {
env: buildGithubOnboardingSettingsEnv(model) as any, env: {
CLAUDE_CODE_USE_GITHUB: '1',
OPENAI_MODEL: model,
CLAUDE_CODE_USE_OPENAI: undefined as any,
CLAUDE_CODE_USE_GEMINI: undefined as any,
CLAUDE_CODE_USE_BEDROCK: undefined as any,
CLAUDE_CODE_USE_VERTEX: undefined as any,
CLAUDE_CODE_USE_FOUNDRY: undefined as any,
},
}) })
if (error) { if (error) {
return { ok: false, detail: error.message } return { ok: false, detail: error.message }
@@ -106,32 +42,6 @@ function mergeUserSettingsEnv(model: string): { ok: boolean; detail?: string } {
return { ok: true } return { ok: true }
} }
export function activateGithubOnboardingMode(
model: string = DEFAULT_MODEL,
options?: {
mergeSettingsEnv?: (model: string) => { ok: boolean; detail?: string }
applyProcessEnv?: (model: string) => void
hydrateToken?: () => void
onChangeAPIKey?: () => void
},
): { ok: boolean; detail?: string } {
const normalizedModel = model.trim() || DEFAULT_MODEL
const mergeSettingsEnv = options?.mergeSettingsEnv ?? mergeUserSettingsEnv
const applyProcessEnv = options?.applyProcessEnv ?? applyGithubOnboardingProcessEnv
const hydrateToken =
options?.hydrateToken ?? hydrateGithubModelsTokenFromSecureStorage
const merged = mergeSettingsEnv(normalizedModel)
if (!merged.ok) {
return merged
}
applyProcessEnv(normalizedModel)
hydrateToken()
options?.onChangeAPIKey?.()
return { ok: true }
}
function OnboardGithub(props: { function OnboardGithub(props: {
onDone: Parameters<LocalJSXCommandCall>[0] onDone: Parameters<LocalJSXCommandCall>[0]
onChangeAPIKey: () => void onChangeAPIKey: () => void
@@ -154,17 +64,19 @@ function OnboardGithub(props: {
setStep('error') setStep('error')
return return
} }
const activated = activateGithubOnboardingMode(model, { const merged = mergeUserSettingsEnv(model.trim() || DEFAULT_MODEL)
onChangeAPIKey, if (!merged.ok) {
})
if (!activated.ok) {
setErrorMsg( setErrorMsg(
`Token saved, but settings were not updated: ${activated.detail ?? 'unknown error'}. ` + `Token saved, but settings were not updated: ${merged.detail ?? 'unknown error'}. ` +
`Add env CLAUDE_CODE_USE_GITHUB=1 and OPENAI_MODEL to ~/.claude/settings.json manually.`, `Add env CLAUDE_CODE_USE_GITHUB=1 and OPENAI_MODEL to ~/.claude/settings.json manually.`,
) )
setStep('error') setStep('error')
return return
} }
process.env.CLAUDE_CODE_USE_GITHUB = '1'
process.env.OPENAI_MODEL = model.trim() || DEFAULT_MODEL
hydrateGithubModelsTokenFromSecureStorage()
onChangeAPIKey()
onDone( onDone(
'GitHub Models onboard complete. Token stored in secure storage; user settings updated. Restart if the model does not switch.', 'GitHub Models onboard complete. Token stored in secure storage; user settings updated. Restart if the model does not switch.',
{ display: 'user' }, { display: 'user' },
@@ -235,11 +147,11 @@ function OnboardGithub(props: {
{deviceHint.verification_uri} {deviceHint.verification_uri}
</Text> </Text>
<Text dimColor> <Text dimColor>
A browser window may have opened. Waiting for authorization... A browser window may have opened. Waiting for authorization…
</Text> </Text>
</> </>
) : ( ) : (
<Text dimColor>Requesting device code from GitHub...</Text> <Text dimColor>Requesting device code from GitHub…</Text>
)} )}
<Spinner /> <Spinner />
</Box> </Box>
@@ -294,7 +206,7 @@ function OnboardGithub(props: {
<Text bold>GitHub Models setup</Text> <Text bold>GitHub Models setup</Text>
<Text dimColor> <Text dimColor>
Stores your token in the OS credential store (macOS Keychain when available) Stores your token in the OS credential store (macOS Keychain when available)
and enables CLAUDE_CODE_USE_GITHUB in your user settings - no export and enables CLAUDE_CODE_USE_GITHUB in your user settings no export
GITHUB_TOKEN needed for future runs. GITHUB_TOKEN needed for future runs.
</Text> </Text>
<Select <Select
@@ -315,28 +227,7 @@ function OnboardGithub(props: {
) )
} }
export const call: LocalJSXCommandCall = async (onDone, context, args) => { export const call: LocalJSXCommandCall = async (onDone, context) => {
const forceRelogin = shouldForceGithubRelogin(args)
if (hasExistingGithubModelsLoginToken() && !forceRelogin) {
const activated = activateGithubOnboardingMode(DEFAULT_MODEL, {
onChangeAPIKey: context.onChangeAPIKey,
})
if (!activated.ok) {
onDone(
`GitHub token detected, but settings activation failed: ${activated.detail ?? 'unknown error'}. ` +
'Set CLAUDE_CODE_USE_GITHUB=1 and OPENAI_MODEL=github:copilot in user settings manually.',
{ display: 'system' },
)
return null
}
onDone(
'GitHub Models already authorized. Activated GitHub Models mode using your existing token. Use /onboard-github --force to re-authenticate.',
{ display: 'user' },
)
return null
}
return ( return (
<OnboardGithub <OnboardGithub
onDone={onDone} onDone={onDone}

View File

@@ -197,21 +197,6 @@ test('buildProfileSaveMessage maps provider fields without echoing secrets', ()
expect(message).not.toContain('sk-secret-12345678') expect(message).not.toContain('sk-secret-12345678')
}) })
test('buildProfileSaveMessage labels local openai-compatible profiles consistently', () => {
const message = buildProfileSaveMessage(
'openai',
{
OPENAI_MODEL: 'gpt-5.4',
OPENAI_BASE_URL: 'http://127.0.0.1:8080/v1',
},
'D:/codings/Opensource/openclaude/.openclaude-profile.json',
)
expect(message).toContain('Saved Local OpenAI-compatible profile.')
expect(message).toContain('Model: gpt-5.4')
expect(message).toContain('Endpoint: http://127.0.0.1:8080/v1')
})
test('buildProfileSaveMessage describes Gemini access token / ADC mode clearly', () => { test('buildProfileSaveMessage describes Gemini access token / ADC mode clearly', () => {
const message = buildProfileSaveMessage( const message = buildProfileSaveMessage(
'gemini', 'gemini',
@@ -245,51 +230,6 @@ test('buildCurrentProviderSummary redacts poisoned model and endpoint values', (
expect(summary.endpointLabel).toBe('sk-...5678') expect(summary.endpointLabel).toBe('sk-...5678')
}) })
test('buildCurrentProviderSummary labels generic local openai-compatible providers', () => {
const summary = buildCurrentProviderSummary({
processEnv: {
CLAUDE_CODE_USE_OPENAI: '1',
OPENAI_MODEL: 'qwen2.5-coder-7b-instruct',
OPENAI_BASE_URL: 'http://127.0.0.1:8080/v1',
},
persisted: null,
})
expect(summary.providerLabel).toBe('Local OpenAI-compatible')
expect(summary.modelLabel).toBe('qwen2.5-coder-7b-instruct')
expect(summary.endpointLabel).toBe('http://127.0.0.1:8080/v1')
})
test('buildCurrentProviderSummary does not relabel local gpt-5.4 providers as Codex', () => {
const summary = buildCurrentProviderSummary({
processEnv: {
CLAUDE_CODE_USE_OPENAI: '1',
OPENAI_MODEL: 'gpt-5.4',
OPENAI_BASE_URL: 'http://127.0.0.1:8080/v1',
},
persisted: null,
})
expect(summary.providerLabel).toBe('Local OpenAI-compatible')
expect(summary.modelLabel).toBe('gpt-5.4')
expect(summary.endpointLabel).toBe('http://127.0.0.1:8080/v1')
})
test('buildCurrentProviderSummary recognizes GitHub Models mode', () => {
const summary = buildCurrentProviderSummary({
processEnv: {
CLAUDE_CODE_USE_GITHUB: '1',
OPENAI_MODEL: 'github:copilot',
OPENAI_BASE_URL: 'https://models.github.ai/inference',
},
persisted: null,
})
expect(summary.providerLabel).toBe('GitHub Models')
expect(summary.modelLabel).toBe('github:copilot')
expect(summary.endpointLabel).toBe('https://models.github.ai/inference')
})
test('getProviderWizardDefaults ignores poisoned current provider values', () => { test('getProviderWizardDefaults ignores poisoned current provider values', () => {
const defaults = getProviderWizardDefaults({ const defaults = getProviderWizardDefaults({
OPENAI_API_KEY: 'sk-secret-12345678', OPENAI_API_KEY: 'sk-secret-12345678',

View File

@@ -15,7 +15,6 @@ import { Box, Text } from '../../ink.js'
import { import {
DEFAULT_CODEX_BASE_URL, DEFAULT_CODEX_BASE_URL,
DEFAULT_OPENAI_BASE_URL, DEFAULT_OPENAI_BASE_URL,
isLocalProviderUrl,
resolveCodexApiCredentials, resolveCodexApiCredentials,
resolveProviderRequest, resolveProviderRequest,
} from '../../services/api/providerConfig.js' } from '../../services/api/providerConfig.js'
@@ -53,11 +52,7 @@ import {
recommendOllamaModel, recommendOllamaModel,
type RecommendationGoal, type RecommendationGoal,
} from '../../utils/providerRecommendation.js' } from '../../utils/providerRecommendation.js'
import { import { hasLocalOllama, listOllamaModels } from '../../utils/providerDiscovery.js'
getLocalOpenAICompatibleProviderLabel,
hasLocalOllama,
listOllamaModels,
} from '../../utils/providerDiscovery.js'
type ProviderChoice = 'auto' | ProviderProfile | 'clear' type ProviderChoice = 'auto' | ProviderProfile | 'clear'
@@ -178,23 +173,6 @@ export function buildCurrentProviderSummary(options?: {
} }
} }
if (isEnvTruthy(processEnv.CLAUDE_CODE_USE_GITHUB)) {
return {
providerLabel: 'GitHub Models',
modelLabel: getSafeDisplayValue(
processEnv.OPENAI_MODEL ?? 'github:copilot',
processEnv,
),
endpointLabel: getSafeDisplayValue(
processEnv.OPENAI_BASE_URL ??
processEnv.OPENAI_API_BASE ??
'https://models.github.ai/inference',
processEnv,
),
savedProfileLabel,
}
}
if (isEnvTruthy(processEnv.CLAUDE_CODE_USE_OPENAI)) { if (isEnvTruthy(processEnv.CLAUDE_CODE_USE_OPENAI)) {
const request = resolveProviderRequest({ const request = resolveProviderRequest({
model: processEnv.OPENAI_MODEL, model: processEnv.OPENAI_MODEL,
@@ -204,8 +182,10 @@ export function buildCurrentProviderSummary(options?: {
let providerLabel = 'OpenAI-compatible' let providerLabel = 'OpenAI-compatible'
if (request.transport === 'codex_responses') { if (request.transport === 'codex_responses') {
providerLabel = 'Codex' providerLabel = 'Codex'
} else if (isLocalProviderUrl(request.baseUrl)) { } else if (request.baseUrl.includes('localhost:11434')) {
providerLabel = getLocalOpenAICompatibleProviderLabel(request.baseUrl) providerLabel = 'Ollama'
} else if (request.baseUrl.includes('localhost:1234')) {
providerLabel = 'LM Studio'
} }
return { return {
@@ -292,20 +272,16 @@ function buildSavedProfileSummary(
), ),
} }
case 'openai': case 'openai':
default: { default:
const baseUrl = env.OPENAI_BASE_URL ?? DEFAULT_OPENAI_BASE_URL
return { return {
providerLabel: isLocalProviderUrl(baseUrl) providerLabel: 'OpenAI-compatible',
? getLocalOpenAICompatibleProviderLabel(baseUrl)
: 'OpenAI-compatible',
modelLabel: getSafeDisplayValue( modelLabel: getSafeDisplayValue(
env.OPENAI_MODEL ?? 'gpt-4o', env.OPENAI_MODEL ?? 'gpt-4o',
process.env, process.env,
env, env,
), ),
endpointLabel: getSafeDisplayValue( endpointLabel: getSafeDisplayValue(
baseUrl, env.OPENAI_BASE_URL ?? DEFAULT_OPENAI_BASE_URL,
process.env, process.env,
env, env,
), ),
@@ -314,7 +290,6 @@ function buildSavedProfileSummary(
? 'configured' ? 'configured'
: undefined, : undefined,
} }
}
} }
} }

View File

@@ -67,7 +67,6 @@ import { isBilledAsExtraUsage } from '../../utils/extraUsage.js';
import { getFastModeUnavailableReason, isFastModeAvailable, isFastModeCooldown, isFastModeEnabled, isFastModeSupportedByModel } from '../../utils/fastMode.js'; import { getFastModeUnavailableReason, isFastModeAvailable, isFastModeCooldown, isFastModeEnabled, isFastModeSupportedByModel } from '../../utils/fastMode.js';
import { isFullscreenEnvEnabled } from '../../utils/fullscreen.js'; import { isFullscreenEnvEnabled } from '../../utils/fullscreen.js';
import type { PromptInputHelpers } from '../../utils/handlePromptSubmit.js'; import type { PromptInputHelpers } from '../../utils/handlePromptSubmit.js';
import { extractDraggedFilePaths } from '../../utils/dragDropPaths.js';
import { getImageFromClipboard, PASTE_THRESHOLD } from '../../utils/imagePaste.js'; import { getImageFromClipboard, PASTE_THRESHOLD } from '../../utils/imagePaste.js';
import type { ImageDimensions } from '../../utils/imageResizer.js'; import type { ImageDimensions } from '../../utils/imageResizer.js';
import { cacheImagePath, storeImage } from '../../utils/imageStore.js'; import { cacheImagePath, storeImage } from '../../utils/imageStore.js';
@@ -1205,22 +1204,6 @@ function PromptInput({
// Clean up pasted text - strip ANSI escape codes and normalize line endings and tabs // Clean up pasted text - strip ANSI escape codes and normalize line endings and tabs
let text = stripAnsi(rawText).replace(/\r/g, '\n').replaceAll('\t', ' '); let text = stripAnsi(rawText).replace(/\r/g, '\n').replaceAll('\t', ' ');
// Detect file paths from drag-and-drop and convert to @mentions.
// When files are dragged into the terminal, the terminal sends their
// absolute paths via bracketed paste. Image files are handled by the
// image paste handler upstream; here we handle non-image files by
// converting them to @mentions so they get attached on submit.
const draggedPaths = extractDraggedFilePaths(text);
if (draggedPaths.length > 0) {
const mentions = draggedPaths
.map(p => (p.includes(' ') || p.includes(':') ? `@"${p}"` : `@${p}`))
.join(' ');
// Ensure spacing around the mention(s) relative to existing input
const charBefore = input[cursorOffset - 1];
const prefix = charBefore && !/\s/.test(charBefore) ? ' ' : '';
text = prefix + mentions + ' ';
}
// Match typed/auto-suggest: `!cmd` pasted into empty input enters bash mode. // Match typed/auto-suggest: `!cmd` pasted into empty input enters bash mode.
if (input.length === 0) { if (input.length === 0) {
const pastedMode = getModeFromInput(text); const pastedMode = getModeFromInput(text);
@@ -1262,23 +1245,12 @@ function PromptInput({
if (isNonSpacePrintable(input, key)) return ' ' + input; if (isNonSpacePrintable(input, key)) return ' ' + input;
return input; return input;
}, []); }, []);
// Ref mirrors cursorOffset for use in synchronous loops (e.g. multi-image
// paste) where React batches state updates and the closure value is stale.
const cursorOffsetRef = useRef(cursorOffset);
cursorOffsetRef.current = cursorOffset;
function insertTextAtCursor(text: string) { function insertTextAtCursor(text: string) {
// Use refs for input/cursor so back-to-back calls in the same event // Push current state to buffer before inserting
// (e.g. onImagePaste loop for multiple dragged images) chain correctly pushToBuffer(input, cursorOffset, pastedContents);
// instead of each reading the same stale closure values. const newInput = input.slice(0, cursorOffset) + text + input.slice(cursorOffset);
const currentInput = lastInternalInputRef.current;
const currentOffset = cursorOffsetRef.current;
pushToBuffer(currentInput, currentOffset, pastedContents);
const newInput = currentInput.slice(0, currentOffset) + text + currentInput.slice(currentOffset);
trackAndSetInput(newInput); trackAndSetInput(newInput);
const newOffset = currentOffset + text.length; setCursorOffset(cursorOffset + text.length);
cursorOffsetRef.current = newOffset;
setCursorOffset(newOffset);
} }
const doublePressEscFromEmpty = useDoublePress(() => {}, () => onShowMessageSelector()); const doublePressEscFromEmpty = useDoublePress(() => {}, () => onShowMessageSelector());
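
The comments removed in this hunk explain why `insertTextAtCursor` read from refs: when several insertions run inside one synchronous event (for example a multi-image paste loop), React batches the state updates, so each call would otherwise see the input and cursor captured before the event started. A minimal sketch of that ref-mirroring idea, with invented names and no React dependency:

```typescript
// Sketch only: names are illustrative, not the component's real code.
function makeInserter(
  inputRef: { current: string },
  cursorRef: { current: number },
  commit: (input: string, cursor: number) => void, // e.g. a setState pair
) {
  return function insertTextAtCursor(text: string): void {
    const input = inputRef.current
    const offset = cursorRef.current
    const nextInput = input.slice(0, offset) + text + input.slice(offset)
    const nextOffset = offset + text.length
    // Update the refs first so a back-to-back call in the same event
    // chains off this result instead of the pre-event snapshot.
    inputRef.current = nextInput
    cursorRef.current = nextOffset
    commit(nextInput, nextOffset)
  }
}

// Two calls in one synchronous event: the second starts from 'ab', not ''.
const inputRef = { current: '' }
const cursorRef = { current: 0 }
const insert = makeInserter(inputRef, cursorRef, () => {})
insert('ab')
insert('cd') // inputRef.current === 'abcd', cursorRef.current === 4
```

Reading plain closure values instead of `inputRef.current` would make the second `insert('cd')` in this example start from the original empty string and produce `'cd'`, dropping the first insertion.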

View File

@@ -123,6 +123,8 @@ const SuggestionItemRow = memo(function SuggestionItemRow({
maxColumnWidth ?? stringWidth(item.displayText) + 5, maxColumnWidth ?? stringWidth(item.displayText) + 5,
maxNameWidth, maxNameWidth,
) )
const displayTextColor = isSelected ? 'inverseText' : item.color
const shouldDim = !isSelected
let displayText = item.displayText let displayText = item.displayText
if (stringWidth(displayText) > displayTextWidth - 2) { if (stringWidth(displayText) > displayTextWidth - 2) {
@@ -142,17 +144,21 @@ const SuggestionItemRow = memo(function SuggestionItemRow({
const truncatedDescription = item.description const truncatedDescription = item.description
? truncateToWidth(item.description.replace(/\s+/g, ' '), descriptionWidth) ? truncateToWidth(item.description.replace(/\s+/g, ' '), descriptionWidth)
: '' : ''
const lineContent = `${paddedDisplayText}${tagText}${truncatedDescription}`
return ( return (
<Box width="100%" opaque={true} backgroundColor={rowBackgroundColor}> <Box width="100%" opaque={true} backgroundColor={rowBackgroundColor}>
<Text <Text wrap="truncate">
color={textColor} <Text color={displayTextColor} dimColor={shouldDim} bold={isSelected}>
dimColor={!isSelected} {paddedDisplayText}
bold={isSelected} </Text>
wrap="truncate" {tagText ? (
> <Text color={textColor} dimColor={!isSelected}>
{lineContent} {tagText}
</Text>
) : null}
<Text color={textColor} dimColor={!isSelected}>
{truncatedDescription}
</Text>
</Text> </Text>
</Box> </Box>
) )

View File

@@ -5,7 +5,6 @@ import { useKeybinding } from '../keybindings/useKeybinding.js'
import type { ProviderProfile } from '../utils/config.js' import type { ProviderProfile } from '../utils/config.js'
import { import {
addProviderProfile, addProviderProfile,
applyActiveProviderProfileFromConfig,
deleteProviderProfile, deleteProviderProfile,
getActiveProviderProfile, getActiveProviderProfile,
getProviderPresetDefaults, getProviderPresetDefaults,
@@ -15,14 +14,6 @@ import {
type ProviderProfileInput, type ProviderProfileInput,
updateProviderProfile, updateProviderProfile,
} from '../utils/providerProfiles.js' } from '../utils/providerProfiles.js'
import {
clearGithubModelsToken,
GITHUB_MODELS_HYDRATED_ENV_MARKER,
hydrateGithubModelsTokenFromSecureStorage,
readGithubModelsToken,
} from '../utils/githubModelsCredentials.js'
import { isEnvTruthy } from '../utils/envUtils.js'
import { updateSettingsForSource } from '../utils/settings/settings.js'
import { Select } from './CustomSelect/index.js' import { Select } from './CustomSelect/index.js'
import { Pane } from './design-system/Pane.js' import { Pane } from './design-system/Pane.js'
import TextInput from './TextInput.js' import TextInput from './TextInput.js'
@@ -84,13 +75,6 @@ const FORM_STEPS: Array<{
}, },
] ]
const GITHUB_PROVIDER_ID = '__github_models__'
const GITHUB_PROVIDER_LABEL = 'GitHub Models'
const GITHUB_PROVIDER_DEFAULT_MODEL = 'github:copilot'
const GITHUB_PROVIDER_DEFAULT_BASE_URL = 'https://models.github.ai/inference'
type GithubCredentialSource = 'stored' | 'env' | 'none'
function toDraft(profile: ProviderProfile): ProviderDraft { function toDraft(profile: ProviderProfile): ProviderDraft {
return { return {
name: profile.name, name: profile.name,
@@ -118,65 +102,11 @@ function profileSummary(profile: ProviderProfile, isActive: boolean): string {
return `${providerKind} · ${profile.baseUrl} · ${profile.model} · ${keyInfo}${activeSuffix}` return `${providerKind} · ${profile.baseUrl} · ${profile.model} · ${keyInfo}${activeSuffix}`
} }
function getGithubCredentialSource(
processEnv: NodeJS.ProcessEnv = process.env,
): GithubCredentialSource {
if (readGithubModelsToken()?.trim()) {
return 'stored'
}
if (processEnv.GITHUB_TOKEN?.trim() || processEnv.GH_TOKEN?.trim()) {
return 'env'
}
return 'none'
}
function isGithubProviderAvailable(
processEnv: NodeJS.ProcessEnv = process.env,
): boolean {
if (isEnvTruthy(processEnv.CLAUDE_CODE_USE_GITHUB)) {
return true
}
return getGithubCredentialSource(processEnv) !== 'none'
}
function getGithubProviderModel(
processEnv: NodeJS.ProcessEnv = process.env,
): string {
if (isEnvTruthy(processEnv.CLAUDE_CODE_USE_GITHUB)) {
return processEnv.OPENAI_MODEL?.trim() || GITHUB_PROVIDER_DEFAULT_MODEL
}
return GITHUB_PROVIDER_DEFAULT_MODEL
}
function getGithubProviderSummary(
isActive: boolean,
credentialSource: GithubCredentialSource,
processEnv: NodeJS.ProcessEnv = process.env,
): string {
const credentialSummary =
credentialSource === 'stored'
? 'token stored'
: credentialSource === 'env'
? 'token via env'
: 'no token found'
const activeSuffix = isActive ? ' (active)' : ''
return `github-models · ${GITHUB_PROVIDER_DEFAULT_BASE_URL} · ${getGithubProviderModel(processEnv)} · ${credentialSummary}${activeSuffix}`
}
export function ProviderManager({ mode, onDone }: Props): React.ReactNode { export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
const [profiles, setProfiles] = React.useState(() => getProviderProfiles()) const [profiles, setProfiles] = React.useState(() => getProviderProfiles())
const [activeProfileId, setActiveProfileId] = React.useState( const [activeProfileId, setActiveProfileId] = React.useState(
() => getActiveProviderProfile()?.id, () => getActiveProviderProfile()?.id,
) )
const [githubProviderAvailable, setGithubProviderAvailable] = React.useState(() =>
isGithubProviderAvailable(),
)
const [githubCredentialSource, setGithubCredentialSource] = React.useState<GithubCredentialSource>(
() => getGithubCredentialSource(),
)
const [isGithubActive, setIsGithubActive] = React.useState(() =>
isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB),
)
const [screen, setScreen] = React.useState<Screen>( const [screen, setScreen] = React.useState<Screen>(
mode === 'first-run' ? 'select-preset' : 'menu', mode === 'first-run' ? 'select-preset' : 'menu',
) )
@@ -200,116 +130,12 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
const nextProfiles = getProviderProfiles() const nextProfiles = getProviderProfiles()
setProfiles(nextProfiles) setProfiles(nextProfiles)
setActiveProfileId(getActiveProviderProfile()?.id) setActiveProfileId(getActiveProviderProfile()?.id)
setGithubProviderAvailable(isGithubProviderAvailable())
setGithubCredentialSource(getGithubCredentialSource())
setIsGithubActive(isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB))
}
function clearStartupProviderOverrideFromUserSettings(): string | null {
const { error } = updateSettingsForSource('userSettings', {
env: {
CLAUDE_CODE_USE_OPENAI: undefined as any,
CLAUDE_CODE_USE_GEMINI: undefined as any,
CLAUDE_CODE_USE_GITHUB: undefined as any,
CLAUDE_CODE_USE_BEDROCK: undefined as any,
CLAUDE_CODE_USE_VERTEX: undefined as any,
CLAUDE_CODE_USE_FOUNDRY: undefined as any,
},
})
return error ? error.message : null
} }
function closeWithCancelled(message: string): void { function closeWithCancelled(message: string): void {
onDone({ action: 'cancelled', message }) onDone({ action: 'cancelled', message })
} }
function activateGithubProvider(): string | null {
const { error } = updateSettingsForSource('userSettings', {
env: {
CLAUDE_CODE_USE_GITHUB: '1',
OPENAI_MODEL: GITHUB_PROVIDER_DEFAULT_MODEL,
OPENAI_API_KEY: undefined as any,
OPENAI_ORG: undefined as any,
OPENAI_PROJECT: undefined as any,
OPENAI_ORGANIZATION: undefined as any,
OPENAI_BASE_URL: undefined as any,
OPENAI_API_BASE: undefined as any,
CLAUDE_CODE_USE_OPENAI: undefined as any,
CLAUDE_CODE_USE_GEMINI: undefined as any,
CLAUDE_CODE_USE_BEDROCK: undefined as any,
CLAUDE_CODE_USE_VERTEX: undefined as any,
CLAUDE_CODE_USE_FOUNDRY: undefined as any,
},
})
if (error) {
return error.message
}
process.env.CLAUDE_CODE_USE_GITHUB = '1'
process.env.OPENAI_MODEL = GITHUB_PROVIDER_DEFAULT_MODEL
delete process.env.OPENAI_API_KEY
delete process.env.OPENAI_ORG
delete process.env.OPENAI_PROJECT
delete process.env.OPENAI_ORGANIZATION
delete process.env.OPENAI_BASE_URL
delete process.env.OPENAI_API_BASE
delete process.env.CLAUDE_CODE_USE_OPENAI
delete process.env.CLAUDE_CODE_USE_GEMINI
delete process.env.CLAUDE_CODE_USE_BEDROCK
delete process.env.CLAUDE_CODE_USE_VERTEX
delete process.env.CLAUDE_CODE_USE_FOUNDRY
delete process.env.CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED
delete process.env.CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED_ID
delete process.env[GITHUB_MODELS_HYDRATED_ENV_MARKER]
hydrateGithubModelsTokenFromSecureStorage()
return null
}
function deleteGithubProvider(): string | null {
const storedTokenBeforeClear = readGithubModelsToken()?.trim()
const cleared = clearGithubModelsToken()
if (!cleared.success) {
return cleared.warning ?? 'Could not clear GitHub credentials.'
}
const { error } = updateSettingsForSource('userSettings', {
env: {
CLAUDE_CODE_USE_GITHUB: undefined as any,
OPENAI_MODEL: undefined as any,
OPENAI_BASE_URL: undefined as any,
OPENAI_API_BASE: undefined as any,
},
})
if (error) {
return error.message
}
const hydratedTokenInSession = process.env.GITHUB_TOKEN?.trim()
if (
process.env[GITHUB_MODELS_HYDRATED_ENV_MARKER] === '1' &&
hydratedTokenInSession &&
(!storedTokenBeforeClear || hydratedTokenInSession === storedTokenBeforeClear)
) {
delete process.env.GITHUB_TOKEN
}
delete process.env.CLAUDE_CODE_USE_GITHUB
delete process.env[GITHUB_MODELS_HYDRATED_ENV_MARKER]
delete process.env.OPENAI_MODEL
delete process.env.OPENAI_API_KEY
delete process.env.OPENAI_ORG
delete process.env.OPENAI_PROJECT
delete process.env.OPENAI_ORGANIZATION
delete process.env.OPENAI_BASE_URL
delete process.env.OPENAI_API_BASE
// Restore active provider profile immediately when one exists.
applyActiveProviderProfileFromConfig()
return null
}
function startCreateFromPreset(preset: ProviderPreset): void { function startCreateFromPreset(preset: ProviderPreset): void {
const defaults = getProviderPresetDefaults(preset) const defaults = getProviderPresetDefaults(preset)
const nextDraft = { const nextDraft = {
@@ -361,20 +187,11 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
return return
} }
const isActiveSavedProfile = getActiveProviderProfile()?.id === saved.id
const settingsOverrideError = isActiveSavedProfile
? clearStartupProviderOverrideFromUserSettings()
: null
refreshProfiles() refreshProfiles()
const successMessage = setStatusMessage(
editingProfileId editingProfileId
? `Updated provider: ${saved.name}` ? `Updated provider: ${saved.name}`
: `Added provider: ${saved.name} (now active)` : `Added provider: ${saved.name} (now active)`,
setStatusMessage(
settingsOverrideError
? `${successMessage}. Warning: could not clear startup provider override (${settingsOverrideError}).`
: successMessage,
) )
if (mode === 'first-run') { if (mode === 'first-run') {
@@ -596,7 +413,6 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
function renderMenu(): React.ReactNode { function renderMenu(): React.ReactNode {
const hasProfiles = profiles.length > 0 const hasProfiles = profiles.length > 0
const hasSelectableProviders = hasProfiles || githubProviderAvailable
const options = [ const options = [
{ {
@@ -608,7 +424,7 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
value: 'activate', value: 'activate',
label: 'Set active provider', label: 'Set active provider',
description: 'Switch the active provider profile', description: 'Switch the active provider profile',
disabled: !hasSelectableProviders, disabled: !hasProfiles,
}, },
{ {
value: 'edit', value: 'edit',
@@ -620,7 +436,7 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
value: 'delete', value: 'delete',
label: 'Delete provider', label: 'Delete provider',
description: 'Remove a provider profile', description: 'Remove a provider profile',
disabled: !hasSelectableProviders, disabled: !hasProfiles,
}, },
{ {
value: 'done', value: 'done',
@@ -639,25 +455,14 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
</Text> </Text>
{statusMessage && <Text>{statusMessage}</Text>} {statusMessage && <Text>{statusMessage}</Text>}
<Box flexDirection="column"> <Box flexDirection="column">
{profiles.length === 0 && !githubProviderAvailable ? ( {profiles.length === 0 ? (
<Text dimColor>No provider profiles configured yet.</Text> <Text dimColor>No provider profiles configured yet.</Text>
) : ( ) : (
<> profiles.map(profile => (
{profiles.map(profile => ( <Text key={profile.id} dimColor>
<Text key={profile.id} dimColor> - {profile.name}: {profileSummary(profile, profile.id === activeProfileId)}
- {profile.name}: {profileSummary(profile, profile.id === activeProfileId)} </Text>
</Text> ))
))}
{githubProviderAvailable ? (
<Text dimColor>
- {GITHUB_PROVIDER_LABEL}:{' '}
{getGithubProviderSummary(
isGithubActive,
githubCredentialSource,
)}
</Text>
) : null}
</>
)} )}
</Box> </Box>
<Select <Select
@@ -669,7 +474,7 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
setScreen('select-preset') setScreen('select-preset')
break break
case 'activate': case 'activate':
if (hasSelectableProviders) { if (profiles.length > 0) {
setScreen('select-active') setScreen('select-active')
} }
break break
@@ -679,7 +484,7 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
} }
break break
case 'delete': case 'delete':
if (hasSelectableProviders) { if (profiles.length > 0) {
setScreen('select-delete') setScreen('select-delete')
} }
break break
@@ -699,29 +504,8 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
title: string, title: string,
emptyMessage: string, emptyMessage: string,
onSelect: (profileId: string) => void, onSelect: (profileId: string) => void,
options?: { includeGithub?: boolean },
): React.ReactNode { ): React.ReactNode {
const includeGithub = options?.includeGithub ?? false if (profiles.length === 0) {
const selectOptions = profiles.map(profile => ({
value: profile.id,
label:
profile.id === activeProfileId
? `${profile.name} (active)`
: profile.name,
description: `${profile.provider === 'anthropic' ? 'anthropic' : 'openai-compatible'} · ${profile.baseUrl} · ${profile.model}`,
}))
if (includeGithub && githubProviderAvailable) {
selectOptions.push({
value: GITHUB_PROVIDER_ID,
label: isGithubActive
? `${GITHUB_PROVIDER_LABEL} (active)`
: GITHUB_PROVIDER_LABEL,
description: `github-models · ${GITHUB_PROVIDER_DEFAULT_BASE_URL} · ${getGithubProviderModel()}`,
})
}
if (selectOptions.length === 0) {
return ( return (
<Box flexDirection="column" gap={1}> <Box flexDirection="column" gap={1}>
<Text color="remember" bold> <Text color="remember" bold>
@@ -744,16 +528,25 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
) )
} }
const options = profiles.map(profile => ({
value: profile.id,
label:
profile.id === activeProfileId
? `${profile.name} (active)`
: profile.name,
description: `${profile.provider === 'anthropic' ? 'anthropic' : 'openai-compatible'} · ${profile.baseUrl} · ${profile.model}`,
}))
return ( return (
<Box flexDirection="column" gap={1}> <Box flexDirection="column" gap={1}>
<Text color="remember" bold> <Text color="remember" bold>
{title} {title}
</Text> </Text>
<Select <Select
options={selectOptions} options={options}
onChange={onSelect} onChange={onSelect}
onCancel={() => setScreen('menu')} onCancel={() => setScreen('menu')}
visibleOptionCount={Math.min(10, Math.max(2, selectOptions.length))} visibleOptionCount={Math.min(10, Math.max(2, options.length))}
/> />
</Box> </Box>
) )
@@ -773,36 +566,16 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
'Set active provider', 'Set active provider',
'No providers available. Add one first.', 'No providers available. Add one first.',
profileId => { profileId => {
if (profileId === GITHUB_PROVIDER_ID) {
const githubError = activateGithubProvider()
if (githubError) {
setErrorMessage(`Could not activate GitHub provider: ${githubError}`)
setScreen('menu')
return
}
refreshProfiles()
setStatusMessage(`Active provider: ${GITHUB_PROVIDER_LABEL}`)
setScreen('menu')
return
}
const active = setActiveProviderProfile(profileId) const active = setActiveProviderProfile(profileId)
if (!active) { if (!active) {
setErrorMessage('Could not change active provider.') setErrorMessage('Could not change active provider.')
setScreen('menu') setScreen('menu')
return return
} }
const settingsOverrideError =
clearStartupProviderOverrideFromUserSettings()
refreshProfiles() refreshProfiles()
setStatusMessage( setStatusMessage(`Active provider: ${active.name}`)
settingsOverrideError
? `Active provider: ${active.name}. Warning: could not clear startup provider override (${settingsOverrideError}).`
: `Active provider: ${active.name}`,
)
setScreen('menu') setScreen('menu')
}, },
{ includeGithub: true },
) )
break break
case 'select-edit': case 'select-edit':
@@ -819,35 +592,15 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
'Delete provider', 'Delete provider',
'No providers available. Add one first.', 'No providers available. Add one first.',
profileId => { profileId => {
if (profileId === GITHUB_PROVIDER_ID) {
const githubDeleteError = deleteGithubProvider()
if (githubDeleteError) {
setErrorMessage(`Could not delete GitHub provider: ${githubDeleteError}`)
} else {
refreshProfiles()
setStatusMessage('GitHub provider deleted')
}
setScreen('menu')
return
}
const result = deleteProviderProfile(profileId) const result = deleteProviderProfile(profileId)
if (!result.removed) { if (!result.removed) {
setErrorMessage('Could not delete provider.') setErrorMessage('Could not delete provider.')
} else { } else {
const settingsOverrideError = result.activeProfileId
? clearStartupProviderOverrideFromUserSettings()
: null
refreshProfiles() refreshProfiles()
setStatusMessage( setStatusMessage('Provider deleted')
settingsOverrideError
? `Provider deleted. Warning: could not clear startup provider override (${settingsOverrideError}).`
: 'Provider deleted',
)
} }
setScreen('menu') setScreen('menu')
}, },
{ includeGithub: true },
) )
break break
case 'menu': case 'menu':

View File

@@ -5,9 +5,6 @@
* Addresses: https://github.com/Gitlawb/openclaude/issues/55 * Addresses: https://github.com/Gitlawb/openclaude/issues/55
*/ */
import { isLocalProviderUrl } from '../services/api/providerConfig.js'
import { getLocalOpenAICompatibleProviderLabel } from '../utils/providerDiscovery.js'
declare const MACRO: { VERSION: string; DISPLAY_VERSION?: string } declare const MACRO: { VERSION: string; DISPLAY_VERSION?: string }
const ESC = '\x1b[' const ESC = '\x1b['
@@ -102,7 +99,7 @@ function detectProvider(): { name: string; model: string; baseUrl: string; isLoc
if (useOpenAI) { if (useOpenAI) {
const rawModel = process.env.OPENAI_MODEL || 'gpt-4o' const rawModel = process.env.OPENAI_MODEL || 'gpt-4o'
const baseUrl = process.env.OPENAI_BASE_URL || 'https://api.openai.com/v1' const baseUrl = process.env.OPENAI_BASE_URL || 'https://api.openai.com/v1'
const isLocal = isLocalProviderUrl(baseUrl) const isLocal = /localhost|127\.0\.0\.1|0\.0\.0\.0/.test(baseUrl)
let name = 'OpenAI' let name = 'OpenAI'
if (/deepseek/i.test(baseUrl) || /deepseek/i.test(rawModel)) name = 'DeepSeek' if (/deepseek/i.test(baseUrl) || /deepseek/i.test(rawModel)) name = 'DeepSeek'
else if (/openrouter/i.test(baseUrl)) name = 'OpenRouter' else if (/openrouter/i.test(baseUrl)) name = 'OpenRouter'
@@ -110,8 +107,10 @@ function detectProvider(): { name: string; model: string; baseUrl: string; isLoc
else if (/groq/i.test(baseUrl)) name = 'Groq' else if (/groq/i.test(baseUrl)) name = 'Groq'
else if (/mistral/i.test(baseUrl) || /mistral/i.test(rawModel)) name = 'Mistral' else if (/mistral/i.test(baseUrl) || /mistral/i.test(rawModel)) name = 'Mistral'
else if (/azure/i.test(baseUrl)) name = 'Azure OpenAI' else if (/azure/i.test(baseUrl)) name = 'Azure OpenAI'
else if (/localhost:11434/i.test(baseUrl)) name = 'Ollama'
else if (/localhost:1234/i.test(baseUrl)) name = 'LM Studio'
else if (/llama/i.test(rawModel)) name = 'Meta Llama' else if (/llama/i.test(rawModel)) name = 'Meta Llama'
else if (isLocal) name = getLocalOpenAICompatibleProviderLabel(baseUrl) else if (isLocal) name = 'Local'
// Resolve model alias to actual model name + reasoning effort // Resolve model alias to actual model name + reasoning effort
let displayModel = rawModel let displayModel = rawModel

View File

@@ -1,113 +0,0 @@
import { describe, expect, it, mock } from 'bun:test'
// We can't fully render ThemePicker due to complex dependencies
// But we can test the theme options generation logic
describe('ThemePicker', () => {
describe('theme options', () => {
it('generates correct theme options without AUTO_THEME feature flag', () => {
// Since we can't easily mock bun:bundle, test the options structure
// The real test would require integration testing
const expectedOptions = [
{ label: "Dark mode", value: "dark" },
{ label: "Light mode", value: "light" },
{ label: "Dark mode (colorblind-friendly)", value: "dark-daltonized" },
{ label: "Light mode (colorblind-friendly)", value: "light-daltonized" },
{ label: "Dark mode (ANSI colors only)", value: "dark-ansi" },
{ label: "Light mode (ANSI colors only)", value: "light-ansi" },
]
expect(expectedOptions.length).toBe(6)
})
it('includes auto theme when AUTO_THEME feature is enabled', () => {
// Test the structure when auto is present
const optionsWithAuto = [
{ label: "Auto (match terminal)", value: "auto" },
{ label: "Dark mode", value: "dark" },
]
expect(optionsWithAuto[0].value).toBe('auto')
})
})
describe('handleRowFocus callback', () => {
it('setPreviewTheme is called with theme setting', () => {
const setPreviewTheme = mock()
const handleRowFocus = (setting: string) => setPreviewTheme(setting)
handleRowFocus('dark')
expect(setPreviewTheme).toHaveBeenCalledWith('dark')
})
})
describe('handleSelect callback', () => {
it('calls savePreview and onThemeSelect', () => {
const savePreview = mock()
const onThemeSelect = mock()
const handleSelect = (setting: string) => {
savePreview()
onThemeSelect(setting)
}
handleSelect('light')
expect(savePreview).toHaveBeenCalled()
expect(onThemeSelect).toHaveBeenCalledWith('light')
})
})
describe('handleCancel callback', () => {
it('calls cancelPreview and gracefulShutdown when not skipExitHandling', () => {
const cancelPreview = mock()
const gracefulShutdown = mock()
const handleCancel = (skipExitHandling: boolean, onCancelProp?: () => void) => {
cancelPreview()
if (skipExitHandling) {
onCancelProp?.()
} else {
gracefulShutdown(0)
}
}
handleCancel(false)
expect(cancelPreview).toHaveBeenCalled()
expect(gracefulShutdown).toHaveBeenCalledWith(0)
})
it('calls onCancelProp when skipExitHandling is true', () => {
const cancelPreview = mock()
const onCancelProp = mock()
const handleCancel = (skipExitHandling: boolean, onCancelProp?: () => void) => {
cancelPreview()
if (skipExitHandling) {
onCancelProp?.()
}
}
handleCancel(true, onCancelProp)
expect(cancelPreview).toHaveBeenCalled()
expect(onCancelProp).toHaveBeenCalled()
})
})
describe('syntax hint logic', () => {
it('shows disabled hint when syntax highlighting is disabled', () => {
const syntaxHighlightingDisabled = true
const syntaxToggleShortcut = 'Ctrl+T'
const hint = syntaxHighlightingDisabled
? `Syntax highlighting disabled (${syntaxToggleShortcut} to enable)`
: `Syntax highlighting enabled (${syntaxToggleShortcut} to disable)`
expect(hint).toContain('disabled')
})
it('shows enabled hint when syntax highlighting is active', () => {
const syntaxHighlightingDisabled = false
const syntaxToggleShortcut = 'Ctrl+T'
const hint = !syntaxHighlightingDisabled
? `Syntax highlighting enabled (${syntaxToggleShortcut} to disable)`
: `Syntax highlighting disabled (${syntaxToggleShortcut} to enable)`
expect(hint).toContain('enabled')
})
})
})

View File

@@ -1,14 +1,13 @@
import { c as _c } from "react-compiler-runtime";
import { feature } from 'bun:bundle'; import { feature } from 'bun:bundle';
import type { StructuredPatchHunk } from 'diff';
import * as React from 'react'; import * as React from 'react';
import { useExitOnCtrlCDWithKeybindings } from '../hooks/useExitOnCtrlCDWithKeybindings.js' import { useExitOnCtrlCDWithKeybindings } from '../hooks/useExitOnCtrlCDWithKeybindings.js';
import { useTerminalSize } from '../hooks/useTerminalSize.js'; import { useTerminalSize } from '../hooks/useTerminalSize.js';
import { Box, Text, usePreviewTheme, useTheme, useThemeSetting } from '../ink.js'; import { Box, Text, usePreviewTheme, useTheme, useThemeSetting } from '../ink.js';
import { useRegisterKeybindingContext } from '../keybindings/KeybindingContext.js'; import { useRegisterKeybindingContext } from '../keybindings/KeybindingContext.js';
import { useKeybinding } from '../keybindings/useKeybinding.js'; import { useKeybinding } from '../keybindings/useKeybinding.js';
import { useShortcutDisplay } from '../keybindings/useShortcutDisplay.js'; import { useShortcutDisplay } from '../keybindings/useShortcutDisplay.js';
import { useAppState, useSetAppState } from '../state/AppState.js'; import { useAppState, useSetAppState } from '../state/AppState.js';
import type { AppState } from '../state/AppStateStore.js';
import { gracefulShutdown } from '../utils/gracefulShutdown.js'; import { gracefulShutdown } from '../utils/gracefulShutdown.js';
import { updateSettingsForSource } from '../utils/settings/settings.js'; import { updateSettingsForSource } from '../utils/settings/settings.js';
import type { ThemeSetting } from '../utils/theme.js'; import type { ThemeSetting } from '../utils/theme.js';
@@ -17,17 +16,6 @@ import { Byline } from './design-system/Byline.js';
import { KeyboardShortcutHint } from './design-system/KeyboardShortcutHint.js'; import { KeyboardShortcutHint } from './design-system/KeyboardShortcutHint.js';
import { getColorModuleUnavailableReason, getSyntaxTheme } from './StructuredDiff/colorDiff.js'; import { getColorModuleUnavailableReason, getSyntaxTheme } from './StructuredDiff/colorDiff.js';
import { StructuredDiff } from './StructuredDiff.js'; import { StructuredDiff } from './StructuredDiff.js';
type StructuredDiffComponent = React.ComponentType<{
patch: StructuredPatchHunk
dim: boolean
filePath: string
firstLine: string | null
width: number
skipHighlighting?: boolean
}>
const StructuredDiffView = StructuredDiff as StructuredDiffComponent
export type ThemePickerProps = { export type ThemePickerProps = {
onThemeSelect: (setting: ThemeSetting) => void; onThemeSelect: (setting: ThemeSetting) => void;
showIntroText?: boolean; showIntroText?: boolean;
@@ -38,224 +26,307 @@ export type ThemePickerProps = {
skipExitHandling?: boolean; skipExitHandling?: boolean;
/** Called when the user cancels (presses Escape). If skipExitHandling is true and this is provided, it will be called instead of just saving the preview. */ /** Called when the user cancels (presses Escape). If skipExitHandling is true and this is provided, it will be called instead of just saving the preview. */
onCancel?: () => void; onCancel?: () => void;
} };
export function ThemePicker(t0) {
const DEMO_PATCH: StructuredPatchHunk = { const $ = _c(59);
oldStart: 1, const {
newStart: 1, onThemeSelect,
oldLines: 3, showIntroText: t1,
newLines: 3, helpText: t2,
lines: [ showHelpTextBelow: t3,
' function greet() {', hideEscToCancel: t4,
'- console.log("Hello, World!");', skipExitHandling: t5,
'+ console.log("Hello, Claude!");', onCancel: onCancelProp
' }', } = t0;
], const showIntroText = t1 === undefined ? false : t1;
} const helpText = t2 === undefined ? "" : t2;
const showHelpTextBelow = t3 === undefined ? false : t3;
/** const hideEscToCancel = t4 === undefined ? false : t4;
* Theme chooser with live preview. Implemented without react-compiler `_c` memo const skipExitHandling = t5 === undefined ? false : t5;
* caches so preview/subtree reconciliation cannot stick on stale element refs when
* `setPreviewTheme` updates the resolved palette.
*/
export function ThemePicker({
onThemeSelect,
showIntroText = false,
helpText = '',
showHelpTextBelow = false,
hideEscToCancel = false,
skipExitHandling = false,
onCancel: onCancelProp,
}: ThemePickerProps) {
const [theme] = useTheme(); const [theme] = useTheme();
const themeSetting = useThemeSetting(); const themeSetting = useThemeSetting();
const { columns } = useTerminalSize(); const {
const colorModuleUnavailableReason = React.useMemo( columns
() => getColorModuleUnavailableReason(), } = useTerminalSize();
[], let t6;
) if ($[0] === Symbol.for("react.memo_cache_sentinel")) {
const syntaxTheme = t6 = getColorModuleUnavailableReason();
colorModuleUnavailableReason === null ? getSyntaxTheme(theme) : null $[0] = t6;
const { setPreviewTheme, savePreview, cancelPreview } = usePreviewTheme() } else {
const syntaxHighlightingDisabled = useAppState( t6 = $[0];
(s: AppState) => s.settings.syntaxHighlightingDisabled ?? false
);
const setAppState = useSetAppState();
useRegisterKeybindingContext("ThemePicker", true);
const syntaxToggleShortcut = useShortcutDisplay("theme:toggleSyntaxHighlighting", "ThemePicker", "ctrl+t");
const toggleSyntax = React.useCallback(() => {
if (colorModuleUnavailableReason === null) {
const newValue = !syntaxHighlightingDisabled
updateSettingsForSource("userSettings", {
syntaxHighlightingDisabled: newValue
});
setAppState(prev => ({
...prev,
settings: {
...prev.settings,
syntaxHighlightingDisabled: newValue
}
}));
}
}, [
colorModuleUnavailableReason,
syntaxHighlightingDisabled,
setAppState,
])
useKeybinding("theme:toggleSyntaxHighlighting", toggleSyntax, {
context: "ThemePicker",
})
const exitState = useExitOnCtrlCDWithKeybindings(
skipExitHandling ? () => {} : undefined,
)
const themeOptions = React.useMemo(
() => [
...(feature("AUTO_THEME")
? [{ label: "Auto (match terminal)", value: "auto" as const }]
: []), {
label: "Dark mode",
value: "dark" as const
}, {
label: "Light mode",
value: "light" as const
}, {
label: "Dark mode (colorblind-friendly)",
value: "dark-daltonized" as const,
}, {
label: "Light mode (colorblind-friendly)",
value: "light-daltonized" as const,
}, {
label: "Dark mode (ANSI colors only)",
value: "dark-ansi" as const
}, {
label: "Light mode (ANSI colors only)",
value: "light-ansi" as const
},],
[],
)
const handleRowFocus = React.useCallback(
(setting: ThemeSetting) => {
setPreviewTheme(setting)
},
[setPreviewTheme],
)
const handleSelect = React.useCallback(
(setting: ThemeSetting) => {
savePreview()
onThemeSelect(setting)
},
[savePreview, onThemeSelect],
)
const handleCancel = React.useCallback(() => {
cancelPreview()
if (skipExitHandling) {
onCancelProp?.()
} else {
void gracefulShutdown(0)
}
}, [cancelPreview, onCancelProp, skipExitHandling])
const syntaxHint =
colorModuleUnavailableReason === 'env'
? `Syntax highlighting disabled (via CLAUDE_CODE_SYNTAX_HIGHLIGHT=${process.env.CLAUDE_CODE_SYNTAX_HIGHLIGHT})`
: syntaxHighlightingDisabled
? `Syntax highlighting disabled (${syntaxToggleShortcut} to enable)`
: syntaxTheme
? `Syntax theme: ${syntaxTheme.theme}${syntaxTheme.source ? ` (from ${syntaxTheme.source})` : ''} (${syntaxToggleShortcut} to disable)`
: `Syntax highlighting enabled (${syntaxToggleShortcut} to disable)`
const header = showIntroText ? (
<Text>{"Let's get started."}</Text>
) : (
<Text bold color="permission">
Theme
</Text>
)
const introBlock = (
<Box flexDirection="column">
<Text bold>Choose the text style that looks best with your terminal</Text>
{helpText && !showHelpTextBelow ? (
<Text dimColor>{helpText}</Text>
) : null}
</Box>
)
const content = (
<Box flexDirection="column" gap={1}>
<Box flexDirection="column" gap={1}>
{header}
{introBlock}
<Select
options={themeOptions}
onFocus={handleRowFocus}
onChange={handleSelect}
onCancel={handleCancel}
visibleOptionCount={themeOptions.length}
defaultValue={themeSetting}
defaultFocusValue={themeSetting}
/>
</Box>
<Box flexDirection="column" width="100%">
<Box
key={theme}
flexDirection="column"
borderTop
borderBottom
borderLeft={false}
borderRight={false}
borderStyle="dashed"
borderColor="subtle"
>
<StructuredDiffView
patch={DEMO_PATCH}
dim={false}
filePath="demo.js"
firstLine={null}
width={columns}
/>
</Box>
<Text dimColor>
{' '}
{syntaxHint}
</Text>
</Box>
</Box>
)
if (!showIntroText) {
return (
<>
<Box flexDirection="column">{content}</Box>
{showHelpTextBelow && helpText ? (
<Box marginLeft={3}>
<Text dimColor>{helpText}</Text>
</Box>
) : null}
{!hideEscToCancel ? (
<Box marginTop={1}>
<Text dimColor italic>
{exitState.pending ? (
<>Press {exitState.keyName} again to exit</>
) : (
<Byline>
<KeyboardShortcutHint shortcut="Enter" action="select" />
<KeyboardShortcutHint shortcut="Esc" action="cancel" />
</Byline>
)}
</Text>
</Box>
) : null}
</>
)
} }
const colorModuleUnavailableReason = t6;
return content let t7;
if ($[1] !== theme) {
t7 = colorModuleUnavailableReason === null ? getSyntaxTheme(theme) : null;
$[1] = theme;
$[2] = t7;
} else {
t7 = $[2];
}
const syntaxTheme = t7;
const {
setPreviewTheme,
savePreview,
cancelPreview
} = usePreviewTheme();
const syntaxHighlightingDisabled = useAppState(_temp) ?? false;
const setAppState = useSetAppState();
useRegisterKeybindingContext("ThemePicker");
const syntaxToggleShortcut = useShortcutDisplay("theme:toggleSyntaxHighlighting", "ThemePicker", "ctrl+t");
let t8;
if ($[3] !== setAppState || $[4] !== syntaxHighlightingDisabled) {
t8 = () => {
if (colorModuleUnavailableReason === null) {
const newValue = !syntaxHighlightingDisabled;
updateSettingsForSource("userSettings", {
syntaxHighlightingDisabled: newValue
});
setAppState(prev => ({
...prev,
settings: {
...prev.settings,
syntaxHighlightingDisabled: newValue
}
}));
}
};
$[3] = setAppState;
$[4] = syntaxHighlightingDisabled;
$[5] = t8;
} else {
t8 = $[5];
}
let t9;
if ($[6] === Symbol.for("react.memo_cache_sentinel")) {
t9 = {
context: "ThemePicker"
};
$[6] = t9;
} else {
t9 = $[6];
}
useKeybinding("theme:toggleSyntaxHighlighting", t8, t9);
const exitState = useExitOnCtrlCDWithKeybindings(skipExitHandling ? _temp2 : undefined);
let t10;
if ($[7] === Symbol.for("react.memo_cache_sentinel")) {
t10 = [...(feature("AUTO_THEME") ? [{
label: "Auto (match terminal)",
value: "auto" as const
}] : []), {
label: "Dark mode",
value: "dark"
}, {
label: "Light mode",
value: "light"
}, {
label: "Dark mode (colorblind-friendly)",
value: "dark-daltonized"
}, {
label: "Light mode (colorblind-friendly)",
value: "light-daltonized"
}, {
label: "Dark mode (ANSI colors only)",
value: "dark-ansi"
}, {
label: "Light mode (ANSI colors only)",
value: "light-ansi"
}];
$[7] = t10;
} else {
t10 = $[7];
}
const themeOptions = t10;
let t11;
if ($[8] !== showIntroText) {
t11 = showIntroText ? <Text>Let's get started.</Text> : <Text bold={true} color="permission">Theme</Text>;
$[8] = showIntroText;
$[9] = t11;
} else {
t11 = $[9];
}
let t12;
if ($[10] === Symbol.for("react.memo_cache_sentinel")) {
t12 = <Text bold={true}>Choose the text style that looks best with your terminal</Text>;
$[10] = t12;
} else {
t12 = $[10];
}
let t13;
if ($[11] !== helpText || $[12] !== showHelpTextBelow) {
t13 = helpText && !showHelpTextBelow && <Text dimColor={true}>{helpText}</Text>;
$[11] = helpText;
$[12] = showHelpTextBelow;
$[13] = t13;
} else {
t13 = $[13];
}
let t14;
if ($[14] !== t13) {
t14 = <Box flexDirection="column">{t12}{t13}</Box>;
$[14] = t13;
$[15] = t14;
} else {
t14 = $[15];
}
let t15;
if ($[16] !== setPreviewTheme) {
t15 = setting => {
setPreviewTheme(setting as ThemeSetting);
};
$[16] = setPreviewTheme;
$[17] = t15;
} else {
t15 = $[17];
}
let t16;
if ($[18] !== onThemeSelect || $[19] !== savePreview) {
t16 = setting_0 => {
savePreview();
onThemeSelect(setting_0 as ThemeSetting);
};
$[18] = onThemeSelect;
$[19] = savePreview;
$[20] = t16;
} else {
t16 = $[20];
}
let t17;
if ($[21] !== cancelPreview || $[22] !== onCancelProp || $[23] !== skipExitHandling) {
t17 = skipExitHandling ? () => {
cancelPreview();
onCancelProp?.();
} : async () => {
cancelPreview();
await gracefulShutdown(0);
};
$[21] = cancelPreview;
$[22] = onCancelProp;
$[23] = skipExitHandling;
$[24] = t17;
} else {
t17 = $[24];
}
let t18;
if ($[25] !== t15 || $[26] !== t16 || $[27] !== t17 || $[28] !== themeSetting) {
t18 = <Select options={themeOptions} onFocus={t15} onChange={t16} onCancel={t17} visibleOptionCount={themeOptions.length} defaultValue={themeSetting} defaultFocusValue={themeSetting} />;
$[25] = t15;
$[26] = t16;
$[27] = t17;
$[28] = themeSetting;
$[29] = t18;
} else {
t18 = $[29];
}
let t19;
if ($[30] !== t11 || $[31] !== t14 || $[32] !== t18) {
t19 = <Box flexDirection="column" gap={1}>{t11}{t14}{t18}</Box>;
$[30] = t11;
$[31] = t14;
$[32] = t18;
$[33] = t19;
} else {
t19 = $[33];
}
let t20;
if ($[34] === Symbol.for("react.memo_cache_sentinel")) {
t20 = {
oldStart: 1,
newStart: 1,
oldLines: 3,
newLines: 3,
lines: [" function greet() {", "- console.log(\"Hello, World!\");", "+ console.log(\"Hello, Claude!\");", " }"]
};
$[34] = t20;
} else {
t20 = $[34];
}
let t21;
if ($[35] !== columns) {
t21 = <Box flexDirection="column" borderTop={true} borderBottom={true} borderLeft={false} borderRight={false} borderStyle="dashed" borderColor="subtle"><StructuredDiff patch={t20} dim={false} filePath="demo.js" firstLine={null} width={columns} /></Box>;
$[35] = columns;
$[36] = t21;
} else {
t21 = $[36];
}
const t22 = colorModuleUnavailableReason === "env" ? `Syntax highlighting disabled (via CLAUDE_CODE_SYNTAX_HIGHLIGHT=${process.env.CLAUDE_CODE_SYNTAX_HIGHLIGHT})` : syntaxHighlightingDisabled ? `Syntax highlighting disabled (${syntaxToggleShortcut} to enable)` : syntaxTheme ? `Syntax theme: ${syntaxTheme.theme}${syntaxTheme.source ? ` (from ${syntaxTheme.source})` : ""} (${syntaxToggleShortcut} to disable)` : `Syntax highlighting enabled (${syntaxToggleShortcut} to disable)`;
let t23;
if ($[37] !== t22) {
t23 = <Text dimColor={true}>{" "}{t22}</Text>;
$[37] = t22;
$[38] = t23;
} else {
t23 = $[38];
}
let t24;
if ($[39] !== t21 || $[40] !== t23) {
t24 = <Box flexDirection="column" width="100%">{t21}{t23}</Box>;
$[39] = t21;
$[40] = t23;
$[41] = t24;
} else {
t24 = $[41];
}
let t25;
if ($[42] !== t19 || $[43] !== t24) {
t25 = <Box flexDirection="column" gap={1}>{t19}{t24}</Box>;
$[42] = t19;
$[43] = t24;
$[44] = t25;
} else {
t25 = $[44];
}
const content = t25;
if (!showIntroText) {
let t26;
if ($[45] !== content) {
t26 = <Box flexDirection="column">{content}</Box>;
$[45] = content;
$[46] = t26;
} else {
t26 = $[46];
}
let t27;
if ($[47] !== helpText || $[48] !== showHelpTextBelow) {
t27 = showHelpTextBelow && helpText && <Box marginLeft={3}><Text dimColor={true}>{helpText}</Text></Box>;
$[47] = helpText;
$[48] = showHelpTextBelow;
$[49] = t27;
} else {
t27 = $[49];
}
let t28;
if ($[50] !== exitState || $[51] !== hideEscToCancel) {
t28 = !hideEscToCancel && <Box><Text dimColor={true} italic={true}>{exitState.pending ? <>Press {exitState.keyName} again to exit</> : <Byline><KeyboardShortcutHint shortcut="Enter" action="select" /><KeyboardShortcutHint shortcut="Esc" action="cancel" /></Byline>}</Text></Box>;
$[50] = exitState;
$[51] = hideEscToCancel;
$[52] = t28;
} else {
t28 = $[52];
}
let t29;
if ($[53] !== t27 || $[54] !== t28) {
t29 = <Box marginTop={1}>{t27}{t28}</Box>;
$[53] = t27;
$[54] = t28;
$[55] = t29;
} else {
t29 = $[55];
}
let t30;
if ($[56] !== t26 || $[57] !== t29) {
t30 = <>{t26}{t29}</>;
$[56] = t26;
$[57] = t29;
$[58] = t30;
} else {
t30 = $[58];
}
return t30;
}
return content;
}
function _temp2() {}
function _temp(s) {
return s.settings.syntaxHighlightingDisabled;
}

View File

@@ -1,252 +0,0 @@
import * as grpc from '@grpc/grpc-js'
import * as protoLoader from '@grpc/proto-loader'
import path from 'path'
import { randomUUID } from 'crypto'
import { QueryEngine } from '../QueryEngine.js'
import { getTools } from '../tools.js'
import { getDefaultAppState } from '../state/AppStateStore.js'
import { AppState } from '../state/AppState.js'
import { FileStateCache, READ_FILE_STATE_CACHE_SIZE } from '../utils/fileStateCache.js'
const PROTO_PATH = path.resolve(import.meta.dirname, '../proto/openclaude.proto')
const packageDefinition = protoLoader.loadSync(PROTO_PATH, {
keepCase: true,
longs: String,
enums: String,
defaults: true,
oneofs: true,
})
const protoDescriptor = grpc.loadPackageDefinition(packageDefinition) as any
const openclaudeProto = protoDescriptor.openclaude.v1
const MAX_SESSIONS = 1000
export class GrpcServer {
private server: grpc.Server
private sessions: Map<string, any[]> = new Map()
constructor() {
this.server = new grpc.Server()
this.server.addService(openclaudeProto.AgentService.service, {
Chat: this.handleChat.bind(this),
})
}
start(port: number = 50051, host: string = 'localhost') {
this.server.bindAsync(
`${host}:${port}`,
grpc.ServerCredentials.createInsecure(),
(error, boundPort) => {
if (error) {
console.error('Failed to start gRPC server', error)
return
}
console.log(`gRPC Server running at ${host}:${boundPort}`)
}
)
}
private handleChat(call: grpc.ServerDuplexStream<any, any>) {
let engine: QueryEngine | null = null
let appState: AppState = getDefaultAppState()
const fileCache: FileStateCache = new FileStateCache(READ_FILE_STATE_CACHE_SIZE, 25 * 1024 * 1024)
// To handle ActionRequired (ask user for permission)
const pendingRequests = new Map<string, (reply: string) => void>()
// Accumulated messages from previous turns for multi-turn context
let previousMessages: any[] = []
let sessionId = ''
let interrupted = false
call.on('data', async (clientMessage) => {
try {
if (clientMessage.request) {
if (engine) {
call.write({
error: {
message: 'A request is already in progress on this stream',
code: 'ALREADY_EXISTS'
}
})
return
}
interrupted = false
const req = clientMessage.request
sessionId = req.session_id || ''
previousMessages = []
// Load previous messages from session store (cross-stream persistence)
if (sessionId && this.sessions.has(sessionId)) {
previousMessages = [...this.sessions.get(sessionId)!]
}
const toolNameById = new Map<string, string>()
engine = new QueryEngine({
cwd: req.working_directory || process.cwd(),
tools: getTools(appState.toolPermissionContext), // Gets all available tools
commands: [], // Slash commands
mcpClients: [],
agents: [],
...(previousMessages.length > 0 ? { initialMessages: previousMessages } : {}),
includePartialMessages: true,
canUseTool: async (tool, input, context, assistantMsg, toolUseID) => {
if (toolUseID) {
toolNameById.set(toolUseID, tool.name)
}
// Notify client of the tool call first
call.write({
tool_start: {
tool_name: tool.name,
arguments_json: JSON.stringify(input),
tool_use_id: toolUseID
}
})
// Ask user for permission
const promptId = randomUUID()
const question = `Approve ${tool.name}?`
call.write({
action_required: {
prompt_id: promptId,
question,
type: 'CONFIRM_COMMAND'
}
})
return new Promise((resolve) => {
pendingRequests.set(promptId, (reply) => {
if (reply.toLowerCase() === 'yes' || reply.toLowerCase() === 'y') {
resolve({ behavior: 'allow' })
} else {
resolve({ behavior: 'deny', reason: 'User denied via gRPC' })
}
})
})
},
getAppState: () => appState,
setAppState: (updater) => { appState = updater(appState) },
readFileCache: fileCache,
userSpecifiedModel: req.model,
fallbackModel: req.model,
})
// Track accumulated response data for FinalResponse
let fullText = ''
let promptTokens = 0
let completionTokens = 0
const generator = engine.submitMessage(req.message)
for await (const msg of generator) {
if (msg.type === 'stream_event') {
if (msg.event.type === 'content_block_delta' && msg.event.delta.type === 'text_delta') {
call.write({
text_chunk: {
text: msg.event.delta.text
}
})
fullText += msg.event.delta.text
}
} else if (msg.type === 'user') {
// Extract tool results
const content = msg.message.content
if (Array.isArray(content)) {
for (const block of content) {
if (block.type === 'tool_result') {
let outputStr = ''
if (typeof block.content === 'string') {
outputStr = block.content
} else if (Array.isArray(block.content)) {
outputStr = block.content.map(c => c.type === 'text' ? c.text : '').join('\n')
}
call.write({
tool_result: {
tool_name: toolNameById.get(block.tool_use_id) ?? block.tool_use_id,
tool_use_id: block.tool_use_id,
output: outputStr,
is_error: block.is_error || false
}
})
}
}
}
} else if (msg.type === 'result') {
// Extract real token counts and final text from the result
if (msg.subtype === 'success') {
if (msg.result) {
fullText = msg.result
}
promptTokens = msg.usage?.input_tokens ?? 0
completionTokens = msg.usage?.output_tokens ?? 0
}
}
}
if (!interrupted) {
// Save messages for multi-turn context in subsequent requests
previousMessages = [...engine.getMessages()]
// Persist to session store for cross-stream resumption
if (sessionId) {
if (!this.sessions.has(sessionId) && this.sessions.size >= MAX_SESSIONS) {
// Evict oldest session (Map preserves insertion order)
this.sessions.delete(this.sessions.keys().next().value)
}
this.sessions.set(sessionId, previousMessages)
}
call.write({
done: {
full_text: fullText,
prompt_tokens: promptTokens,
completion_tokens: completionTokens
}
})
}
engine = null
} else if (clientMessage.input) {
const promptId = clientMessage.input.prompt_id
const reply = clientMessage.input.reply
if (pendingRequests.has(promptId)) {
pendingRequests.get(promptId)!(reply)
pendingRequests.delete(promptId)
}
} else if (clientMessage.cancel) {
interrupted = true
if (engine) {
engine.interrupt()
}
call.end()
}
} catch (err: any) {
console.error("Error processing stream:", err)
call.write({
error: {
message: err.message || "Internal server error",
code: "INTERNAL"
}
})
call.end()
}
})
call.on('end', () => {
interrupted = true
// Unblock any pending permission prompts so canUseTool can return
for (const resolve of pendingRequests.values()) {
resolve('no')
}
if (engine) {
engine.interrupt()
}
engine = null
pendingRequests.clear()
})
}
}
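For orientation, nothing more is needed to run this class: a hypothetical entry point (such a file is not shown in this diff, so the import path is an assumption) only constructs it and calls `start()`, which binds `localhost:50051` per the defaults in the signature above.

```ts
// Hypothetical bootstrap for the GrpcServer class above; illustrative only.
import { GrpcServer } from './grpc/server.js'

new GrpcServer().start() // defaults: port 50051 on localhost, insecure credentials
```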

View File

@@ -27,21 +27,6 @@ async function flushClipboardCopy(): Promise<void> {
   await new Promise(resolve => setTimeout(resolve, 0))
 }
-async function waitForExecCall(
-  command: string,
-  attempts = 20,
-): Promise<(typeof execFileNoThrowMock.mock.calls)[number] | undefined> {
-  for (let attempt = 0; attempt < attempts; attempt++) {
-    const call = execFileNoThrowMock.mock.calls.find(([cmd]) => cmd === command)
-    if (call) {
-      return call
-    }
-    await flushClipboardCopy()
-  }
-  return undefined
-}
 describe('Windows clipboard fallback', () => {
   beforeEach(() => {
     execFileNoThrowMock.mockClear()
@@ -77,7 +62,9 @@ describe('Windows clipboard fallback', () => {
     await setClipboard('Привет мир')
     await flushClipboardCopy()
-    const windowsCall = await waitForExecCall('powershell')
+    const windowsCall = execFileNoThrowMock.mock.calls.find(
+      ([cmd]) => cmd === 'powershell',
+    )
     expect(windowsCall?.[2]).toMatchObject({
       stdin: 'ignore',

View File

@@ -1,101 +0,0 @@
syntax = "proto3";
package openclaude.v1;
// Main Agent Service
service AgentService {
// Bidirectional stream: client sends tasks and answers to agent prompts,
// server streams text tokens, tool states, and requests permissions.
rpc Chat(stream ClientMessage) returns (stream ServerMessage);
}
// ---------------------------------------------------------
// MESSAGES FROM CLIENT (Input)
// ---------------------------------------------------------
message ClientMessage {
oneof payload {
// 1. Initial request (first message in the stream)
ChatRequest request = 2;
// 2. User response to an agent prompt (e.g., command confirmation)
UserInput input = 3;
// 3. Interrupt signal (if the user clicks "Stop generation")
CancelSignal cancel = 4;
}
}
message ChatRequest {
string message = 1;
string working_directory = 2; // Where the agent should execute commands
reserved 3; // Reserved to prevent accidental reuse
optional string model = 4;
string session_id = 5; // Non-empty = cross-stream session persistence
}
message UserInput {
string reply = 1; // Text response (e.g., "y", "no", or clarification)
string prompt_id = 2; // ID of the prompt we are responding to
}
message CancelSignal {
string reason = 1;
}
// ---------------------------------------------------------
// MESSAGES FROM SERVER (Output / Events)
// ---------------------------------------------------------
message ServerMessage {
// Using oneof guarantees that only one type of event arrives at a time
oneof event {
TextChunk text_chunk = 1; // Chunk of text from LLM
ToolCallStart tool_start = 2; // Agent started using a tool
ToolCallResult tool_result = 3; // Tool returned a result
ActionRequired action_required = 4;// Agent requires human intervention
FinalResponse done = 5; // Generation successfully completed
ErrorResponse error = 6; // A critical error occurred
}
}
// Stream text chunk
message TextChunk {
string text = 1;
}
// Agent decided to use a tool (bash, read_file, etc.)
message ToolCallStart {
string tool_name = 1;
string arguments_json = 2; // Arguments in JSON format
string tool_use_id = 3; // Correlation ID matching ToolCallResult
}
// Result of tool execution
message ToolCallResult {
string tool_name = 1;
string output = 2; // stdout/stderr or file contents
bool is_error = 3; // Did the command itself fail
string tool_use_id = 4; // Correlation ID matching ToolCallStart
}
// Agent paused work and is waiting for user decision
message ActionRequired {
string prompt_id = 1; // Client must return this ID in UserInput
string question = 2; // Question text (e.g., "Execute 'rm -rf /'?")
enum ActionType {
CONFIRM_COMMAND = 0; // Yes/No
REQUEST_INFORMATION = 1; // Text input
}
ActionType type = 3;
}
// Final statistics
message FinalResponse {
string full_text = 1; // The entire generated text
int32 prompt_tokens = 2;
int32 completion_tokens = 3;
}
message ErrorResponse {
string message = 1;
string code = 2;
}
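Read together with the server above, the whole protocol runs over the single bidirectional `Chat` stream: the client opens the stream with a `request`, answers each `action_required` with an `input` carrying the same `prompt_id`, and stops on `done` or `error`. The sketch below is illustrative only and not part of this diff; it loads the same proto file with `@grpc/proto-loader`, assumes the server's default `localhost:50051` address, and blindly approves every permission prompt, which a real client should not do.

```ts
// Minimal illustrative client for the AgentService.Chat stream (assumptions noted above).
import * as grpc from '@grpc/grpc-js'
import * as protoLoader from '@grpc/proto-loader'

const packageDefinition = protoLoader.loadSync('src/proto/openclaude.proto', {
  keepCase: true, longs: String, enums: String, defaults: true, oneofs: true,
})
const proto = grpc.loadPackageDefinition(packageDefinition) as any

const client = new proto.openclaude.v1.AgentService(
  'localhost:50051',
  grpc.credentials.createInsecure(),
)
const stream = client.Chat()

stream.on('data', (msg: any) => {
  if (msg.text_chunk) process.stdout.write(msg.text_chunk.text)
  if (msg.tool_start) console.log(`\n[tool] ${msg.tool_start.tool_name}`)
  if (msg.action_required) {
    // Auto-approve for brevity; a real client would prompt the user here.
    stream.write({ input: { prompt_id: msg.action_required.prompt_id, reply: 'y' } })
  }
  if (msg.done || msg.error) stream.end()
})

stream.write({
  request: { message: 'List the files in this repo', working_directory: process.cwd() },
})
```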

View File

@@ -237,7 +237,6 @@ import { useOfficialMarketplaceNotification } from 'src/hooks/useOfficialMarketp
 import { usePromptsFromClaudeInChrome } from 'src/hooks/usePromptsFromClaudeInChrome.js';
 import { getTipToShowOnSpinner, recordShownTip } from 'src/services/tips/tipScheduler.js';
 import type { Theme } from 'src/utils/theme.js';
-import { isPromptTypingSuppressionActive } from './replInputSuppression.js';
 import { checkAndDisableBypassPermissionsIfNeeded, checkAndDisableAutoModeIfNeeded, useKickOffCheckAndDisableBypassPermissionsIfNeeded, useKickOffCheckAndDisableAutoModeIfNeeded } from 'src/utils/permissions/bypassPermissionsKillswitch.js';
 import { SandboxManager } from 'src/utils/sandbox/sandbox-adapter.js';
 import { SANDBOX_NETWORK_ACCESS_TOOL_NAME } from 'src/cli/structuredIO.js';
@@ -1337,7 +1336,6 @@ export function REPL({
   const [inputValue, setInputValueRaw] = useState(() => consumeEarlyInput());
   const inputValueRef = useRef(inputValue);
   inputValueRef.current = inputValue;
-  const promptTypingSuppressionActive = isPromptTypingSuppressionActive(isPromptInputActive, inputValue);
   const insertTextRef = useRef<{
     insert: (text: string) => void;
     setInputWithCursor: (value: string, cursor: number) => void;
@@ -2030,7 +2028,7 @@
   if (isMessageSelectorVisible) return 'message-selector';
   // Suppress interrupt dialogs while user is actively typing
-  if (promptTypingSuppressionActive) return undefined;
+  if (isPromptInputActive) return undefined;
   if (sandboxPermissionRequestQueue[0]) return 'sandbox-permission';
   // Permission/interactive dialogs (show unless blocked by toolJSX)
@@ -2073,7 +2071,7 @@
   const focusedInputDialog = getFocusedInputDialog();
   // True when permission prompts exist but are hidden because the user is typing
-  const hasSuppressedDialogs = promptTypingSuppressionActive && (sandboxPermissionRequestQueue[0] || toolUseConfirmQueue[0] || promptQueue[0] || workerSandboxPermissions.queue[0] || elicitation.queue[0] || showingCostDialog);
+  const hasSuppressedDialogs = isPromptInputActive && (sandboxPermissionRequestQueue[0] || toolUseConfirmQueue[0] || promptQueue[0] || workerSandboxPermissions.queue[0] || elicitation.queue[0] || showingCostDialog);
   // Keep ref in sync so timer callbacks can read the current value
   focusedInputDialogRef.current = focusedInputDialog;

View File

@@ -1,18 +0,0 @@
import { describe, expect, it } from 'bun:test'
import { isPromptTypingSuppressionActive } from './replInputSuppression.js'
describe('isPromptTypingSuppressionActive', () => {
it('suppresses dialogs when early input already exists', () => {
expect(isPromptTypingSuppressionActive(false, 'hello')).toBe(true)
})
it('does not suppress dialogs for empty or whitespace-only input', () => {
expect(isPromptTypingSuppressionActive(false, '')).toBe(false)
expect(isPromptTypingSuppressionActive(false, ' ')).toBe(false)
})
it('keeps suppression active while the typing flag is set', () => {
expect(isPromptTypingSuppressionActive(true, '')).toBe(true)
})
})

View File

@@ -1,6 +0,0 @@
export function isPromptTypingSuppressionActive(
isPromptInputActive: boolean,
inputValue: string,
): boolean {
return isPromptInputActive || inputValue.trim().length > 0
}

View File

@@ -14,16 +14,7 @@ import { lazySchema } from '../../utils/lazySchema.js'
import { logError } from '../../utils/log.js' import { logError } from '../../utils/log.js'
import { getAPIProvider } from '../../utils/model/providers.js' import { getAPIProvider } from '../../utils/model/providers.js'
import { isEssentialTrafficOnly } from '../../utils/privacyLevel.js' import { isEssentialTrafficOnly } from '../../utils/privacyLevel.js'
import type { ModelOption } from '../../utils/model/modelOptions.js'
import {
getLocalOpenAICompatibleProviderLabel,
listOpenAICompatibleModels,
} from '../../utils/providerDiscovery.js'
import { getClaudeCodeUserAgent } from '../../utils/userAgent.js' import { getClaudeCodeUserAgent } from '../../utils/userAgent.js'
import {
getAdditionalModelOptionsCacheScope,
resolveProviderRequest,
} from './providerConfig.js'
const bootstrapResponseSchema = lazySchema(() => const bootstrapResponseSchema = lazySchema(() =>
z.object({ z.object({
@@ -48,12 +39,6 @@ const bootstrapResponseSchema = lazySchema(() =>
type BootstrapResponse = z.infer<ReturnType<typeof bootstrapResponseSchema>> type BootstrapResponse = z.infer<ReturnType<typeof bootstrapResponseSchema>>
type BootstrapCachePayload = {
clientData: Record<string, unknown> | null
additionalModelOptions: ModelOption[]
additionalModelOptionsScope: string
}
async function fetchBootstrapAPI(): Promise<BootstrapResponse | null> { async function fetchBootstrapAPI(): Promise<BootstrapResponse | null> {
if (isEssentialTrafficOnly()) { if (isEssentialTrafficOnly()) {
logForDebugging('[Bootstrap] Skipped: Nonessential traffic disabled') logForDebugging('[Bootstrap] Skipped: Nonessential traffic disabled')
@@ -123,70 +108,22 @@ async function fetchBootstrapAPI(): Promise<BootstrapResponse | null> {
} }
} }
async function fetchLocalOpenAIModelOptions(): Promise<BootstrapCachePayload | null> {
const scope = getAdditionalModelOptionsCacheScope()
if (!scope?.startsWith('openai:')) {
return null
}
const { baseUrl } = resolveProviderRequest()
const models = await listOpenAICompatibleModels({
baseUrl,
apiKey: process.env.OPENAI_API_KEY,
})
if (models === null) {
logForDebugging('[Bootstrap] Local OpenAI model discovery failed')
return null
}
const providerLabel = getLocalOpenAICompatibleProviderLabel(baseUrl)
return {
clientData: getGlobalConfig().clientDataCache ?? null,
additionalModelOptionsScope: scope,
additionalModelOptions: models.map(model => ({
value: model,
label: model,
description: `Detected from ${providerLabel}`,
})),
}
}
 /**
  * Fetch bootstrap data from the API and persist to disk cache.
  */
 export async function fetchBootstrapData(): Promise<void> {
   try {
-    const scope = getAdditionalModelOptionsCacheScope()
-    let payload: BootstrapCachePayload | null = null
-    if (scope === 'firstParty') {
-      const response = await fetchBootstrapAPI()
-      if (!response) return
-      payload = {
-        clientData: response.client_data ?? null,
-        additionalModelOptions: response.additional_model_options ?? [],
-        additionalModelOptionsScope: scope,
-      }
-    } else if (scope?.startsWith('openai:')) {
-      payload = await fetchLocalOpenAIModelOptions()
-      if (!payload) return
-    } else {
-      logForDebugging('[Bootstrap] Skipped: no additional model source')
-      return
-    }
-    const { clientData, additionalModelOptions, additionalModelOptionsScope } =
-      payload
+    const response = await fetchBootstrapAPI()
+    if (!response) return
+    const clientData = response.client_data ?? null
+    const additionalModelOptions = response.additional_model_options ?? []
     // Only persist if data actually changed — avoids a config write on every startup.
     const config = getGlobalConfig()
     if (
       isEqual(config.clientDataCache, clientData) &&
-      isEqual(config.additionalModelOptionsCache, additionalModelOptions) &&
-      config.additionalModelOptionsCacheScope === additionalModelOptionsScope
+      isEqual(config.additionalModelOptionsCache, additionalModelOptions)
     ) {
       logForDebugging('[Bootstrap] Cache unchanged, skipping write')
       return
@@ -197,7 +134,6 @@ export async function fetchBootstrapData(): Promise<void> {
       ...current,
       clientDataCache: clientData,
       additionalModelOptionsCache: additionalModelOptions,
-      additionalModelOptionsCacheScope: additionalModelOptionsScope,
     }))
   } catch (error) {
     logError(error)

View File

@@ -14,19 +14,12 @@ import {
} from './providerConfig.js' } from './providerConfig.js'
const tempDirs: string[] = [] const tempDirs: string[] = []
const originalEnv = {
OPENAI_BASE_URL: process.env.OPENAI_BASE_URL,
OPENAI_API_BASE: process.env.OPENAI_API_BASE,
}
afterEach(() => { afterEach(() => {
while (tempDirs.length > 0) { while (tempDirs.length > 0) {
const dir = tempDirs.pop() const dir = tempDirs.pop()
if (dir) rmSync(dir, { recursive: true, force: true }) if (dir) rmSync(dir, { recursive: true, force: true })
} }
process.env.OPENAI_BASE_URL = originalEnv.OPENAI_BASE_URL
process.env.OPENAI_API_BASE = originalEnv.OPENAI_API_BASE
}) })
function createTempAuthJson(payload: Record<string, unknown>): string { function createTempAuthJson(payload: Record<string, unknown>): string {
@@ -69,26 +62,12 @@ describe('Codex provider config', () => {
}) })
test('resolves codexplan alias to Codex transport with reasoning', () => { test('resolves codexplan alias to Codex transport with reasoning', () => {
delete process.env.OPENAI_BASE_URL
delete process.env.OPENAI_API_BASE
const resolved = resolveProviderRequest({ model: 'codexplan' }) const resolved = resolveProviderRequest({ model: 'codexplan' })
expect(resolved.transport).toBe('codex_responses') expect(resolved.transport).toBe('codex_responses')
expect(resolved.resolvedModel).toBe('gpt-5.4') expect(resolved.resolvedModel).toBe('gpt-5.4')
expect(resolved.reasoning).toEqual({ effort: 'high' }) expect(resolved.reasoning).toEqual({ effort: 'high' })
}) })
test('does not force Codex transport when a local non-Codex base URL is explicit', () => {
const resolved = resolveProviderRequest({
model: 'codexplan',
baseUrl: 'http://127.0.0.1:8080/v1',
})
expect(resolved.transport).toBe('chat_completions')
expect(resolved.baseUrl).toBe('http://127.0.0.1:8080/v1')
expect(resolved.resolvedModel).toBe('gpt-5.4')
})
test('resolves codexplan to Codex transport even when OPENAI_BASE_URL is the string "undefined"', () => { test('resolves codexplan to Codex transport even when OPENAI_BASE_URL is the string "undefined"', () => {
// On Windows, env vars can leak as the literal string "undefined" instead of // On Windows, env vars can leak as the literal string "undefined" instead of
// the JS value undefined when not properly unset (issue #336). // the JS value undefined when not properly unset (issue #336).

View File

@@ -557,12 +557,8 @@ export function getAssistantMessageFromError(
   const stripped = error.message.replace(/^429\s+/, '')
   const innerMessage = stripped.match(/"message"\s*:\s*"([^"]*)"/)?.[1]
   const detail = innerMessage || stripped
-  const retryAfter = (error as APIError).headers?.get?.('retry-after')
-  const retryHint = retryAfter && !isNaN(Number(retryAfter))
-    ? `Try again in ${retryAfter} seconds.`
-    : 'Try again in a few seconds.'
   return createAssistantAPIErrorMessage({
-    content: `${API_ERROR_MESSAGE_PREFIX}: Request rejected (429) · ${detail || 'this may be a temporary capacity issue'}${retryHint}`,
+    content: `${API_ERROR_MESSAGE_PREFIX}: Request rejected (429) · ${detail || `this may be a temporary capacity issue${getAPIProvider() === 'firstParty' ? ' — check status.anthropic.com' : ''}`}`,
     error: 'rate_limit',
   })
 }

File diff suppressed because it is too large

View File

@@ -42,10 +42,6 @@ import {
} from './providerConfig.js' } from './providerConfig.js'
import { sanitizeSchemaForOpenAICompat } from '../../utils/schemaSanitizer.js' import { sanitizeSchemaForOpenAICompat } from '../../utils/schemaSanitizer.js'
import { redactSecretValueForDisplay } from '../../utils/providerProfile.js' import { redactSecretValueForDisplay } from '../../utils/providerProfile.js'
import {
normalizeToolArguments,
hasToolFieldMapping,
} from './toolArgumentNormalization.js'
type SecretValueSource = Partial<{ type SecretValueSource = Partial<{
OPENAI_API_KEY: string OPENAI_API_KEY: string
@@ -299,41 +295,7 @@ function convertMessages(
     }
   }
-  // Coalescing pass: merge consecutive messages of the same role.
-  // OpenAI/vLLM/Ollama require strict user↔assistant alternation.
-  // Multiple consecutive tool messages are allowed (assistant → tool* → user).
-  // Consecutive user or assistant messages must be merged to avoid Jinja
-  // template errors like "roles must alternate" (Devstral, Mistral models).
-  const coalesced: OpenAIMessage[] = []
-  for (const msg of result) {
-    const prev = coalesced[coalesced.length - 1]
-    if (prev && prev.role === msg.role && msg.role !== 'tool' && msg.role !== 'system') {
-      const prevContent = prev.content
-      const curContent = msg.content
-      if (typeof prevContent === 'string' && typeof curContent === 'string') {
-        prev.content = prevContent + (prevContent && curContent ? '\n' : '') + curContent
-      } else {
-        const toArray = (
-          c: string | Array<{ type: string; text?: string; image_url?: { url: string } }> | undefined,
-        ): Array<{ type: string; text?: string; image_url?: { url: string } }> => {
-          if (!c) return []
-          if (typeof c === 'string') return c ? [{ type: 'text', text: c }] : []
-          return c
-        }
-        prev.content = [...toArray(prevContent), ...toArray(curContent)]
-      }
-      if (msg.tool_calls?.length) {
-        prev.tool_calls = [...(prev.tool_calls ?? []), ...msg.tool_calls]
-      }
-    } else {
-      coalesced.push(msg)
-    }
-  }
-  return coalesced
+  return result
 }
/** /**
@@ -480,30 +442,6 @@ function convertChunkUsage(
} }
} }
const JSON_REPAIR_SUFFIXES = [
'}', '"}', ']}', '"]}', '}}', '"}}', ']}}', '"]}}', '"]}]}', '}]}'
]
function repairPossiblyTruncatedObjectJson(raw: string): string | null {
try {
const parsed = JSON.parse(raw)
return parsed && typeof parsed === 'object' && !Array.isArray(parsed)
? raw
: null
} catch {
for (const combo of JSON_REPAIR_SUFFIXES) {
try {
const repaired = raw + combo
const parsed = JSON.parse(repaired)
if (parsed && typeof parsed === 'object' && !Array.isArray(parsed)) {
return repaired
}
} catch {}
}
return null
}
}
/** /**
* Async generator that transforms an OpenAI SSE stream into * Async generator that transforms an OpenAI SSE stream into
* Anthropic-format BetaRawMessageStreamEvent objects. * Anthropic-format BetaRawMessageStreamEvent objects.
@@ -514,16 +452,7 @@ async function* openaiStreamToAnthropic(
 ): AsyncGenerator<AnthropicStreamEvent> {
   const messageId = makeMessageId()
   let contentBlockIndex = 0
-  const activeToolCalls = new Map<
-    number,
-    {
-      id: string
-      name: string
-      index: number
-      jsonBuffer: string
-      normalizeAtStop: boolean
-    }
-  >()
+  const activeToolCalls = new Map<number, { id: string; name: string; index: number; jsonBuffer: string }>()
   let hasEmittedContentStart = false
   let lastStopReason: 'tool_use' | 'max_tokens' | 'end_turn' | null = null
   let hasEmittedFinalUsage = false
@@ -614,14 +543,11 @@ async function* openaiStreamToAnthropic(
       }
       const toolBlockIndex = contentBlockIndex
-      const initialArguments = tc.function.arguments ?? ''
-      const normalizeAtStop = hasToolFieldMapping(tc.function.name)
       activeToolCalls.set(tc.index, {
         id: tc.id,
         name: tc.function.name,
         index: toolBlockIndex,
-        jsonBuffer: initialArguments,
-        normalizeAtStop,
+        jsonBuffer: tc.function.arguments ?? '',
       })
       yield {
@@ -638,7 +564,7 @@
       contentBlockIndex++
       // Emit any initial arguments
-      if (tc.function.arguments && !normalizeAtStop) {
+      if (tc.function.arguments) {
         yield {
           type: 'content_block_delta',
           index: toolBlockIndex,
@@ -655,11 +581,6 @@
       if (tc.function.arguments) {
         active.jsonBuffer += tc.function.arguments
       }
-      if (active.normalizeAtStop) {
-        continue
-      }
       yield {
         type: 'content_block_delta',
         index: active.index,
@@ -687,44 +608,16 @@ async function* openaiStreamToAnthropic(
} }
// Close active tool calls // Close active tool calls
for (const [, tc] of activeToolCalls) { for (const [, tc] of activeToolCalls) {
if (tc.normalizeAtStop) {
let partialJson: string
if (choice.finish_reason === 'length') {
// Truncated by max tokens — preserve raw buffer to avoid
// turning an incomplete tool call into an executable command
partialJson = tc.jsonBuffer
} else {
const repairedStructuredJson = repairPossiblyTruncatedObjectJson(
tc.jsonBuffer,
)
if (repairedStructuredJson) {
partialJson = repairedStructuredJson
} else {
partialJson = JSON.stringify(
normalizeToolArguments(tc.name, tc.jsonBuffer),
)
}
}
yield {
type: 'content_block_delta',
index: tc.index,
delta: {
type: 'input_json_delta',
partial_json: partialJson,
},
}
yield { type: 'content_block_stop', index: tc.index }
continue
}
       let suffixToAdd = ''
       if (tc.jsonBuffer) {
         try {
           JSON.parse(tc.jsonBuffer)
         } catch {
           const str = tc.jsonBuffer.trimEnd()
-          for (const combo of JSON_REPAIR_SUFFIXES) {
+          const combinations = [
+            '}', '"}', ']}', '"]}', '}}', '"}}', ']}}', '"]}}', '"]}]}', '}]}'
+          ]
+          for (const combo of combinations) {
             try {
               JSON.parse(str + combo)
               suffixToAdd = combo
@@ -1160,10 +1053,12 @@ class OpenAIShimMessages {
     if (choice?.message?.tool_calls) {
       for (const tc of choice.message.tool_calls) {
-        const input = normalizeToolArguments(
-          tc.function.name,
-          tc.function.arguments,
-        )
+        let input: unknown
+        try {
+          input = JSON.parse(tc.function.arguments)
+        } catch {
+          input = { raw: tc.function.arguments }
+        }
         content.push({
           type: 'tool_use',
           id: tc.id,

View File

@@ -1,22 +1,6 @@
-import { afterEach, expect, test } from 'bun:test'
-import {
-  getAdditionalModelOptionsCacheScope,
-  isLocalProviderUrl,
-  resolveProviderRequest,
-} from './providerConfig.js'
-const originalEnv = {
-  CLAUDE_CODE_USE_OPENAI: process.env.CLAUDE_CODE_USE_OPENAI,
-  OPENAI_BASE_URL: process.env.OPENAI_BASE_URL,
-  OPENAI_MODEL: process.env.OPENAI_MODEL,
-}
-afterEach(() => {
-  process.env.CLAUDE_CODE_USE_OPENAI = originalEnv.CLAUDE_CODE_USE_OPENAI
-  process.env.OPENAI_BASE_URL = originalEnv.OPENAI_BASE_URL
-  process.env.OPENAI_MODEL = originalEnv.OPENAI_MODEL
-})
+import { expect, test } from 'bun:test'
+import { isLocalProviderUrl } from './providerConfig.js'
 test('treats localhost endpoints as local', () => {
   expect(isLocalProviderUrl('http://localhost:11434/v1')).toBe(true)
@@ -49,37 +33,3 @@ test('treats public hosts as remote', () => {
expect(isLocalProviderUrl('https://example.com/v1')).toBe(false) expect(isLocalProviderUrl('https://example.com/v1')).toBe(false)
expect(isLocalProviderUrl('http://[2001:4860:4860::8888]:11434/v1')).toBe(false) expect(isLocalProviderUrl('http://[2001:4860:4860::8888]:11434/v1')).toBe(false)
}) })
test('creates a cache scope for local openai-compatible providers', () => {
process.env.CLAUDE_CODE_USE_OPENAI = '1'
process.env.OPENAI_BASE_URL = 'http://localhost:1234/v1'
process.env.OPENAI_MODEL = 'llama-3.2-3b-instruct'
expect(getAdditionalModelOptionsCacheScope()).toBe(
'openai:http://localhost:1234/v1',
)
})
test('keeps codex alias models on chat completions for local openai-compatible providers', () => {
process.env.CLAUDE_CODE_USE_OPENAI = '1'
process.env.OPENAI_BASE_URL = 'http://127.0.0.1:8080/v1'
process.env.OPENAI_MODEL = 'gpt-5.4'
expect(resolveProviderRequest()).toMatchObject({
transport: 'chat_completions',
requestedModel: 'gpt-5.4',
resolvedModel: 'gpt-5.4',
baseUrl: 'http://127.0.0.1:8080/v1',
})
expect(getAdditionalModelOptionsCacheScope()).toBe(
'openai:http://127.0.0.1:8080/v1',
)
})
test('skips local model cache scope for remote openai-compatible providers', () => {
process.env.CLAUDE_CODE_USE_OPENAI = '1'
process.env.OPENAI_BASE_URL = 'https://api.openai.com/v1'
process.env.OPENAI_MODEL = 'gpt-4o'
expect(getAdditionalModelOptionsCacheScope()).toBeNull()
})

View File

@@ -219,14 +219,6 @@ export function isCodexAlias(model: string): boolean {
return base in CODEX_ALIAS_MODELS return base in CODEX_ALIAS_MODELS
} }
export function shouldUseCodexTransport(
model: string,
baseUrl: string | undefined,
): boolean {
const explicitBaseUrl = asEnvUrl(baseUrl)
return isCodexBaseUrl(explicitBaseUrl) || (!explicitBaseUrl && isCodexAlias(model))
}
export function isLocalProviderUrl(baseUrl: string | undefined): boolean { export function isLocalProviderUrl(baseUrl: string | undefined): boolean {
if (!baseUrl) return false if (!baseUrl) return false
try { try {
@@ -310,8 +302,13 @@ export function resolveProviderRequest(options?: {
     asEnvUrl(options?.baseUrl) ??
     asEnvUrl(process.env.OPENAI_BASE_URL) ??
     asEnvUrl(process.env.OPENAI_API_BASE)
+  // Use Codex transport only when:
+  // - the base URL is explicitly the Codex endpoint, OR
+  // - the model is a Codex alias AND no custom base URL has been set
+  // A custom OPENAI_BASE_URL (e.g. Azure, OpenRouter) always wins over
+  // model-name-based Codex detection to prevent auth failures (#200, #203).
   const transport: ProviderTransport =
-    shouldUseCodexTransport(requestedModel, rawBaseUrl)
+    isCodexBaseUrl(rawBaseUrl) || (!rawBaseUrl && isCodexAlias(requestedModel))
       ? 'codex_responses'
       : 'chat_completions'
@@ -340,30 +337,6 @@ export function resolveProviderRequest(options?: {
} }
} }
export function getAdditionalModelOptionsCacheScope(): string | null {
if (!isEnvTruthy(process.env.CLAUDE_CODE_USE_OPENAI)) {
if (!isEnvTruthy(process.env.CLAUDE_CODE_USE_GEMINI) &&
!isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB) &&
!isEnvTruthy(process.env.CLAUDE_CODE_USE_BEDROCK) &&
!isEnvTruthy(process.env.CLAUDE_CODE_USE_VERTEX) &&
!isEnvTruthy(process.env.CLAUDE_CODE_USE_FOUNDRY)) {
return 'firstParty'
}
return null
}
const request = resolveProviderRequest()
if (request.transport !== 'chat_completions') {
return null
}
if (!isLocalProviderUrl(request.baseUrl)) {
return null
}
return `openai:${request.baseUrl.toLowerCase()}`
}
export function resolveCodexAuthPath( export function resolveCodexAuthPath(
env: NodeJS.ProcessEnv = process.env, env: NodeJS.ProcessEnv = process.env,
): string { ): string {

View File

@@ -1,180 +0,0 @@
import { describe, expect, test } from 'bun:test'
import { normalizeToolArguments } from './toolArgumentNormalization'
describe('normalizeToolArguments', () => {
describe('Bash tool', () => {
test('wraps plain string into { command }', () => {
expect(normalizeToolArguments('Bash', 'pwd')).toEqual({ command: 'pwd' })
})
test('wraps multi-word command', () => {
expect(normalizeToolArguments('Bash', 'ls -la /tmp')).toEqual({
command: 'ls -la /tmp',
})
})
test('passes through structured JSON object', () => {
expect(
normalizeToolArguments('Bash', '{"command":"echo hi"}'),
).toEqual({ command: 'echo hi' })
})
test('returns empty object for blank string', () => {
expect(normalizeToolArguments('Bash', '')).toEqual({})
expect(normalizeToolArguments('Bash', ' ')).toEqual({})
})
test('returns parsed blank for JSON-encoded blank string', () => {
expect(normalizeToolArguments('Bash', '""')).toEqual('')
expect(normalizeToolArguments('Bash', '" "')).toEqual(' ')
})
test('returns empty object for malformed structured object literal', () => {
expect(normalizeToolArguments('Bash', '{ "command": "pwd"')).toEqual({})
})
test.each([
['{command:"pwd"}'],
["{'command':'pwd'}"],
['{command: pwd}'],
])(
'returns empty object for malformed object-shaped string %s (does not wrap into command)',
(input) => {
expect(normalizeToolArguments('Bash', input)).toEqual({})
},
)
test.each([
['false', false],
['null', null],
['[]', [] as unknown[]],
['0', 0],
['true', true],
['123', 123],
])(
'preserves JSON literal %s as-is (does not wrap into command)',
(input, expected) => {
expect(normalizeToolArguments('Bash', input)).toEqual(expected)
},
)
test('wraps JSON-encoded string into { command }', () => {
expect(normalizeToolArguments('Bash', '"pwd"')).toEqual({
command: 'pwd',
})
})
})
describe('undefined arguments', () => {
test('returns empty object for undefined', () => {
expect(normalizeToolArguments('Bash', undefined)).toEqual({})
expect(normalizeToolArguments('UnknownTool', undefined)).toEqual({})
})
})
describe('Read tool', () => {
test('wraps plain string into { file_path }', () => {
expect(normalizeToolArguments('Read', '/home/user/file.txt')).toEqual({
file_path: '/home/user/file.txt',
})
})
test('wraps JSON-encoded string into { file_path }', () => {
expect(normalizeToolArguments('Read', '"/home/user/file.txt"')).toEqual({
file_path: '/home/user/file.txt',
})
})
test('passes through structured JSON object', () => {
expect(
normalizeToolArguments('Read', '{"file_path":"/tmp/f.txt","limit":10}'),
).toEqual({ file_path: '/tmp/f.txt', limit: 10 })
})
})
describe('Write tool', () => {
test('wraps plain string into { file_path }', () => {
expect(normalizeToolArguments('Write', '/tmp/out.txt')).toEqual({
file_path: '/tmp/out.txt',
})
})
test('passes through structured JSON object', () => {
expect(
normalizeToolArguments(
'Write',
'{"file_path":"/tmp/out.txt","content":"hello"}',
),
).toEqual({ file_path: '/tmp/out.txt', content: 'hello' })
})
})
describe('Edit tool', () => {
test('wraps plain string into { file_path }', () => {
expect(normalizeToolArguments('Edit', '/tmp/edit.ts')).toEqual({
file_path: '/tmp/edit.ts',
})
})
test('passes through structured JSON object', () => {
expect(
normalizeToolArguments(
'Edit',
'{"file_path":"/tmp/f.ts","old_string":"a","new_string":"b"}',
),
).toEqual({ file_path: '/tmp/f.ts', old_string: 'a', new_string: 'b' })
})
})
describe('Glob tool', () => {
test('wraps plain string into { pattern }', () => {
expect(normalizeToolArguments('Glob', '**/*.ts')).toEqual({
pattern: '**/*.ts',
})
})
test('passes through structured JSON object', () => {
expect(
normalizeToolArguments('Glob', '{"pattern":"*.js","path":"/src"}'),
).toEqual({ pattern: '*.js', path: '/src' })
})
})
describe('Grep tool', () => {
test('wraps plain string into { pattern }', () => {
expect(normalizeToolArguments('Grep', 'TODO')).toEqual({
pattern: 'TODO',
})
})
test('passes through structured JSON object', () => {
expect(
normalizeToolArguments('Grep', '{"pattern":"fixme","path":"/src"}'),
).toEqual({ pattern: 'fixme', path: '/src' })
})
})
describe('unknown tools', () => {
test('returns empty object for plain string (no known field mapping)', () => {
expect(normalizeToolArguments('UnknownTool', 'some value')).toEqual({})
})
test('passes through structured JSON object', () => {
expect(
normalizeToolArguments('UnknownTool', '{"key":"val"}'),
).toEqual({ key: 'val' })
})
test('preserves JSON literals as-is', () => {
expect(normalizeToolArguments('UnknownTool', 'false')).toEqual(false)
expect(normalizeToolArguments('UnknownTool', 'null')).toEqual(null)
expect(normalizeToolArguments('UnknownTool', '[]')).toEqual([])
})
test('returns parsed string for JSON-encoded string on unknown tools', () => {
expect(normalizeToolArguments('UnknownTool', '"hello"')).toEqual(
'hello',
)
})
})
})

View File

@@ -1,69 +0,0 @@
const STRING_ARGUMENT_TOOL_FIELDS: Record<string, string> = {
Bash: 'command',
Read: 'file_path',
Write: 'file_path',
Edit: 'file_path',
Glob: 'pattern',
Grep: 'pattern',
}
function isBlankString(value: string): boolean {
return value.trim().length === 0
}
function isLikelyStructuredObjectLiteral(value: string): boolean {
// Match object-like patterns with key-value syntax:
// {"key":, {key:, {'key':, { "key" :, etc.
// But NOT bash compound commands like { pwd; } or { echo hi; }
return /^\s*\{\s*['"]?\w+['"]?\s*:/.test(value)
}
function isRecord(value: unknown): value is Record<string, unknown> {
return typeof value === 'object' && value !== null && !Array.isArray(value)
}
function getPlainStringToolArgumentField(toolName: string): string | null {
return STRING_ARGUMENT_TOOL_FIELDS[toolName] ?? null
}
export function hasToolFieldMapping(toolName: string): boolean {
return toolName in STRING_ARGUMENT_TOOL_FIELDS
}
function wrapPlainStringToolArguments(
toolName: string,
value: string,
): Record<string, string> | null {
const field = getPlainStringToolArgumentField(toolName)
if (!field) return null
return { [field]: value }
}
export function normalizeToolArguments(
toolName: string,
rawArguments: string | undefined,
): unknown {
if (rawArguments === undefined) return {}
try {
const parsed = JSON.parse(rawArguments)
if (isRecord(parsed)) {
return parsed
}
// Parsed as a non-object JSON value (string, number, boolean, null, array)
if (typeof parsed === 'string' && !isBlankString(parsed)) {
return wrapPlainStringToolArguments(toolName, parsed) ?? parsed
}
// For blank strings, booleans, null, arrays — pass through as-is
// and let Zod schema validation produce a meaningful error
return parsed
} catch {
// rawArguments is not valid JSON — treat as a plain string
if (isBlankString(rawArguments) || isLikelyStructuredObjectLiteral(rawArguments)) {
// Blank or looks like a malformed object literal — don't wrap into
// a tool field to avoid turning garbage into executable input
return {}
}
return wrapPlainStringToolArguments(toolName, rawArguments) ?? {}
}
}

View File

@@ -1,7 +1,6 @@
import { afterEach, describe, expect, mock, test } from 'bun:test' import { afterEach, describe, expect, mock, test } from 'bun:test'
import { import {
DEFAULT_GITHUB_DEVICE_SCOPE,
GitHubDeviceFlowError, GitHubDeviceFlowError,
pollAccessToken, pollAccessToken,
requestDeviceCode, requestDeviceCode,
@@ -49,81 +48,6 @@ describe('requestDeviceCode', () => {
requestDeviceCode({ clientId: 'x', fetchImpl: globalThis.fetch }), requestDeviceCode({ clientId: 'x', fetchImpl: globalThis.fetch }),
).rejects.toThrow(GitHubDeviceFlowError) ).rejects.toThrow(GitHubDeviceFlowError)
}) })
test('uses OAuth-safe default scope', async () => {
let capturedScope = ''
globalThis.fetch = mock((_url: RequestInfo | URL, init?: RequestInit) => {
const body = init?.body
if (body instanceof URLSearchParams) {
capturedScope = body.get('scope') ?? ''
} else {
capturedScope = new URLSearchParams(String(body ?? '')).get('scope') ?? ''
}
return Promise.resolve(
new Response(
JSON.stringify({
device_code: 'abc',
user_code: 'ABCD-1234',
verification_uri: 'https://github.com/login/device',
}),
{ status: 200 },
),
)
})
await requestDeviceCode({ clientId: 'test-client', fetchImpl: globalThis.fetch })
expect(capturedScope).toBe(DEFAULT_GITHUB_DEVICE_SCOPE)
expect(capturedScope).toBe('read:user')
})
test('retries with OAuth-safe scope on invalid_scope', async () => {
const scopesSeen: string[] = []
let callCount = 0
globalThis.fetch = mock((_url: RequestInfo | URL, init?: RequestInit) => {
const body = init?.body
const scope =
body instanceof URLSearchParams
? body.get('scope') ?? ''
: new URLSearchParams(String(body ?? '')).get('scope') ?? ''
scopesSeen.push(scope)
callCount++
if (callCount === 1) {
return Promise.resolve(
new Response(
JSON.stringify({
error: 'invalid_scope',
error_description: 'invalid models scope',
}),
{ status: 400 },
),
)
}
return Promise.resolve(
new Response(
JSON.stringify({
device_code: 'abc',
user_code: 'ABCD-1234',
verification_uri: 'https://github.com/login/device',
}),
{ status: 200 },
),
)
})
const result = await requestDeviceCode({
clientId: 'test-client',
scope: 'read:user,models:read',
fetchImpl: globalThis.fetch,
})
expect(result.device_code).toBe('abc')
expect(callCount).toBe(2)
expect(scopesSeen).toEqual(['read:user,models:read', 'read:user'])
})
}) })
describe('pollAccessToken', () => { describe('pollAccessToken', () => {

View File

@@ -10,10 +10,8 @@ export const GITHUB_DEVICE_CODE_URL = 'https://github.com/login/device/code'
 export const GITHUB_DEVICE_ACCESS_TOKEN_URL =
   'https://github.com/login/oauth/access_token'
-// OAuth app device flow does not accept the GitHub Models permission token
-// scope (models:read). Use an OAuth-safe default.
-const OAUTH_SAFE_GITHUB_DEVICE_SCOPE = 'read:user'
-export const DEFAULT_GITHUB_DEVICE_SCOPE = OAUTH_SAFE_GITHUB_DEVICE_SCOPE
+/** Match runtime devsper github_oauth DEFAULT_SCOPE */
+export const DEFAULT_GITHUB_DEVICE_SCOPE = 'read:user,models:read'
 export class GitHubDeviceFlowError extends Error {
   constructor(message: string) {
@@ -53,61 +51,38 @@ export async function requestDeviceCode(options?: {
     )
   }
   const fetchFn = options?.fetchImpl ?? fetch
-  const requestedScope =
-    options?.scope?.trim() || DEFAULT_GITHUB_DEVICE_SCOPE
-  const scopesToTry =
-    requestedScope === OAUTH_SAFE_GITHUB_DEVICE_SCOPE
-      ? [requestedScope]
-      : [requestedScope, OAUTH_SAFE_GITHUB_DEVICE_SCOPE]
-  let lastError = 'Device code request failed.'
-  for (const scope of scopesToTry) {
-    const res = await fetchFn(GITHUB_DEVICE_CODE_URL, {
-      method: 'POST',
-      headers: { Accept: 'application/json' },
-      body: new URLSearchParams({
-        client_id: clientId,
-        scope,
-      }),
-    })
-    if (!res.ok) {
-      const text = await res.text().catch(() => '')
-      lastError = `Device code request failed: ${res.status} ${text}`
-      const isInvalidScope = /invalid_scope/i.test(text)
-      const canRetryWithFallback =
-        scope !== OAUTH_SAFE_GITHUB_DEVICE_SCOPE && isInvalidScope
-      if (canRetryWithFallback) {
-        continue
-      }
-      throw new GitHubDeviceFlowError(lastError)
-    }
-    const data = (await res.json()) as Record<string, unknown>
-    const device_code = data.device_code
-    const user_code = data.user_code
-    const verification_uri = data.verification_uri
-    if (
-      typeof device_code !== 'string' ||
-      typeof user_code !== 'string' ||
-      typeof verification_uri !== 'string'
-    ) {
-      throw new GitHubDeviceFlowError(
-        'Malformed device code response from GitHub',
-      )
-    }
-    return {
-      device_code,
-      user_code,
-      verification_uri,
-      expires_in: typeof data.expires_in === 'number' ? data.expires_in : 900,
-      interval: typeof data.interval === 'number' ? data.interval : 5,
-    }
-  }
-  throw new GitHubDeviceFlowError(lastError)
+  const res = await fetchFn(GITHUB_DEVICE_CODE_URL, {
+    method: 'POST',
+    headers: { Accept: 'application/json' },
+    body: new URLSearchParams({
+      client_id: clientId,
+      scope: options?.scope ?? DEFAULT_GITHUB_DEVICE_SCOPE,
+    }),
+  })
+  if (!res.ok) {
+    const text = await res.text().catch(() => '')
+    throw new GitHubDeviceFlowError(
+      `Device code request failed: ${res.status} ${text}`,
+    )
+  }
+  const data = (await res.json()) as Record<string, unknown>
+  const device_code = data.device_code
+  const user_code = data.user_code
+  const verification_uri = data.verification_uri
+  if (
+    typeof device_code !== 'string' ||
+    typeof user_code !== 'string' ||
+    typeof verification_uri !== 'string'
+  ) {
+    throw new GitHubDeviceFlowError('Malformed device code response from GitHub')
+  }
+  return {
+    device_code,
+    user_code,
+    verification_uri,
+    expires_in: typeof data.expires_in === 'number' ? data.expires_in : 900,
+    interval: typeof data.interval === 'number' ? data.interval : 5,
+  }
 }
 export type PollOptions = {

View File

@@ -9,7 +9,6 @@ import { getGlobalConfig, saveGlobalConfig } from '../utils/config.js'
import { toError } from '../utils/errors.js' import { toError } from '../utils/errors.js'
import { logError } from '../utils/log.js' import { logError } from '../utils/log.js'
import { applyConfigEnvironmentVariables } from '../utils/managedEnv.js' import { applyConfigEnvironmentVariables } from '../utils/managedEnv.js'
import { persistActiveProviderProfileModel } from '../utils/providerProfiles.js'
import { import {
permissionModeFromString, permissionModeFromString,
toExternalPermissionMode, toExternalPermissionMode,
@@ -111,12 +110,6 @@ export function onChangeAppState({
// Save to settings // Save to settings
updateSettingsForSource('userSettings', { model: newState.mainLoopModel }) updateSettingsForSource('userSettings', { model: newState.mainLoopModel })
setMainLoopModelOverride(newState.mainLoopModel) setMainLoopModelOverride(newState.mainLoopModel)
// Keep active provider profiles in sync with /model choices so restarts
// keep using the last selected model instead of the profile's old default.
if (process.env.CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED === '1') {
persistActiveProviderProfileModel(newState.mainLoopModel)
}
} }
// expandedView → persist as showExpandedTodos + showSpinnerTree for backwards compat // expandedView → persist as showExpandedTodos + showSpinnerTree for backwards compat

View File

@@ -1,85 +0,0 @@
import { describe, expect, test } from 'bun:test'
import {
extractAtMentionedFiles,
extractMcpResourceMentions,
} from './attachments.js'
// Contract tests for the two @-mention extractors.
//
// Scope: the narrow contract between `extractAtMentionedFiles` and
// `extractMcpResourceMentions` where both are called on the same input
// and must not both claim the same token. The motivating bug is that
// `extractMcpResourceMentions`'s `\b` anchor lets it backtrack over the
// closing quote of a quoted file mention, producing a ghost match for
// `@"C:\Users\..."`. These tests pin the boundary so any regression in
// the MCP regex is caught immediately.
describe('extractor contract', () => {
describe('extractMcpResourceMentions must return empty for', () => {
const cases: Array<[string, string]> = [
// Primary bug: the quoted form that PromptInput emits for Windows
// paths today. `\b` backtracks past the trailing `"` and produces
// a ghost MCP match on current HEAD.
['a quoted Windows drive-letter path', '@"C:\\Users\\me\\file.txt"'],
// Even if the quote layer were stripped, a bare drive letter
// followed by a path separator is never an MCP resource.
['an unquoted Windows drive-letter path', '@C:\\Users\\me\\file.txt'],
// Sanity: quoted POSIX paths with no `:` at all never matched the
// MCP regex and must keep not matching after the fix.
['a quoted POSIX path with a space', '@"/Users/foo/my file.ts"'],
['an unquoted POSIX path', '@/Users/foo/bar.ts'],
// Quoted POSIX path that embeds a `:` in the filename — the quote
// layer must shield it from MCP matching, same as the Windows case.
['a quoted POSIX path with a colon in the name', '@"/tmp/weird:name.txt"'],
]
test.each(cases)('%s', (_label, input) => {
expect(extractMcpResourceMentions(input)).toEqual([])
})
})
describe('extractMcpResourceMentions still matches legitimate MCP mentions', () => {
// Regression guard for the fix. If someone tightens the MCP regex
// too aggressively, these break and the intent is clear.
const cases: Array<[string, string, string[]]> = [
[
'a simple server:resource token',
'@server:resource/path',
['server:resource/path'],
],
[
'a plugin-scoped server name with a dash',
'@asana-plugin:project-status/123',
['asana-plugin:project-status/123'],
],
[
'an MCP mention inline in prose',
'please check @server:res here',
['server:res'],
],
]
test.each(cases)('%s', (_label, input, expected) => {
expect(extractMcpResourceMentions(input)).toEqual(expected)
})
})
describe('extractAtMentionedFiles extracts the file paths it should', () => {
// Asserted separately from the MCP side: the bug is purely in the
// MCP extractor over-matching, so these assertions are the
// "baseline still works" half of the contract.
const cases: Array<[string, string, string[]]> = [
[
'a quoted Windows drive-letter path',
'@"C:\\Users\\me\\file.txt"',
['C:\\Users\\me\\file.txt'],
],
[
'a quoted POSIX path with a space',
'@"/Users/foo/my file.ts"',
['/Users/foo/my file.ts'],
],
['an unquoted POSIX path', '@/Users/foo/bar.ts', ['/Users/foo/bar.ts']],
]
test.each(cases)('%s', (_label, input, expected) => {
expect(extractAtMentionedFiles(input)).toEqual(expected)
})
})
})

View File

@@ -2793,30 +2793,11 @@ export function extractAtMentionedFiles(content: string): string[] {
export function extractMcpResourceMentions(content: string): string[] { export function extractMcpResourceMentions(content: string): string[] {
// Extract MCP resources mentioned with @ symbol in format @server:uri // Extract MCP resources mentioned with @ symbol in format @server:uri
// Example: "@server1:resource/path" would extract "server1:resource/path" // Example: "@server1:resource/path" would extract "server1:resource/path"
// const atMentionRegex = /(^|\s)@([^\s]+:[^\s]+)\b/g
// Two guards against Windows-path / quoted-file collisions (see
// `attachments.extractors.test.ts`):
//
// 1. `(?!")` right after `@` drops quoted tokens entirely. The earlier
// form (without the lookahead and with `[^\s]` character classes)
// backtracked past the closing `"` at the `\b` anchor and produced
// ghost matches like `"C:\Users\...\file.txt` for any quoted file
// mention containing a colon.
// 2. The `"` added to the character classes is belt-and-braces: even
// if the lookahead were later removed or bypassed, the engine can
// no longer consume a quote character mid-match.
const atMentionRegex = /(^|\s)@(?!")([^\s"]+:[^\s"]+)\b/g
const matches = content.match(atMentionRegex) || [] const matches = content.match(atMentionRegex) || []
return uniq( // Remove the prefix (everything before @) from each match
matches return uniq(matches.map(match => match.slice(match.indexOf('@') + 1)))
.map(match => match.slice(match.indexOf('@') + 1))
// Post-match filter: a single-letter "server" followed by `:\` or
// `:/` is always a Windows drive-letter prefix, never a real MCP
// resource. This covers the unquoted `@C:\Users\...` case that
// the regex alone cannot disambiguate from `@server:resource`.
.filter(m => !/^[A-Za-z]:[\\/]/.test(m)),
)
} }
export function extractAgentMentions(content: string): string[] { export function extractAgentMentions(content: string): string[] {
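
For reference, the guarded extraction that the removed comments describe can be read as a self-contained function. This is a sketch only: `uniq` is replaced with a `Set`, and the function name is illustrative rather than anything exported by `attachments.ts`.

```ts
// Standalone sketch of the guarded MCP-mention extraction (name illustrative).
function extractMcpResourceMentionsSketch(content: string): string[] {
  // (?!") drops quoted tokens such as @"C:\Users\me\file.txt" outright, and the
  // quote-free character classes keep the engine from consuming a quote mid-match.
  const atMentionRegex = /(^|\s)@(?!")([^\s"]+:[^\s"]+)\b/g
  const matches = content.match(atMentionRegex) ?? []
  const tokens = matches
    .map(match => match.slice(match.indexOf('@') + 1))
    // A single-letter "server" followed by :\ or :/ is a Windows drive prefix,
    // never an MCP resource, so drop it after matching.
    .filter(token => !/^[A-Za-z]:[\\/]/.test(token))
  return [...new Set(tokens)]
}

// Mirrors the contract-test cases above:
extractMcpResourceMentionsSketch('@"C:\\Users\\me\\file.txt"')    // []
extractMcpResourceMentionsSketch('@C:\\Users\\me\\file.txt')       // []
extractMcpResourceMentionsSketch('please check @server:res here')  // ['server:res']
```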

View File

@@ -576,7 +576,6 @@ export type GlobalConfig = {
// Additional model options for the model picker (fetched during bootstrap). // Additional model options for the model picker (fetched during bootstrap).
additionalModelOptionsCache?: ModelOption[] additionalModelOptionsCache?: ModelOption[]
additionalModelOptionsCacheScope?: string
// Additional model options discovered from OpenAI-compatible endpoints. // Additional model options discovered from OpenAI-compatible endpoints.
openaiAdditionalModelOptionsCache?: ModelOption[] openaiAdditionalModelOptionsCache?: ModelOption[]

View File

@@ -1,100 +0,0 @@
import { afterAll, describe, expect, test } from 'bun:test'
import { mkdirSync, mkdtempSync, rmSync, writeFileSync } from 'fs'
import { tmpdir } from 'os'
import { join } from 'path'
import { extractDraggedFilePaths } from './dragDropPaths.js'
describe('extractDraggedFilePaths', () => {
// Paths that exist on any system.
const thisFile = import.meta.path
const packageJson = `${process.cwd()}/package.json`
// Fixtures created synchronously at describe-load time (not in
// `beforeAll`) so their paths are available to `test.each` tables,
// which are built before any hook runs.
const tmpDir = mkdtempSync(join(tmpdir(), 'dragdrop-test-'))
const spacedFile = join(tmpDir, 'my file.txt')
writeFileSync(spacedFile, 'test')
const scopedDir = join(tmpDir, '@types')
mkdirSync(scopedDir)
const atSignFile = join(scopedDir, 'index.d.ts')
writeFileSync(atSignFile, 'test')
afterAll(() => {
rmSync(tmpDir, { recursive: true, force: true })
})
describe('returns an empty array', () => {
const emptyCases: Array<[string, string]> = [
['a non-absolute path', 'relative/path/file.ts'],
['a plain image path', '/Users/foo/image.png'],
['an uppercase image extension', '/Users/foo/SHOT.PNG'],
['a double-quoted image path', '"/Users/foo/shot.png"'],
['a single-quoted image path', "'/Users/foo/shot.jpg'"],
['regular prose text', 'hello world this is text'],
['a nonexistent absolute path', '/definitely/nonexistent/file.ts'],
['a single-quoted nonexistent path', "'/definitely/nonexistent.ts'"],
['an empty string', ''],
['whitespace only', ' \n '],
// Mixed-segment cases: all-or-nothing policy means a single bad
// entry disqualifies the whole paste.
['a mix where one path does not exist', `${thisFile}\n/nonexistent/file.ts`],
['a mix where one segment is an image', `${thisFile}\n/Users/foo/shot.png`],
]
test.each(emptyCases)('for %s', (_label, input) => {
expect(extractDraggedFilePaths(input)).toEqual([])
})
})
describe('resolves a single path', () => {
const singleCases: Array<[string, string, string]> = [
['a plain absolute path', thisFile, thisFile],
['a double-quoted path', `"${thisFile}"`, thisFile],
['a single-quoted path', `'${thisFile}'`, thisFile],
['a path with leading/trailing whitespace', ` ${thisFile} `, thisFile],
// Realistic: dragging something under `node_modules/@types/...`.
// `@` inside the path must not collide with the mention prefix
// that the caller prepends downstream.
['a path containing an `@` segment', atSignFile, atSignFile],
]
test.each(singleCases)('from %s', (_label, input, expected) => {
expect(extractDraggedFilePaths(input)).toEqual([expected])
})
})
describe('resolves multiple paths', () => {
const multiCases: Array<[string, string, string[]]> = [
[
'newline-separated',
`${thisFile}\n${packageJson}`,
[thisFile, packageJson],
],
[
'space-separated (Finder drag)',
`${thisFile} ${packageJson}`,
[thisFile, packageJson],
],
]
test.each(multiCases)('when input is %s', (_label, input, expected) => {
expect(extractDraggedFilePaths(input)).toEqual(expected)
})
})
// Backslash-escaped paths are a Finder/macOS + Linux convention — on
// Windows the shell-escape step is skipped, so these cases do not apply.
if (process.platform !== 'win32') {
describe('handles backslash-escaped paths', () => {
test('returns empty for an escaped image path', () => {
// The image check must apply after escape stripping so Finder
// image drags still route to the image paste handler.
expect(extractDraggedFilePaths('/Users/foo/my\\ shot.png')).toEqual([])
})
test('resolves an escaped real file with a space in its name', () => {
// Raw form matches what a terminal delivers on Finder drag.
const escaped = spacedFile.replace(/ /g, '\\ ')
expect(extractDraggedFilePaths(escaped)).toEqual([spacedFile])
})
})
}
})

View File

@@ -1,55 +0,0 @@
import { existsSync } from 'fs'
import { isAbsolute } from 'path'
// Inlined to avoid pulling the full `imagePaste.ts` module (which imports
// `bun:bundle`) into this file's dependency graph. Must stay in sync with
// `IMAGE_EXTENSION_REGEX` in `./imagePaste.ts`.
const IMAGE_EXTENSION_REGEX = /\.(png|jpe?g|gif|webp)$/i
/**
* Detect absolute file paths in pasted text (typically from drag-and-drop).
* Returns the cleaned paths if ALL segments are existing non-image files,
* or an empty array otherwise.
*
* Splitting logic mirrors usePasteHandler: space preceding `/` or a Windows
* drive letter, plus newline separators.
*/
export function extractDraggedFilePaths(text: string): string[] {
const segments = text
.split(/ (?=\/|[A-Za-z]:\\)/)
.flatMap(part => part.split('\n'))
.map(s => s.trim())
.filter(Boolean)
if (segments.length === 0) return []
const cleaned: string[] = []
for (const raw of segments) {
// Strip outer quotes and shell-escape backslashes
let p = raw
if (
(p.startsWith('"') && p.endsWith('"')) ||
(p.startsWith("'") && p.endsWith("'"))
) {
p = p.slice(1, -1)
}
if (process.platform !== 'win32') {
p = p.replace(/\\(.)/g, '$1')
}
// Image files are handled by the upstream image paste handler.
// Check against the cleaned path so quoted/escaped image paths like
// `"/foo/shot.png"` or `/foo/my\ shot.png` are reliably excluded.
if (IMAGE_EXTENSION_REGEX.test(p)) return []
if (!isAbsolute(p)) return []
// Verify the path actually exists on disk. Plain `fs.existsSync` is
// used intentionally here instead of the wrapped `getFsImplementation`
// to keep this module free of the heavy `fsOperations` dependency
// chain — this is a pure existence check with no permission semantics.
if (!existsSync(p)) return []
cleaned.push(p)
}
return cleaned
}
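
Typical call sites, matching the cases in the deleted test file above; the paths are placeholders and the results depend on what actually exists on disk:

```ts
import { extractDraggedFilePaths } from './dragDropPaths.js'

// All segments must be existing, absolute, non-image files, or the whole
// paste is rejected (all-or-nothing policy).
extractDraggedFilePaths('"/Users/foo/my file.ts"')
// -> ['/Users/foo/my file.ts'] if that file exists, [] otherwise

extractDraggedFilePaths('/Users/foo/a.ts /Users/foo/shot.png')
// -> [] (an image segment routes the paste to the image handler instead)

extractDraggedFilePaths('relative/path/file.ts')
// -> [] (non-absolute paths are never treated as drags)
```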

View File

@@ -10,8 +10,6 @@ describe('hydrateGithubModelsTokenFromSecureStorage', () => {
CLAUDE_CODE_USE_GITHUB: process.env.CLAUDE_CODE_USE_GITHUB, CLAUDE_CODE_USE_GITHUB: process.env.CLAUDE_CODE_USE_GITHUB,
GITHUB_TOKEN: process.env.GITHUB_TOKEN, GITHUB_TOKEN: process.env.GITHUB_TOKEN,
GH_TOKEN: process.env.GH_TOKEN, GH_TOKEN: process.env.GH_TOKEN,
CLAUDE_CODE_GITHUB_TOKEN_HYDRATED:
process.env.CLAUDE_CODE_GITHUB_TOKEN_HYDRATED,
CLAUDE_CODE_SIMPLE: process.env.CLAUDE_CODE_SIMPLE, CLAUDE_CODE_SIMPLE: process.env.CLAUDE_CODE_SIMPLE,
} }
@@ -45,13 +43,11 @@ describe('hydrateGithubModelsTokenFromSecureStorage', () => {
) )
hydrateGithubModelsTokenFromSecureStorage() hydrateGithubModelsTokenFromSecureStorage()
expect(process.env.GITHUB_TOKEN).toBe('stored-secret') expect(process.env.GITHUB_TOKEN).toBe('stored-secret')
expect(process.env.CLAUDE_CODE_GITHUB_TOKEN_HYDRATED).toBe('1')
}) })
test('does not override existing GITHUB_TOKEN', async () => { test('does not override existing GITHUB_TOKEN', async () => {
process.env.CLAUDE_CODE_USE_GITHUB = '1' process.env.CLAUDE_CODE_USE_GITHUB = '1'
process.env.GITHUB_TOKEN = 'already' process.env.GITHUB_TOKEN = 'already'
delete process.env.CLAUDE_CODE_GITHUB_TOKEN_HYDRATED
mock.module('./secureStorage/index.js', () => ({ mock.module('./secureStorage/index.js', () => ({
getSecureStorage: () => ({ getSecureStorage: () => ({
@@ -66,6 +62,5 @@ describe('hydrateGithubModelsTokenFromSecureStorage', () => {
) )
hydrateGithubModelsTokenFromSecureStorage() hydrateGithubModelsTokenFromSecureStorage()
expect(process.env.GITHUB_TOKEN).toBe('already') expect(process.env.GITHUB_TOKEN).toBe('already')
expect(process.env.CLAUDE_CODE_GITHUB_TOKEN_HYDRATED).toBeUndefined()
}) })
}) })

View File

@@ -3,8 +3,6 @@ import { getSecureStorage } from './secureStorage/index.js'
/** JSON key in the shared OpenClaude secure storage blob. */ /** JSON key in the shared OpenClaude secure storage blob. */
export const GITHUB_MODELS_STORAGE_KEY = 'githubModels' as const export const GITHUB_MODELS_STORAGE_KEY = 'githubModels' as const
export const GITHUB_MODELS_HYDRATED_ENV_MARKER =
'CLAUDE_CODE_GITHUB_TOKEN_HYDRATED' as const
export type GithubModelsCredentialBlob = { export type GithubModelsCredentialBlob = {
accessToken: string accessToken: string
@@ -29,28 +27,18 @@ export function readGithubModelsToken(): string | undefined {
*/ */
export function hydrateGithubModelsTokenFromSecureStorage(): void { export function hydrateGithubModelsTokenFromSecureStorage(): void {
if (!isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB)) { if (!isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB)) {
delete process.env[GITHUB_MODELS_HYDRATED_ENV_MARKER]
return return
} }
if (process.env.GH_TOKEN?.trim()) { if (process.env.GITHUB_TOKEN?.trim() || process.env.GH_TOKEN?.trim()) {
delete process.env[GITHUB_MODELS_HYDRATED_ENV_MARKER]
return
}
if (process.env.GITHUB_TOKEN?.trim()) {
delete process.env[GITHUB_MODELS_HYDRATED_ENV_MARKER]
return return
} }
if (isBareMode()) { if (isBareMode()) {
delete process.env[GITHUB_MODELS_HYDRATED_ENV_MARKER]
return return
} }
const t = readGithubModelsToken() const t = readGithubModelsToken()
if (t) { if (t) {
process.env.GITHUB_TOKEN = t process.env.GITHUB_TOKEN = t
process.env[GITHUB_MODELS_HYDRATED_ENV_MARKER] = '1'
return
} }
delete process.env[GITHUB_MODELS_HYDRATED_ENV_MARKER]
} }
export function saveGithubModelsToken(token: string): { export function saveGithubModelsToken(token: string): {

View File

@@ -1,5 +1,4 @@
import { feature } from 'bun:bundle' import { feature } from 'bun:bundle'
import { getAPIProvider } from './model/providers.js'
import type { BetaUsage as Usage } from '@anthropic-ai/sdk/resources/beta/messages/messages.mjs' import type { BetaUsage as Usage } from '@anthropic-ai/sdk/resources/beta/messages/messages.mjs'
import type { import type {
ContentBlock, ContentBlock,
@@ -1766,7 +1765,6 @@ export function stripCallerFieldFromAssistantMessage(
id: block.id, id: block.id,
name: block.name, name: block.name,
input: block.input, input: block.input,
...(getAPIProvider() === 'gemini' && (block as any).extra_content ? { extra_content: (block as any).extra_content } : {})
} }
}), }),
}, },
@@ -2223,24 +2221,21 @@ export function normalizeMessagesForAPI(
// When tool search is enabled, preserve all fields including 'caller' // When tool search is enabled, preserve all fields including 'caller'
if (toolSearchEnabled) { if (toolSearchEnabled) {
const { extra_content, ...restBlock } = block as any
return { return {
...restBlock, ...block,
name: canonicalName, name: canonicalName,
input: normalizedInput, input: normalizedInput,
...(getAPIProvider() === 'gemini' && extra_content ? { extra_content } : {})
} }
} }
// When tool search is NOT enabled, explicitly construct tool_use // When tool search is NOT enabled, explicitly construct tool_use
// block with only standard API fields to avoid sending fields like // block with only standard API fields to avoid sending fields like
// 'caller' that may be stored in sessions from tool search runs // 'caller' that may be stored in sessions from tool search runs
return { return {
type: 'tool_use' as const, type: 'tool_use' as const,
id: block.id, id: block.id,
name: canonicalName, name: canonicalName,
input: normalizedInput, input: normalizedInput,
...(getAPIProvider() === 'gemini' && (block as any).extra_content ? { extra_content: (block as any).extra_content } : {})
} }
} }
return block return block
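
The pattern in the removed lines (carrying a provider-specific field through a rebuilt `tool_use` block only when the active provider understands it) is easier to see in isolation. A minimal sketch, with the types and the provider check simplified for illustration:

```ts
type ToolUseBlock = {
  type: 'tool_use'
  id: string
  name: string
  input: unknown
  extra_content?: unknown
}

function rebuildToolUse(block: ToolUseBlock, provider: string): ToolUseBlock {
  return {
    type: 'tool_use',
    id: block.id,
    name: block.name,
    input: block.input,
    // Only the Gemini path round-trips extra_content; other providers would
    // reject the unknown field, so it is spread in conditionally.
    ...(provider === 'gemini' && block.extra_content !== undefined
      ? { extra_content: block.extra_content }
      : {}),
  }
}
```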

View File

@@ -80,9 +80,7 @@ export function getUserSpecifiedModelSetting(): ModelSetting | undefined {
const provider = getAPIProvider() const provider = getAPIProvider()
specifiedModel = specifiedModel =
(provider === 'gemini' ? process.env.GEMINI_MODEL : undefined) || (provider === 'gemini' ? process.env.GEMINI_MODEL : undefined) ||
(provider === 'openai' || provider === 'gemini' || provider === 'github' (provider === 'openai' || provider === 'gemini' ? process.env.OPENAI_MODEL : undefined) ||
? process.env.OPENAI_MODEL
: undefined) ||
(provider === 'firstParty' ? process.env.ANTHROPIC_MODEL : undefined) || (provider === 'firstParty' ? process.env.ANTHROPIC_MODEL : undefined) ||
settings.model || settings.model ||
undefined undefined
@@ -239,10 +237,6 @@ export function getDefaultMainLoopModelSetting(): ModelName | ModelAlias {
if (getAPIProvider() === 'openai') { if (getAPIProvider() === 'openai') {
return process.env.OPENAI_MODEL || 'gpt-4o' return process.env.OPENAI_MODEL || 'gpt-4o'
} }
// GitHub provider: always use the configured GitHub model
if (getAPIProvider() === 'github') {
return process.env.OPENAI_MODEL || 'github:copilot'
}
// Codex provider: always use the configured Codex model (default gpt-5.4) // Codex provider: always use the configured Codex model (default gpt-5.4)
if (getAPIProvider() === 'codex') { if (getAPIProvider() === 'codex') {
return process.env.OPENAI_MODEL || 'gpt-5.4' return process.env.OPENAI_MODEL || 'gpt-5.4'

View File

@@ -1,83 +0,0 @@
import { afterEach, beforeEach, expect, mock, test } from 'bun:test'
import { resetModelStringsForTestingOnly } from '../../bootstrap/state.js'
import { saveGlobalConfig } from '../config.js'
async function importFreshModelOptionsModule() {
mock.restore()
mock.module('./providers.js', () => ({
getAPIProvider: () => 'github',
}))
const nonce = `${Date.now()}-${Math.random()}`
return import(`./modelOptions.js?ts=${nonce}`)
}
const originalEnv = {
CLAUDE_CODE_USE_GITHUB: process.env.CLAUDE_CODE_USE_GITHUB,
CLAUDE_CODE_USE_OPENAI: process.env.CLAUDE_CODE_USE_OPENAI,
CLAUDE_CODE_USE_GEMINI: process.env.CLAUDE_CODE_USE_GEMINI,
CLAUDE_CODE_USE_BEDROCK: process.env.CLAUDE_CODE_USE_BEDROCK,
CLAUDE_CODE_USE_VERTEX: process.env.CLAUDE_CODE_USE_VERTEX,
CLAUDE_CODE_USE_FOUNDRY: process.env.CLAUDE_CODE_USE_FOUNDRY,
OPENAI_MODEL: process.env.OPENAI_MODEL,
OPENAI_BASE_URL: process.env.OPENAI_BASE_URL,
ANTHROPIC_CUSTOM_MODEL_OPTION: process.env.ANTHROPIC_CUSTOM_MODEL_OPTION,
}
beforeEach(() => {
mock.restore()
delete process.env.CLAUDE_CODE_USE_GITHUB
delete process.env.CLAUDE_CODE_USE_OPENAI
delete process.env.CLAUDE_CODE_USE_GEMINI
delete process.env.CLAUDE_CODE_USE_BEDROCK
delete process.env.CLAUDE_CODE_USE_VERTEX
delete process.env.CLAUDE_CODE_USE_FOUNDRY
delete process.env.OPENAI_MODEL
delete process.env.OPENAI_BASE_URL
delete process.env.ANTHROPIC_CUSTOM_MODEL_OPTION
resetModelStringsForTestingOnly()
})
afterEach(() => {
process.env.CLAUDE_CODE_USE_GITHUB = originalEnv.CLAUDE_CODE_USE_GITHUB
process.env.CLAUDE_CODE_USE_OPENAI = originalEnv.CLAUDE_CODE_USE_OPENAI
process.env.CLAUDE_CODE_USE_GEMINI = originalEnv.CLAUDE_CODE_USE_GEMINI
process.env.CLAUDE_CODE_USE_BEDROCK = originalEnv.CLAUDE_CODE_USE_BEDROCK
process.env.CLAUDE_CODE_USE_VERTEX = originalEnv.CLAUDE_CODE_USE_VERTEX
process.env.CLAUDE_CODE_USE_FOUNDRY = originalEnv.CLAUDE_CODE_USE_FOUNDRY
process.env.OPENAI_MODEL = originalEnv.OPENAI_MODEL
process.env.OPENAI_BASE_URL = originalEnv.OPENAI_BASE_URL
process.env.ANTHROPIC_CUSTOM_MODEL_OPTION =
originalEnv.ANTHROPIC_CUSTOM_MODEL_OPTION
saveGlobalConfig(current => ({
...current,
additionalModelOptionsCache: [],
additionalModelOptionsCacheScope: undefined,
openaiAdditionalModelOptionsCache: [],
openaiAdditionalModelOptionsCacheByProfile: {},
providerProfiles: [],
activeProviderProfileId: undefined,
}))
resetModelStringsForTestingOnly()
})
test('GitHub provider exposes only default + GitHub model in /model options', async () => {
process.env.CLAUDE_CODE_USE_GITHUB = '1'
delete process.env.CLAUDE_CODE_USE_OPENAI
delete process.env.CLAUDE_CODE_USE_GEMINI
delete process.env.CLAUDE_CODE_USE_BEDROCK
delete process.env.CLAUDE_CODE_USE_VERTEX
delete process.env.CLAUDE_CODE_USE_FOUNDRY
process.env.OPENAI_MODEL = 'github:copilot'
delete process.env.ANTHROPIC_CUSTOM_MODEL_OPTION
const { getModelOptions } = await importFreshModelOptionsModule()
const options = getModelOptions(false)
const nonDefault = options.filter(
(option: { value: unknown }) => option.value !== null,
)
expect(nonDefault.length).toBe(1)
expect(nonDefault[0]?.value).toBe('github:copilot')
})
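
The fresh-import helper in the deleted test relies on a cache-busting query string so that `mock.module()` substitutions and mutated `process.env` are re-read by module-level state. A stripped-down version of the pattern, with the module path left as a parameter, might look like this:

```ts
import { mock } from 'bun:test'

// Re-evaluate a module from scratch: appending a unique query string defeats
// the ESM module cache, so top-level reads of process.env happen again.
async function importFresh(modulePath: string) {
  mock.restore()
  const nonce = `${Date.now()}-${Math.random()}`
  return import(`${modulePath}?ts=${nonce}`)
}

// Typical shape of a test that flips provider env vars between imports:
// process.env.CLAUDE_CODE_USE_GITHUB = '1'
// const { getModelOptions } = await importFresh('./modelOptions.js')
```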

View File

@@ -1,6 +1,5 @@
// biome-ignore-all assist/source/organizeImports: internal-only import markers must not be reordered // biome-ignore-all assist/source/organizeImports: internal-only import markers must not be reordered
import { getInitialMainLoopModel } from '../../bootstrap/state.js' import { getInitialMainLoopModel } from '../../bootstrap/state.js'
import { getAdditionalModelOptionsCacheScope } from '../../services/api/providerConfig.js'
import { import {
isClaudeAISubscriber, isClaudeAISubscriber,
isMaxSubscriber, isMaxSubscriber,
@@ -45,25 +44,6 @@ export type ModelOption = {
descriptionForModel?: string descriptionForModel?: string
} }
function getScopedAdditionalModelOptions(): ModelOption[] {
const config = getGlobalConfig()
const activeScope = getAdditionalModelOptionsCacheScope()
if (!activeScope) {
return []
}
if (config.additionalModelOptionsCacheScope !== undefined) {
return config.additionalModelOptionsCacheScope === activeScope
? (config.additionalModelOptionsCache ?? [])
: []
}
return activeScope === 'firstParty'
? (config.additionalModelOptionsCache ?? [])
: []
}
export function getDefaultOptionForUser(fastMode = false): ModelOption { export function getDefaultOptionForUser(fastMode = false): ModelOption {
if (process.env.USER_TYPE === 'ant') { if (process.env.USER_TYPE === 'ant') {
const currentModel = renderDefaultModelSetting( const currentModel = renderDefaultModelSetting(
@@ -352,18 +332,6 @@ function getCodexModelOptions(): ModelOption[] {
// @[MODEL LAUNCH]: Update the model picker lists below to include/reorder options for the new model. // @[MODEL LAUNCH]: Update the model picker lists below to include/reorder options for the new model.
// Each user tier (ant, Max/Team Premium, Pro/Team Standard/Enterprise, PAYG 1P, PAYG 3P) has its own list. // Each user tier (ant, Max/Team Premium, Pro/Team Standard/Enterprise, PAYG 1P, PAYG 3P) has its own list.
function getModelOptionsBase(fastMode = false): ModelOption[] { function getModelOptionsBase(fastMode = false): ModelOption[] {
if (getAPIProvider() === 'github') {
const githubModel = process.env.OPENAI_MODEL?.trim() || 'github:copilot'
return [
getDefaultOptionForUser(fastMode),
{
value: githubModel,
label: githubModel,
description: 'GitHub Models default',
},
]
}
// When using Ollama, show models from the Ollama server instead of Claude models // When using Ollama, show models from the Ollama server instead of Claude models
if (getAPIProvider() === 'openai' && isOllamaProvider()) { if (getAPIProvider() === 'openai' && isOllamaProvider()) {
const defaultOption = getDefaultOptionForUser(fastMode) const defaultOption = getDefaultOptionForUser(fastMode)
@@ -440,16 +408,6 @@ function getModelOptionsBase(fastMode = false): ModelOption[] {
return standardOptions return standardOptions
} }
if (getAdditionalModelOptionsCacheScope()?.startsWith('openai:')) {
const activeOpenAIOptions = getActiveOpenAIModelOptionsCache()
return [
getDefaultOptionForUser(fastMode),
...(activeOpenAIOptions.length > 0
? activeOpenAIOptions
: getScopedAdditionalModelOptions()),
]
}
// PAYG 1P API: Default (Sonnet) + Sonnet 1M + Opus 4.6 + Opus 1M + Haiku // PAYG 1P API: Default (Sonnet) + Sonnet 1M + Opus 4.6 + Opus 1M + Haiku
if (getAPIProvider() === 'firstParty') { if (getAPIProvider() === 'firstParty') {
const payg1POptions = [getDefaultOptionForUser(fastMode)] const payg1POptions = [getDefaultOptionForUser(fastMode)]
@@ -591,10 +549,6 @@ function getKnownModelOption(model: string): ModelOption | null {
} }
export function getModelOptions(fastMode = false): ModelOption[] { export function getModelOptions(fastMode = false): ModelOption[] {
if (getAPIProvider() === 'github') {
return filterModelOptionsByAllowlist(getModelOptionsBase(fastMode))
}
const options = getModelOptionsBase(fastMode) const options = getModelOptionsBase(fastMode)
// Add the custom model from the ANTHROPIC_CUSTOM_MODEL_OPTION env var // Add the custom model from the ANTHROPIC_CUSTOM_MODEL_OPTION env var
@@ -612,8 +566,13 @@ export function getModelOptions(fastMode = false): ModelOption[] {
}) })
} }
// Append additional model options fetched during bootstrap const additionalOptions =
for (const opt of getScopedAdditionalModelOptions()) { getAPIProvider() === 'openai'
? getActiveOpenAIModelOptionsCache()
: getGlobalConfig().additionalModelOptionsCache ?? []
// Append additional model options fetched during bootstrap/endpoints.
for (const opt of additionalOptions) {
if (!options.some(existing => existing.value === opt.value)) { if (!options.some(existing => existing.value === opt.value)) {
options.push(opt) options.push(opt)
} }
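
The scope check performed by the removed `getScopedAdditionalModelOptions` helper is worth restating on its own: a cached list is only trusted when it was fetched under the scope that is currently active, and legacy configs without a recorded scope are treated as first-party. A minimal sketch of that rule (types and names are illustrative):

```ts
type ModelOption = { value: string; label: string }

function readScopedCache(
  cache: ModelOption[] | undefined,
  cacheScope: string | undefined,
  activeScope: string | undefined,
): ModelOption[] {
  if (!activeScope) return []
  // Legacy configs predate the scope field; treat their cache as first-party only.
  if (cacheScope === undefined) {
    return activeScope === 'firstParty' ? (cache ?? []) : []
  }
  return cacheScope === activeScope ? (cache ?? []) : []
}
```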

View File

@@ -1,54 +0,0 @@
import { afterEach, expect, test } from 'bun:test'
import { resetModelStringsForTestingOnly } from '../../bootstrap/state.js'
import { parseUserSpecifiedModel } from './model.js'
import { getModelStrings } from './modelStrings.js'
const originalEnv = {
CLAUDE_CODE_USE_GITHUB: process.env.CLAUDE_CODE_USE_GITHUB,
CLAUDE_CODE_USE_OPENAI: process.env.CLAUDE_CODE_USE_OPENAI,
CLAUDE_CODE_USE_GEMINI: process.env.CLAUDE_CODE_USE_GEMINI,
CLAUDE_CODE_USE_BEDROCK: process.env.CLAUDE_CODE_USE_BEDROCK,
CLAUDE_CODE_USE_VERTEX: process.env.CLAUDE_CODE_USE_VERTEX,
CLAUDE_CODE_USE_FOUNDRY: process.env.CLAUDE_CODE_USE_FOUNDRY,
}
function clearProviderFlags(): void {
delete process.env.CLAUDE_CODE_USE_GITHUB
delete process.env.CLAUDE_CODE_USE_OPENAI
delete process.env.CLAUDE_CODE_USE_GEMINI
delete process.env.CLAUDE_CODE_USE_BEDROCK
delete process.env.CLAUDE_CODE_USE_VERTEX
delete process.env.CLAUDE_CODE_USE_FOUNDRY
}
afterEach(() => {
process.env.CLAUDE_CODE_USE_GITHUB = originalEnv.CLAUDE_CODE_USE_GITHUB
process.env.CLAUDE_CODE_USE_OPENAI = originalEnv.CLAUDE_CODE_USE_OPENAI
process.env.CLAUDE_CODE_USE_GEMINI = originalEnv.CLAUDE_CODE_USE_GEMINI
process.env.CLAUDE_CODE_USE_BEDROCK = originalEnv.CLAUDE_CODE_USE_BEDROCK
process.env.CLAUDE_CODE_USE_VERTEX = originalEnv.CLAUDE_CODE_USE_VERTEX
process.env.CLAUDE_CODE_USE_FOUNDRY = originalEnv.CLAUDE_CODE_USE_FOUNDRY
resetModelStringsForTestingOnly()
})
test('GitHub provider model strings are concrete IDs', () => {
clearProviderFlags()
process.env.CLAUDE_CODE_USE_GITHUB = '1'
const modelStrings = getModelStrings()
for (const value of Object.values(modelStrings)) {
expect(typeof value).toBe('string')
expect(value.trim().length).toBeGreaterThan(0)
}
})
test('GitHub provider model strings are safe to parse', () => {
clearProviderFlags()
process.env.CLAUDE_CODE_USE_GITHUB = '1'
const modelStrings = getModelStrings()
expect(() => parseUserSpecifiedModel(modelStrings.sonnet46 as any)).not.toThrow()
})

View File

@@ -25,7 +25,7 @@ const MODEL_KEYS = Object.keys(ALL_MODEL_CONFIGS) as ModelKey[]
function getBuiltinModelStrings(provider: APIProvider): ModelStrings { function getBuiltinModelStrings(provider: APIProvider): ModelStrings {
// Codex piggybacks on the OpenAI provider transport for Anthropic tier aliases. // Codex piggybacks on the OpenAI provider transport for Anthropic tier aliases.
// Reuse OpenAI mappings so model string lookups never return undefined. // Reuse OpenAI mappings so model string lookups never return undefined.
const providerKey = provider === 'codex' || provider === 'github' ? 'openai' : provider const providerKey = provider === 'codex' ? 'openai' : provider
const out = {} as ModelStrings const out = {} as ModelStrings
for (const key of MODEL_KEYS) { for (const key of MODEL_KEYS) {
out[key] = ALL_MODEL_CONFIGS[key][providerKey] out[key] = ALL_MODEL_CONFIGS[key][providerKey]

View File

@@ -23,13 +23,9 @@ const OPENAI_CONTEXT_WINDOWS: Record<string, number> = {
'gpt-4.1-nano': 1_047_576, 'gpt-4.1-nano': 1_047_576,
'gpt-4-turbo': 128_000, 'gpt-4-turbo': 128_000,
'gpt-4': 8_192, 'gpt-4': 8_192,
'o1': 200_000,
'o1-mini': 128_000,
'o1-preview': 128_000,
'o1-pro': 200_000,
'o3': 200_000,
'o3-mini': 200_000, 'o3-mini': 200_000,
'o4-mini': 200_000, 'o4-mini': 200_000,
'o3': 200_000,
// DeepSeek (V3: 128k context per official docs) // DeepSeek (V3: 128k context per official docs)
'deepseek-chat': 128_000, 'deepseek-chat': 128_000,
@@ -67,9 +63,6 @@ const OPENAI_CONTEXT_WINDOWS: Record<string, number> = {
'phi4:14b': 16_384, 'phi4:14b': 16_384,
'gemma2:27b': 8_192, 'gemma2:27b': 8_192,
'codellama:13b': 16_384, 'codellama:13b': 16_384,
'llama3.2:1b': 128_000,
'qwen3:8b': 128_000,
'codestral': 32_768,
} }
/** /**
@@ -89,13 +82,9 @@ const OPENAI_MAX_OUTPUT_TOKENS: Record<string, number> = {
'gpt-4.1-nano': 32_768, 'gpt-4.1-nano': 32_768,
'gpt-4-turbo': 4_096, 'gpt-4-turbo': 4_096,
'gpt-4': 4_096, 'gpt-4': 4_096,
'o1': 100_000,
'o1-mini': 65_536,
'o1-preview': 32_768,
'o1-pro': 100_000,
'o3': 100_000,
'o3-mini': 100_000, 'o3-mini': 100_000,
'o4-mini': 100_000, 'o4-mini': 100_000,
'o3': 100_000,
// DeepSeek // DeepSeek
'deepseek-chat': 8_192, 'deepseek-chat': 8_192,
@@ -131,9 +120,6 @@ const OPENAI_MAX_OUTPUT_TOKENS: Record<string, number> = {
'phi4:14b': 4_096, 'phi4:14b': 4_096,
'gemma2:27b': 4_096, 'gemma2:27b': 4_096,
'codellama:13b': 4_096, 'codellama:13b': 4_096,
'llama3.2:1b': 4_096,
'qwen3:8b': 8_192,
'codestral': 8_192,
} }
function lookupByModel<T>(table: Record<string, T>, model: string): T | undefined { function lookupByModel<T>(table: Record<string, T>, model: string): T | undefined {

View File

@@ -7,9 +7,6 @@ const originalEnv = {
CLAUDE_CODE_USE_BEDROCK: process.env.CLAUDE_CODE_USE_BEDROCK, CLAUDE_CODE_USE_BEDROCK: process.env.CLAUDE_CODE_USE_BEDROCK,
CLAUDE_CODE_USE_VERTEX: process.env.CLAUDE_CODE_USE_VERTEX, CLAUDE_CODE_USE_VERTEX: process.env.CLAUDE_CODE_USE_VERTEX,
CLAUDE_CODE_USE_FOUNDRY: process.env.CLAUDE_CODE_USE_FOUNDRY, CLAUDE_CODE_USE_FOUNDRY: process.env.CLAUDE_CODE_USE_FOUNDRY,
OPENAI_BASE_URL: process.env.OPENAI_BASE_URL,
OPENAI_API_BASE: process.env.OPENAI_API_BASE,
OPENAI_MODEL: process.env.OPENAI_MODEL,
} }
afterEach(() => { afterEach(() => {
@@ -19,9 +16,6 @@ afterEach(() => {
process.env.CLAUDE_CODE_USE_BEDROCK = originalEnv.CLAUDE_CODE_USE_BEDROCK process.env.CLAUDE_CODE_USE_BEDROCK = originalEnv.CLAUDE_CODE_USE_BEDROCK
process.env.CLAUDE_CODE_USE_VERTEX = originalEnv.CLAUDE_CODE_USE_VERTEX process.env.CLAUDE_CODE_USE_VERTEX = originalEnv.CLAUDE_CODE_USE_VERTEX
process.env.CLAUDE_CODE_USE_FOUNDRY = originalEnv.CLAUDE_CODE_USE_FOUNDRY process.env.CLAUDE_CODE_USE_FOUNDRY = originalEnv.CLAUDE_CODE_USE_FOUNDRY
process.env.OPENAI_BASE_URL = originalEnv.OPENAI_BASE_URL
process.env.OPENAI_API_BASE = originalEnv.OPENAI_API_BASE
process.env.OPENAI_MODEL = originalEnv.OPENAI_MODEL
}) })
async function importFreshProvidersModule() { async function importFreshProvidersModule() {
@@ -35,9 +29,6 @@ function clearProviderEnv(): void {
delete process.env.CLAUDE_CODE_USE_BEDROCK delete process.env.CLAUDE_CODE_USE_BEDROCK
delete process.env.CLAUDE_CODE_USE_VERTEX delete process.env.CLAUDE_CODE_USE_VERTEX
delete process.env.CLAUDE_CODE_USE_FOUNDRY delete process.env.CLAUDE_CODE_USE_FOUNDRY
delete process.env.OPENAI_BASE_URL
delete process.env.OPENAI_API_BASE
delete process.env.OPENAI_MODEL
} }
test('first-party provider keeps Anthropic account setup flow enabled', () => { test('first-party provider keeps Anthropic account setup flow enabled', () => {
@@ -78,32 +69,3 @@ test('GEMINI takes precedence over GitHub when both are set', async () => {
expect(getAPIProvider()).toBe('gemini') expect(getAPIProvider()).toBe('gemini')
}) })
test('explicit local openai-compatible base URLs stay on the openai provider', async () => {
clearProviderEnv()
process.env.CLAUDE_CODE_USE_OPENAI = '1'
process.env.OPENAI_BASE_URL = 'http://127.0.0.1:8080/v1'
process.env.OPENAI_MODEL = 'gpt-5.4'
const { getAPIProvider } = await importFreshProvidersModule()
expect(getAPIProvider()).toBe('openai')
})
test('codex aliases still resolve to the codex provider without a non-codex base URL', async () => {
clearProviderEnv()
process.env.CLAUDE_CODE_USE_OPENAI = '1'
process.env.OPENAI_MODEL = 'codexplan'
const { getAPIProvider } = await importFreshProvidersModule()
expect(getAPIProvider()).toBe('codex')
})
test('official OpenAI base URLs now keep provider detection on openai for aliases', async () => {
clearProviderEnv()
process.env.CLAUDE_CODE_USE_OPENAI = '1'
process.env.OPENAI_BASE_URL = 'https://api.openai.com/v1'
process.env.OPENAI_MODEL = 'gpt-5.4'
const { getAPIProvider } = await importFreshProvidersModule()
expect(getAPIProvider()).toBe('openai')
})

View File

@@ -1,5 +1,5 @@
import type { AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS } from '../../services/analytics/index.js' import type { AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS } from '../../services/analytics/index.js'
import { shouldUseCodexTransport } from '../../services/api/providerConfig.js' import { isCodexAlias } from '../../services/api/providerConfig.js'
import { isEnvTruthy } from '../envUtils.js' import { isEnvTruthy } from '../envUtils.js'
export type APIProvider = export type APIProvider =
@@ -34,10 +34,11 @@ export function usesAnthropicAccountFlow(): boolean {
return getAPIProvider() === 'firstParty' return getAPIProvider() === 'firstParty'
} }
function isCodexModel(): boolean { function isCodexModel(): boolean {
return shouldUseCodexTransport( const model = (process.env.OPENAI_MODEL || '').trim()
process.env.OPENAI_MODEL || '', if (!model) return false
process.env.OPENAI_BASE_URL ?? process.env.OPENAI_API_BASE, // Delegate to the canonical alias table in providerConfig to keep
) // the two Codex detection systems (provider type + transport) in sync.
return isCodexAlias(model)
} }
export function getAPIProviderForStatsig(): AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS { export function getAPIProviderForStatsig(): AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS {

View File

@@ -1,71 +0,0 @@
import { describe, expect, test } from 'bun:test'
import type { LoadedPlugin } from '../../types/plugin.js'
import { mergePluginSources } from './pluginLoader.js'
function marketplacePlugin(
name: string,
marketplace: string,
enabled: boolean,
): LoadedPlugin {
const pluginId = `${name}@${marketplace}`
return {
name,
manifest: { name } as LoadedPlugin['manifest'],
path: `/tmp/${pluginId}`,
source: pluginId,
repository: pluginId,
enabled,
}
}
describe('mergePluginSources', () => {
test('keeps the enabled copy when duplicate marketplace plugins disagree on enabled state', () => {
const enabledOfficial = marketplacePlugin(
'frontend-design',
'claude-plugins-official',
true,
)
const disabledLegacy = marketplacePlugin(
'frontend-design',
'claude-code-plugins',
false,
)
const result = mergePluginSources({
session: [],
marketplace: [disabledLegacy, enabledOfficial],
builtin: [],
})
expect(result.plugins).toEqual([enabledOfficial])
expect(result.errors).toEqual([])
})
test('keeps the later copy when duplicate marketplace plugins are both enabled', () => {
const legacy = marketplacePlugin(
'frontend-design',
'claude-code-plugins',
true,
)
const official = marketplacePlugin(
'frontend-design',
'claude-plugins-official',
true,
)
const result = mergePluginSources({
session: [],
marketplace: [legacy, official],
builtin: [],
})
expect(result.plugins).toEqual([official])
expect(result.errors).toHaveLength(1)
expect(result.errors[0]).toMatchObject({
type: 'generic-error',
source: legacy.source,
plugin: legacy.name,
})
})
})

View File

@@ -3045,63 +3045,24 @@ export function mergePluginSources(sources: {
}) })
const sessionNames = new Set(sessionPlugins.map(p => p.name)) const sessionNames = new Set(sessionPlugins.map(p => p.name))
// Different marketplaces can enable the same short plugin name, but const marketplacePlugins = sources.marketplace.filter(p => {
// downstream command/skill loading scopes by plugin.name. if (sessionNames.has(p.name)) {
const marketplacePluginsByName = new Map<string, LoadedPlugin>()
for (const plugin of sources.marketplace) {
if (sessionNames.has(plugin.name)) {
logForDebugging( logForDebugging(
`Plugin "${plugin.name}" from --plugin-dir overrides installed version`, `Plugin "${p.name}" from --plugin-dir overrides installed version`,
) )
continue return false
} }
const existing = marketplacePluginsByName.get(plugin.name) return true
if (!existing) { })
marketplacePluginsByName.set(plugin.name, plugin)
continue
}
const winner = selectMarketplacePlugin(existing, plugin)
const dropped = winner === existing ? plugin : existing
marketplacePluginsByName.set(plugin.name, winner)
logForDebugging(
`Ignoring duplicate marketplace plugin "${plugin.name}" from ${dropped.source}; using ${winner.source}`,
{ level: 'warn' },
)
if (existing.enabled && plugin.enabled) {
errors.push({
type: 'generic-error',
source: dropped.source,
plugin: plugin.name,
error: `Duplicate marketplace plugin "${plugin.name}" ignored: using "${winner.source}" and skipping "${dropped.source}" to avoid short-name collisions`,
})
}
}
// Session first, then non-overridden marketplace, then builtin. // Session first, then non-overridden marketplace, then builtin.
// Downstream first-match consumers see session plugins before // Downstream first-match consumers see session plugins before
// installed ones for any that slipped past the name filter. // installed ones for any that slipped past the name filter.
return { return {
plugins: [ plugins: [...sessionPlugins, ...marketplacePlugins, ...sources.builtin],
...sessionPlugins,
...marketplacePluginsByName.values(),
...sources.builtin,
],
errors, errors,
} }
} }
function selectMarketplacePlugin(
current: LoadedPlugin,
candidate: LoadedPlugin,
): LoadedPlugin {
if (current.enabled !== candidate.enabled) {
return candidate.enabled ? candidate : current
}
return candidate
}
/** /**
* Main plugin loading function that discovers and loads all plugins. * Main plugin loading function that discovers and loads all plugins.
* *
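
The duplicate-resolution rule that the removed `selectMarketplacePlugin` helper implemented, and that the deleted tests pin down, can be restated independently of the plugin loader: prefer the enabled copy of a duplicated short name, let the later copy win when both agree, and only report a conflict when two enabled copies collide. An illustrative sketch:

```ts
type PluginLike = { name: string; source: string; enabled: boolean }

function dedupeByName<T extends PluginLike>(
  plugins: T[],
): { kept: T[]; conflicts: Array<{ winner: T; dropped: T }> } {
  const byName = new Map<string, T>()
  const conflicts: Array<{ winner: T; dropped: T }> = []
  for (const plugin of plugins) {
    const existing = byName.get(plugin.name)
    if (!existing) {
      byName.set(plugin.name, plugin)
      continue
    }
    // Prefer the enabled copy; when both copies agree, the later one wins.
    const winner =
      existing.enabled !== plugin.enabled
        ? plugin.enabled
          ? plugin
          : existing
        : plugin
    const dropped = winner === existing ? plugin : existing
    // Only a clash of two *enabled* copies is worth surfacing to the user.
    if (existing.enabled && plugin.enabled) {
      conflicts.push({ winner, dropped })
    }
    byName.set(plugin.name, winner)
  }
  return { kept: [...byName.values()], conflicts }
}
```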

View File

@@ -1,78 +0,0 @@
import { afterEach, expect, mock, test } from 'bun:test'
import {
getLocalOpenAICompatibleProviderLabel,
listOpenAICompatibleModels,
} from './providerDiscovery.js'
const originalFetch = globalThis.fetch
const originalEnv = {
OPENAI_BASE_URL: process.env.OPENAI_BASE_URL,
}
afterEach(() => {
globalThis.fetch = originalFetch
process.env.OPENAI_BASE_URL = originalEnv.OPENAI_BASE_URL
})
test('lists models from a local openai-compatible /models endpoint', async () => {
globalThis.fetch = mock((input, init) => {
const url = typeof input === 'string' ? input : input.url
expect(url).toBe('http://localhost:1234/v1/models')
expect(init?.headers).toEqual({ Authorization: 'Bearer local-key' })
return Promise.resolve(
new Response(
JSON.stringify({
data: [
{ id: 'qwen2.5-coder-7b-instruct' },
{ id: 'llama-3.2-3b-instruct' },
{ id: 'qwen2.5-coder-7b-instruct' },
],
}),
{ status: 200 },
),
)
}) as typeof globalThis.fetch
await expect(
listOpenAICompatibleModels({
baseUrl: 'http://localhost:1234/v1',
apiKey: 'local-key',
}),
).resolves.toEqual([
'qwen2.5-coder-7b-instruct',
'llama-3.2-3b-instruct',
])
})
test('returns null when a local openai-compatible /models request fails', async () => {
globalThis.fetch = mock(() =>
Promise.resolve(new Response('not available', { status: 503 })),
) as typeof globalThis.fetch
await expect(
listOpenAICompatibleModels({ baseUrl: 'http://localhost:1234/v1' }),
).resolves.toBeNull()
})
test('detects LM Studio from the default localhost port', () => {
expect(getLocalOpenAICompatibleProviderLabel('http://localhost:1234/v1')).toBe(
'LM Studio',
)
})
test('detects common local openai-compatible providers by hostname', () => {
expect(
getLocalOpenAICompatibleProviderLabel('http://localai.local:8080/v1'),
).toBe('LocalAI')
expect(
getLocalOpenAICompatibleProviderLabel('http://vllm.local:8000/v1'),
).toBe('vLLM')
})
test('falls back to a generic local openai-compatible label', () => {
expect(
getLocalOpenAICompatibleProviderLabel('http://127.0.0.1:8080/v1'),
).toBe('Local OpenAI-compatible')
})

View File

@@ -1,5 +1,4 @@
import type { OllamaModelDescriptor } from './providerRecommendation.ts' import type { OllamaModelDescriptor } from './providerRecommendation.ts'
import { DEFAULT_OPENAI_BASE_URL } from '../services/api/providerConfig.js'
export const DEFAULT_OLLAMA_BASE_URL = 'http://localhost:11434' export const DEFAULT_OLLAMA_BASE_URL = 'http://localhost:11434'
export const DEFAULT_ATOMIC_CHAT_BASE_URL = 'http://127.0.0.1:1337' export const DEFAULT_ATOMIC_CHAT_BASE_URL = 'http://127.0.0.1:1337'
@@ -54,64 +53,6 @@ export function getAtomicChatChatBaseUrl(baseUrl?: string): string {
return `${getAtomicChatApiBaseUrl(baseUrl)}/v1` return `${getAtomicChatApiBaseUrl(baseUrl)}/v1`
} }
export function getOpenAICompatibleModelsBaseUrl(baseUrl?: string): string {
return (
baseUrl || process.env.OPENAI_BASE_URL || DEFAULT_OPENAI_BASE_URL
).replace(/\/+$/, '')
}
export function getLocalOpenAICompatibleProviderLabel(baseUrl?: string): string {
try {
const parsed = new URL(getOpenAICompatibleModelsBaseUrl(baseUrl))
const host = parsed.host.toLowerCase()
const hostname = parsed.hostname.toLowerCase()
const path = parsed.pathname.toLowerCase()
const haystack = `${hostname} ${path}`
if (
host.endsWith(':1234') ||
haystack.includes('lmstudio') ||
haystack.includes('lm-studio')
) {
return 'LM Studio'
}
if (host.endsWith(':11434') || haystack.includes('ollama')) {
return 'Ollama'
}
if (haystack.includes('localai')) {
return 'LocalAI'
}
if (haystack.includes('jan')) {
return 'Jan'
}
if (haystack.includes('kobold')) {
return 'KoboldCpp'
}
if (haystack.includes('llama.cpp') || haystack.includes('llamacpp')) {
return 'llama.cpp'
}
if (haystack.includes('vllm')) {
return 'vLLM'
}
if (
haystack.includes('open-webui') ||
haystack.includes('openwebui')
) {
return 'Open WebUI'
}
if (
haystack.includes('text-generation-webui') ||
haystack.includes('oobabooga')
) {
return 'text-generation-webui'
}
} catch {
// Fall back to the generic label when the base URL is malformed.
}
return 'Local OpenAI-compatible'
}
export async function hasLocalOllama(baseUrl?: string): Promise<boolean> { export async function hasLocalOllama(baseUrl?: string): Promise<boolean> {
const { signal, clear } = withTimeoutSignal(1200) const { signal, clear } = withTimeoutSignal(1200)
try { try {
@@ -170,46 +111,6 @@ export async function listOllamaModels(
} }
} }
export async function listOpenAICompatibleModels(options?: {
baseUrl?: string
apiKey?: string
}): Promise<string[] | null> {
const { signal, clear } = withTimeoutSignal(5000)
try {
const response = await fetch(
`${getOpenAICompatibleModelsBaseUrl(options?.baseUrl)}/models`,
{
method: 'GET',
headers: options?.apiKey
? {
Authorization: `Bearer ${options.apiKey}`,
}
: undefined,
signal,
},
)
if (!response.ok) {
return null
}
const data = (await response.json()) as {
data?: Array<{ id?: string }>
}
return Array.from(
new Set(
(data.data ?? [])
.filter(model => Boolean(model.id))
.map(model => model.id!),
),
)
} catch {
return null
} finally {
clear()
}
}
export async function hasLocalAtomicChat(baseUrl?: string): Promise<boolean> { export async function hasLocalAtomicChat(baseUrl?: string): Promise<boolean> {
const { signal, clear } = withTimeoutSignal(1200) const { signal, clear } = withTimeoutSignal(1200)
try { try {
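
Usage of the removed discovery helpers, as the deleted `providerDiscovery` tests exercised them; the endpoint, key, and model IDs below are placeholders for whatever a local server actually reports:

```ts
import {
  getLocalOpenAICompatibleProviderLabel,
  listOpenAICompatibleModels,
} from './providerDiscovery.js'

const models = await listOpenAICompatibleModels({
  baseUrl: 'http://localhost:1234/v1',
  apiKey: 'local-key',
})
// -> ['qwen2.5-coder-7b-instruct', ...] on success, or null when the request
//    fails, times out (5s), or returns a non-2xx status
console.log(models)

getLocalOpenAICompatibleProviderLabel('http://localhost:1234/v1') // 'LM Studio'
getLocalOpenAICompatibleProviderLabel('http://127.0.0.1:8080/v1') // 'Local OpenAI-compatible'
```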

View File

@@ -1,4 +1,4 @@
import { afterEach, beforeEach, describe, expect, test } from 'bun:test' import { describe, expect, test, afterEach } from 'bun:test'
import { import {
parseProviderFlag, parseProviderFlag,
applyProviderFlag, applyProviderFlag,
@@ -8,26 +8,18 @@ import {
const originalEnv = { ...process.env } const originalEnv = { ...process.env }
const RESET_KEYS = [
'CLAUDE_CODE_USE_OPENAI',
'CLAUDE_CODE_USE_GEMINI',
'CLAUDE_CODE_USE_GITHUB',
'CLAUDE_CODE_USE_BEDROCK',
'CLAUDE_CODE_USE_VERTEX',
'OPENAI_BASE_URL',
'OPENAI_API_KEY',
'OPENAI_MODEL',
'GEMINI_MODEL',
] as const
beforeEach(() => {
for (const key of RESET_KEYS) {
delete process.env[key]
}
})
afterEach(() => { afterEach(() => {
for (const key of RESET_KEYS) { for (const key of [
'CLAUDE_CODE_USE_OPENAI',
'CLAUDE_CODE_USE_GEMINI',
'CLAUDE_CODE_USE_GITHUB',
'CLAUDE_CODE_USE_BEDROCK',
'CLAUDE_CODE_USE_VERTEX',
'OPENAI_BASE_URL',
'OPENAI_API_KEY',
'OPENAI_MODEL',
'GEMINI_MODEL',
]) {
if (originalEnv[key] === undefined) delete process.env[key] if (originalEnv[key] === undefined) delete process.env[key]
else process.env[key] = originalEnv[key] else process.env[key] = originalEnv[key]
} }

View File

@@ -485,26 +485,6 @@ test('buildStartupEnvFromProfile leaves explicit provider selections untouched',
assert.equal(env.OPENAI_API_KEY, undefined) assert.equal(env.OPENAI_API_KEY, undefined)
}) })
test('buildStartupEnvFromProfile leaves profile-managed env untouched', async () => {
const processEnv = {
CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED: '1',
ANTHROPIC_BASE_URL: 'https://api.anthropic.com',
ANTHROPIC_MODEL: 'claude-sonnet-4-6',
}
const env = await buildStartupEnvFromProfile({
persisted: profile('openai', {
OPENAI_API_KEY: 'sk-persisted',
OPENAI_MODEL: 'gpt-4o',
}),
processEnv,
})
assert.equal(env, processEnv)
assert.equal(env.ANTHROPIC_MODEL, 'claude-sonnet-4-6')
assert.equal(env.OPENAI_MODEL, undefined)
})
test('buildStartupEnvFromProfile treats explicit falsey provider flags as user intent', async () => { test('buildStartupEnvFromProfile treats explicit falsey provider flags as user intent', async () => {
const processEnv = { const processEnv = {
CLAUDE_CODE_USE_OPENAI: '0', CLAUDE_CODE_USE_OPENAI: '0',

View File

@@ -407,11 +407,6 @@ export function deleteProfileFile(options?: ProfileFileLocation): string {
export function hasExplicitProviderSelection( export function hasExplicitProviderSelection(
processEnv: NodeJS.ProcessEnv = process.env, processEnv: NodeJS.ProcessEnv = process.env,
): boolean { ): boolean {
// If env was already applied from a provider profile, preserve it.
if (processEnv.CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED === '1') {
return true
}
return ( return (
processEnv.CLAUDE_CODE_USE_OPENAI !== undefined || processEnv.CLAUDE_CODE_USE_OPENAI !== undefined ||
processEnv.CLAUDE_CODE_USE_GITHUB !== undefined || processEnv.CLAUDE_CODE_USE_GITHUB !== undefined ||

View File

@@ -2,15 +2,10 @@ import { afterEach, describe, expect, mock, test } from 'bun:test'
import type { ProviderProfile } from './config.js' import type { ProviderProfile } from './config.js'
async function importFreshProvidersModule() {
return import(`./model/providers.ts?ts=${Date.now()}-${Math.random()}`)
}
const originalEnv = { ...process.env } const originalEnv = { ...process.env }
const RESTORED_KEYS = [ const RESTORED_KEYS = [
'CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED', 'CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED',
'CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED_ID',
'CLAUDE_CODE_USE_OPENAI', 'CLAUDE_CODE_USE_OPENAI',
'CLAUDE_CODE_USE_GEMINI', 'CLAUDE_CODE_USE_GEMINI',
'CLAUDE_CODE_USE_GITHUB', 'CLAUDE_CODE_USE_GITHUB',
@@ -26,35 +21,8 @@ const RESTORED_KEYS = [
'ANTHROPIC_API_KEY', 'ANTHROPIC_API_KEY',
] as const ] as const
type MockConfigState = {
providerProfiles: ProviderProfile[]
activeProviderProfileId?: string
openaiAdditionalModelOptionsCache: unknown[]
openaiAdditionalModelOptionsCacheByProfile: Record<string, unknown[]>
additionalModelOptionsCache?: unknown[]
additionalModelOptionsCacheScope?: string
}
function createMockConfigState(): MockConfigState {
return {
providerProfiles: [],
activeProviderProfileId: undefined,
openaiAdditionalModelOptionsCache: [],
openaiAdditionalModelOptionsCacheByProfile: {},
additionalModelOptionsCache: [],
additionalModelOptionsCacheScope: undefined,
}
}
let mockConfigState: MockConfigState = createMockConfigState()
function saveMockGlobalConfig(
updater: (current: MockConfigState) => MockConfigState,
): void {
mockConfigState = updater(mockConfigState)
}
afterEach(() => { afterEach(() => {
mock.restore()
for (const key of RESTORED_KEYS) { for (const key of RESTORED_KEYS) {
if (originalEnv[key] === undefined) { if (originalEnv[key] === undefined) {
delete process.env[key] delete process.env[key]
@@ -62,31 +30,8 @@ afterEach(() => {
process.env[key] = originalEnv[key] process.env[key] = originalEnv[key]
} }
} }
mock.restore()
mockConfigState = createMockConfigState()
}) })
async function importFreshProviderProfileModules() {
mock.restore()
mock.module('./config.js', () => ({
getGlobalConfig: () => mockConfigState,
saveGlobalConfig: (
updater: (current: MockConfigState) => MockConfigState,
) => {
mockConfigState = updater(mockConfigState)
},
}))
const nonce = `${Date.now()}-${Math.random()}`
const providers = await import(`./model/providers.js?ts=${nonce}`)
const providerProfiles = await import(`./providerProfiles.js?ts=${nonce}`)
return {
...providers,
...providerProfiles,
}
}
function buildProfile(overrides: Partial<ProviderProfile> = {}): ProviderProfile { function buildProfile(overrides: Partial<ProviderProfile> = {}): ProviderProfile {
return { return {
id: 'provider_test', id: 'provider_test',
@@ -98,31 +43,57 @@ function buildProfile(overrides: Partial<ProviderProfile> = {}): ProviderProfile
} }
} }
async function importFreshProviderModules() {
mock.restore()
let configState = {
providerProfiles: [] as ProviderProfile[],
activeProviderProfileId: undefined as string | undefined,
openaiAdditionalModelOptionsCache: [] as any[],
openaiAdditionalModelOptionsCacheByProfile: {} as Record<string, any[]>,
}
mock.module('./config.js', () => ({
getGlobalConfig: () => configState,
saveGlobalConfig: (
updater: (current: typeof configState) => typeof configState,
) => {
configState = updater(configState)
},
}))
const providerProfiles = await import(
`./providerProfiles.js?ts=${Date.now()}-${Math.random()}`
)
const providers = await import(
`./model/providers.js?ts=${Date.now()}-${Math.random()}`
)
return {
...providerProfiles,
...providers,
}
}
describe('applyProviderProfileToProcessEnv', () => { describe('applyProviderProfileToProcessEnv', () => {
test('openai profile clears competing gemini/github flags', async () => { test('openai profile clears competing gemini/github flags', async () => {
const { applyProviderProfileToProcessEnv } =
await importFreshProviderProfileModules()
process.env.CLAUDE_CODE_USE_GEMINI = '1' process.env.CLAUDE_CODE_USE_GEMINI = '1'
process.env.CLAUDE_CODE_USE_GITHUB = '1' process.env.CLAUDE_CODE_USE_GITHUB = '1'
const { applyProviderProfileToProcessEnv, getAPIProvider } =
await importFreshProviderModules()
applyProviderProfileToProcessEnv(buildProfile()) applyProviderProfileToProcessEnv(buildProfile())
const { getAPIProvider: getFreshAPIProvider } =
await importFreshProvidersModule()
expect(process.env.CLAUDE_CODE_USE_GEMINI).toBeUndefined() expect(process.env.CLAUDE_CODE_USE_GEMINI).toBeUndefined()
expect(process.env.CLAUDE_CODE_USE_GITHUB).toBeUndefined() expect(process.env.CLAUDE_CODE_USE_GITHUB).toBeUndefined()
expect(String(process.env.CLAUDE_CODE_USE_OPENAI)).toBe('1') expect(process.env.CLAUDE_CODE_USE_OPENAI).toBe('1')
expect(process.env.CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED_ID).toBe( expect(getAPIProvider()).toBe('openai')
'provider_test',
)
expect(getFreshAPIProvider()).toBe('openai')
}) })
test('anthropic profile clears competing gemini/github flags', async () => { test('anthropic profile clears competing gemini/github flags', async () => {
const { applyProviderProfileToProcessEnv } =
await importFreshProviderProfileModules()
process.env.CLAUDE_CODE_USE_GEMINI = '1' process.env.CLAUDE_CODE_USE_GEMINI = '1'
process.env.CLAUDE_CODE_USE_GITHUB = '1' process.env.CLAUDE_CODE_USE_GITHUB = '1'
const { applyProviderProfileToProcessEnv, getAPIProvider } =
await importFreshProviderModules()
applyProviderProfileToProcessEnv( applyProviderProfileToProcessEnv(
buildProfile({ buildProfile({
@@ -131,23 +102,21 @@ describe('applyProviderProfileToProcessEnv', () => {
model: 'claude-sonnet-4-6', model: 'claude-sonnet-4-6',
}), }),
) )
const { getAPIProvider: getFreshAPIProvider } =
await importFreshProvidersModule()
expect(process.env.CLAUDE_CODE_USE_GEMINI).toBeUndefined() expect(process.env.CLAUDE_CODE_USE_GEMINI).toBeUndefined()
expect(process.env.CLAUDE_CODE_USE_GITHUB).toBeUndefined() expect(process.env.CLAUDE_CODE_USE_GITHUB).toBeUndefined()
expect(process.env.CLAUDE_CODE_USE_OPENAI).toBeUndefined() expect(process.env.CLAUDE_CODE_USE_OPENAI).toBeUndefined()
expect(getFreshAPIProvider()).toBe('firstParty') expect(getAPIProvider()).toBe('firstParty')
}) })
}) })
describe('applyActiveProviderProfileFromConfig', () => { describe('applyActiveProviderProfileFromConfig', () => {
test('does not override explicit startup provider selection', async () => { test('does not override explicit startup provider selection', async () => {
const { applyActiveProviderProfileFromConfig } =
await importFreshProviderProfileModules()
process.env.CLAUDE_CODE_USE_OPENAI = '1' process.env.CLAUDE_CODE_USE_OPENAI = '1'
process.env.OPENAI_BASE_URL = 'http://localhost:11434/v1' process.env.OPENAI_BASE_URL = 'http://localhost:11434/v1'
process.env.OPENAI_MODEL = 'qwen2.5:3b' process.env.OPENAI_MODEL = 'qwen2.5:3b'
const { applyActiveProviderProfileFromConfig } =
await importFreshProviderModules()
const applied = applyActiveProviderProfileFromConfig({ const applied = applyActiveProviderProfileFromConfig({
providerProfiles: [ providerProfiles: [
@@ -166,12 +135,12 @@ describe('applyActiveProviderProfileFromConfig', () => {
}) })
test('does not override explicit startup selection when profile marker is stale', async () => { test('does not override explicit startup selection when profile marker is stale', async () => {
const { applyActiveProviderProfileFromConfig } =
await importFreshProviderProfileModules()
process.env.CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED = '1' process.env.CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED = '1'
process.env.CLAUDE_CODE_USE_OPENAI = '1' process.env.CLAUDE_CODE_USE_OPENAI = '1'
process.env.OPENAI_BASE_URL = 'http://localhost:11434/v1' process.env.OPENAI_BASE_URL = 'http://localhost:11434/v1'
process.env.OPENAI_MODEL = 'qwen2.5:3b' process.env.OPENAI_MODEL = 'qwen2.5:3b'
const { applyActiveProviderProfileFromConfig } =
await importFreshProviderModules()
const applied = applyActiveProviderProfileFromConfig({ const applied = applyActiveProviderProfileFromConfig({
providerProfiles: [ providerProfiles: [
@@ -185,74 +154,12 @@ describe('applyActiveProviderProfileFromConfig', () => {
} as any) } as any)
expect(applied).toBeUndefined() expect(applied).toBeUndefined()
expect(String(process.env.CLAUDE_CODE_USE_OPENAI)).toBe('1') expect(process.env.CLAUDE_CODE_USE_OPENAI).toBe('1')
expect(process.env.OPENAI_BASE_URL).toBe('http://localhost:11434/v1') expect(process.env.OPENAI_BASE_URL).toBe('http://localhost:11434/v1')
expect(process.env.OPENAI_MODEL).toBe('qwen2.5:3b') expect(process.env.OPENAI_MODEL).toBe('qwen2.5:3b')
}) })
test('re-applies active profile when profile-managed env drifts', async () => {
const { applyActiveProviderProfileFromConfig, applyProviderProfileToProcessEnv } =
await importFreshProviderProfileModules()
applyProviderProfileToProcessEnv(
buildProfile({
id: 'saved_openai',
baseUrl: 'http://192.168.33.108:11434/v1',
model: 'kimi-k2.5:cloud',
}),
)
// Simulate settings/env merge clobbering the model while profile flags remain.
process.env.OPENAI_MODEL = 'github:copilot'
const applied = applyActiveProviderProfileFromConfig({
providerProfiles: [
buildProfile({
id: 'saved_openai',
baseUrl: 'http://192.168.33.108:11434/v1',
model: 'kimi-k2.5:cloud',
}),
],
activeProviderProfileId: 'saved_openai',
} as any)
expect(applied?.id).toBe('saved_openai')
expect(process.env.OPENAI_MODEL).toBe('kimi-k2.5:cloud')
expect(process.env.OPENAI_BASE_URL).toBe('http://192.168.33.108:11434/v1')
})
test('does not re-apply active profile when flags conflict with current provider', async () => {
const { applyActiveProviderProfileFromConfig, applyProviderProfileToProcessEnv } =
await importFreshProviderProfileModules()
applyProviderProfileToProcessEnv(
buildProfile({
id: 'saved_openai',
baseUrl: 'http://192.168.33.108:11434/v1',
model: 'kimi-k2.5:cloud',
}),
)
process.env.CLAUDE_CODE_USE_GITHUB = '1'
process.env.OPENAI_MODEL = 'github:copilot'
const applied = applyActiveProviderProfileFromConfig({
providerProfiles: [
buildProfile({
id: 'saved_openai',
baseUrl: 'http://192.168.33.108:11434/v1',
model: 'kimi-k2.5:cloud',
}),
],
activeProviderProfileId: 'saved_openai',
} as any)
expect(applied).toBeUndefined()
expect(process.env.CLAUDE_CODE_USE_GITHUB).toBe('1')
expect(process.env.OPENAI_MODEL).toBe('github:copilot')
})
test('applies active profile when no explicit provider is selected', async () => { test('applies active profile when no explicit provider is selected', async () => {
const { applyActiveProviderProfileFromConfig } =
await importFreshProviderProfileModules()
delete process.env.CLAUDE_CODE_USE_OPENAI delete process.env.CLAUDE_CODE_USE_OPENAI
delete process.env.CLAUDE_CODE_USE_GEMINI delete process.env.CLAUDE_CODE_USE_GEMINI
delete process.env.CLAUDE_CODE_USE_GITHUB delete process.env.CLAUDE_CODE_USE_GITHUB
@@ -262,6 +169,8 @@ describe('applyActiveProviderProfileFromConfig', () => {
process.env.OPENAI_BASE_URL = 'http://localhost:11434/v1' process.env.OPENAI_BASE_URL = 'http://localhost:11434/v1'
process.env.OPENAI_MODEL = 'qwen2.5:3b' process.env.OPENAI_MODEL = 'qwen2.5:3b'
const { applyActiveProviderProfileFromConfig } =
await importFreshProviderModules()
const applied = applyActiveProviderProfileFromConfig({ const applied = applyActiveProviderProfileFromConfig({
providerProfiles: [ providerProfiles: [
@@ -275,82 +184,16 @@ describe('applyActiveProviderProfileFromConfig', () => {
} as any) } as any)
expect(applied?.id).toBe('saved_openai') expect(applied?.id).toBe('saved_openai')
expect(String(process.env.CLAUDE_CODE_USE_OPENAI)).toBe('1') expect(process.env.CLAUDE_CODE_USE_OPENAI).toBe('1')
expect(process.env.OPENAI_BASE_URL).toBe('https://api.openai.com/v1') expect(process.env.OPENAI_BASE_URL).toBe('https://api.openai.com/v1')
expect(process.env.OPENAI_MODEL).toBe('gpt-4o') expect(process.env.OPENAI_MODEL).toBe('gpt-4o')
}) })
}) })
describe('persistActiveProviderProfileModel', () => {
test('updates active profile model and current env for profile-managed sessions', async () => {
const {
applyProviderProfileToProcessEnv,
getProviderProfiles,
persistActiveProviderProfileModel,
} = await importFreshProviderProfileModules()
const activeProfile = buildProfile({
id: 'saved_openai',
baseUrl: 'http://192.168.33.108:11434/v1',
model: 'kimi-k2.5:cloud',
})
saveMockGlobalConfig(current => ({
...current,
providerProfiles: [activeProfile],
activeProviderProfileId: activeProfile.id,
}))
applyProviderProfileToProcessEnv(activeProfile)
const updated = persistActiveProviderProfileModel('minimax-m2.5:cloud')
expect(updated?.id).toBe(activeProfile.id)
expect(updated?.model).toBe('minimax-m2.5:cloud')
expect(process.env.OPENAI_MODEL).toBe('minimax-m2.5:cloud')
expect(process.env.CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED_ID).toBe(
activeProfile.id,
)
const saved = getProviderProfiles().find(
(profile: ProviderProfile) => profile.id === activeProfile.id,
)
expect(saved?.model).toBe('minimax-m2.5:cloud')
})
test('does not mutate process env when session is not profile-managed', async () => {
const {
getProviderProfiles,
persistActiveProviderProfileModel,
} = await importFreshProviderProfileModules()
const activeProfile = buildProfile({
id: 'saved_openai',
model: 'kimi-k2.5:cloud',
})
saveMockGlobalConfig(current => ({
...current,
providerProfiles: [activeProfile],
activeProviderProfileId: activeProfile.id,
}))
process.env.CLAUDE_CODE_USE_OPENAI = '1'
process.env.OPENAI_MODEL = 'cli-model'
delete process.env.CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED
delete process.env.CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED_ID
persistActiveProviderProfileModel('minimax-m2.5:cloud')
expect(process.env.OPENAI_MODEL).toBe('cli-model')
const saved = getProviderProfiles().find(
(profile: ProviderProfile) => profile.id === activeProfile.id,
)
expect(saved?.model).toBe('minimax-m2.5:cloud')
})
})
describe('getProviderPresetDefaults', () => {
test('ollama preset defaults to a local Ollama model', async () => {
const { getProviderPresetDefaults } = await importFreshProviderProfileModules()
delete process.env.OPENAI_MODEL
const { getProviderPresetDefaults } = await importFreshProviderModules()
const defaults = getProviderPresetDefaults('ollama')
@@ -362,25 +205,21 @@ describe('getProviderPresetDefaults', () => {
describe('deleteProviderProfile', () => {
test('deleting final profile clears provider env when active profile applied it', async () => {
const {
applyProviderProfileToProcessEnv,
deleteProviderProfile,
} = await importFreshProviderProfileModules()
applyProviderProfileToProcessEnv(
buildProfile({
id: 'only_profile',
baseUrl: 'https://api.openai.com/v1',
model: 'gpt-4o',
apiKey: 'sk-test',
}),
)
saveMockGlobalConfig(current => ({
...current,
providerProfiles: [buildProfile({ id: 'only_profile' })],
activeProviderProfileId: 'only_profile',
}))
const result = deleteProviderProfile('only_profile')
const {
addProviderProfile,
deleteProviderProfile,
} =
await importFreshProviderModules()
const profile = addProviderProfile({
name: 'Only Profile',
provider: 'openai',
baseUrl: 'https://api.openai.com/v1',
model: 'gpt-4o',
apiKey: 'sk-test',
})
expect(profile).not.toBeNull()
const result = deleteProviderProfile(profile!.id)
expect(result.removed).toBe(true)
expect(result.activeProfileId).toBeUndefined()
@@ -405,24 +244,30 @@ describe('deleteProviderProfile', () => {
})
test('deleting final profile preserves explicit startup provider env', async () => {
const { deleteProviderProfile } = await importFreshProviderProfileModules()
const { addProviderProfile, deleteProviderProfile } =
await importFreshProviderModules()
const profile = addProviderProfile({
name: 'Only Profile',
provider: 'openai',
baseUrl: 'https://api.openai.com/v1',
model: 'gpt-4o',
})
expect(profile).not.toBeNull()
process.env.CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED = undefined
delete process.env.CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED
process.env.CLAUDE_CODE_USE_OPENAI = '1'
process.env.OPENAI_BASE_URL = 'http://localhost:11434/v1'
process.env.OPENAI_MODEL = 'qwen2.5:3b'
saveMockGlobalConfig(current => ({
...current,
providerProfiles: [buildProfile({ id: 'only_profile' })],
activeProviderProfileId: 'only_profile',
}))
const result = deleteProviderProfile('only_profile')
const result = deleteProviderProfile(profile!.id)
expect(result.removed).toBe(true)
expect(result.activeProfileId).toBeUndefined()
expect(process.env.CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED).toBeUndefined()
expect(String(process.env.CLAUDE_CODE_USE_OPENAI)).toBe('1')
expect(process.env.CLAUDE_CODE_USE_OPENAI).toBe('1')
expect(process.env.OPENAI_BASE_URL).toBe('http://localhost:11434/v1')
expect(process.env.OPENAI_MODEL).toBe('qwen2.5:3b')
})

View File

@@ -37,7 +37,6 @@ export type ProviderPresetDefaults = Omit<ProviderProfileInput, 'provider'> & {
const DEFAULT_OLLAMA_BASE_URL = 'http://localhost:11434/v1'
const DEFAULT_OLLAMA_MODEL = 'llama3.1:8b'
const PROFILE_ENV_APPLIED_FLAG = 'CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED'
const PROFILE_ENV_APPLIED_ID = 'CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED_ID'
function trimValue(value: string | undefined): string {
return value?.trim() ?? ''
@@ -265,23 +264,6 @@ function hasProviderSelectionFlags(
)
}
function hasConflictingProviderFlagsForProfile(
processEnv: NodeJS.ProcessEnv,
profile: ProviderProfile,
): boolean {
if (profile.provider === 'anthropic') {
return hasProviderSelectionFlags(processEnv)
}
return (
processEnv.CLAUDE_CODE_USE_GEMINI !== undefined ||
processEnv.CLAUDE_CODE_USE_GITHUB !== undefined ||
processEnv.CLAUDE_CODE_USE_BEDROCK !== undefined ||
processEnv.CLAUDE_CODE_USE_VERTEX !== undefined ||
processEnv.CLAUDE_CODE_USE_FOUNDRY !== undefined
)
}
function sameOptionalEnvValue(
left: string | undefined,
right: string | undefined,
@@ -302,10 +284,6 @@ function isProcessEnvAlignedWithProfile(
return false
}
if (trimOrUndefined(processEnv[PROFILE_ENV_APPLIED_ID]) !== profile.id) {
return false
}
if (profile.provider === 'anthropic') {
return (
!hasProviderSelectionFlags(processEnv) &&
@@ -361,13 +339,11 @@ export function clearProviderProfileEnvFromProcessEnv(
delete processEnv.ANTHROPIC_MODEL
delete processEnv.ANTHROPIC_API_KEY
delete processEnv[PROFILE_ENV_APPLIED_FLAG]
delete processEnv[PROFILE_ENV_APPLIED_ID]
}
export function applyProviderProfileToProcessEnv(profile: ProviderProfile): void {
clearProviderProfileEnvFromProcessEnv()
process.env[PROFILE_ENV_APPLIED_FLAG] = '1'
process.env[PROFILE_ENV_APPLIED_ID] = profile.id
process.env.ANTHROPIC_MODEL = profile.model
if (profile.provider === 'anthropic') {
@@ -410,24 +386,12 @@ export function applyActiveProviderProfileFromConfig(
return undefined
}
const isCurrentEnvProfileManaged =
processEnv[PROFILE_ENV_APPLIED_FLAG] === '1' &&
trimOrUndefined(processEnv[PROFILE_ENV_APPLIED_ID]) === activeProfile.id
if (!options?.force && hasProviderSelectionFlags(processEnv)) {
// Respect explicit startup provider intent. Auto-heal only when this
// exact active profile previously applied the current env.
if (!isCurrentEnvProfileManaged) {
// Respect explicit startup provider intent. Re-apply only when the
// current process env is already profile-managed and aligned.
if (!isProcessEnvAlignedWithProfile(processEnv, activeProfile)) {
return undefined
}
if (hasConflictingProviderFlagsForProfile(processEnv, activeProfile)) {
return undefined
}
if (isProcessEnvAlignedWithProfile(processEnv, activeProfile)) {
return activeProfile
}
}
applyProviderProfileToProcessEnv(activeProfile)
@@ -532,61 +496,6 @@ export function updateProviderProfile(
return updatedProfile
}
export function persistActiveProviderProfileModel(
model: string,
): ProviderProfile | null {
const nextModel = trimOrUndefined(model)
if (!nextModel) {
return null
}
const activeProfile = getActiveProviderProfile()
if (!activeProfile) {
return null
}
saveGlobalConfig(current => {
const currentProfiles = getProviderProfiles(current)
const profileIndex = currentProfiles.findIndex(
profile => profile.id === activeProfile.id,
)
if (profileIndex < 0) {
return current
}
const currentProfile = currentProfiles[profileIndex]
if (currentProfile.model === nextModel) {
return current
}
const nextProfiles = [...currentProfiles]
nextProfiles[profileIndex] = {
...currentProfile,
model: nextModel,
}
return {
...current,
providerProfiles: nextProfiles,
}
})
const resolvedProfile = getActiveProviderProfile()
if (!resolvedProfile || resolvedProfile.id !== activeProfile.id) {
return null
}
if (
process.env[PROFILE_ENV_APPLIED_FLAG] === '1' &&
trimOrUndefined(process.env[PROFILE_ENV_APPLIED_ID]) === resolvedProfile.id
) {
applyProviderProfileToProcessEnv(resolvedProfile)
}
return resolvedProfile
}
export function setActiveProviderProfile(
profileId: string,
): ProviderProfile | null {