Compare commits
17 Commits
fix/386-wi...fix/repl-s

Commits:
- fde8959375
- 6c61790063
- f1af292b82
- 26eef92fe7
- 112df59117
- 8724d59d48
- af08b4f762
- 5012c160c9
- c1934974aa
- 94de37d44f
- 3b3aca716d
- d5852ca73d
- c534aa5771
- 60d3d8961a
- 3b9893b586
- daf2c90b6d
- 4ac7367733
**.gitignore** (vendored, 3 changes)

```diff
@@ -6,4 +6,7 @@ dist/
 !.env.example
 .openclaude-profile.json
 reports/
 GEMINI.md
 package-lock.json
 /.claude
 coverage/
```
**README.md** (35 changes)

@@ -185,6 +185,41 @@ With Firecrawl enabled:

Free tier at [firecrawl.dev](https://firecrawl.dev) includes 500 credits. The key is optional.

---

## Headless gRPC Server

OpenClaude can be run as a headless gRPC service, allowing you to integrate its agentic capabilities (tools, bash, file editing) into other applications, CI/CD pipelines, or custom user interfaces. The server uses bidirectional streaming to deliver text chunks and tool calls in real time, and to request permission before sensitive commands run.

### 1. Start the gRPC Server

Start the core engine as a gRPC service on `localhost:50051`:

```bash
npm run dev:grpc
```

#### Configuration

| Variable | Default | Description |
|-------------|-------------|------------------------------------------------|
| `GRPC_PORT` | `50051` | Port the gRPC server listens on |
| `GRPC_HOST` | `localhost` | Bind address. Use `0.0.0.0` to expose on all interfaces (not recommended without authentication) |
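Both variables are read when the server boots, so in a POSIX shell they can be overridden inline for a single run. A quick sketch (the port value here is only an example):

```bash
# Listen on a non-default port for this run only
GRPC_PORT=50052 npm run dev:grpc
```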
### 2. Run the Test CLI Client

We provide a lightweight CLI client that communicates exclusively over gRPC. It acts just like the main interactive CLI, rendering colors, streaming tokens, and prompting you for tool permissions (y/n) via the gRPC `action_required` event.

In a separate terminal, run:

```bash
npm run dev:grpc:cli
```

*Note: The gRPC definitions are located in `src/proto/openclaude.proto`. You can use this file to generate clients in Python, Go, Rust, or any other language.*
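For example, a Python stub could be generated with the standard `grpcio-tools` compiler (a sketch; the `gen` output directory name is illustrative):

```bash
pip install grpcio-tools
mkdir -p gen
python -m grpc_tools.protoc \
  --proto_path=src/proto \
  --python_out=gen \
  --grpc_python_out=gen \
  src/proto/openclaude.proto
```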
---

## Source Build And Local Development

```bash
```
**bun.lock** (128 changes)

```diff
@@ -13,6 +13,8 @@
 "@anthropic-ai/vertex-sdk": "0.14.4",
 "@commander-js/extra-typings": "12.1.0",
 "@growthbook/growthbook": "1.6.5",
+"@grpc/grpc-js": "^1.14.3",
+"@grpc/proto-loader": "^0.8.0",
 "@mendable/firecrawl-js": "4.18.1",
 "@modelcontextprotocol/sdk": "1.29.0",
 "@opentelemetry/api": "1.9.1",
@@ -51,7 +53,7 @@
 "ignore": "7.0.5",
 "indent-string": "5.0.0",
 "jsonc-parser": "3.3.1",
-"lodash-es": "4.18.0",
+"lodash-es": "4.18.1",
 "lru-cache": "11.2.7",
 "marked": "15.0.12",
 "p-map": "7.0.4",
@@ -84,10 +86,14 @@
 "@types/bun": "1.3.11",
 "@types/node": "25.5.0",
 "@types/react": "19.2.14",
+"tsx": "^4.21.0",
 "typescript": "5.9.3",
 },
 },
 },
+"overrides": {
+"lodash-es": "4.18.1",
+},
 "packages": {
 "@alcalzone/ansi-tokenize": ["@alcalzone/ansi-tokenize@0.3.0", "", { "dependencies": { "ansi-styles": "^6.2.1", "is-fullwidth-code-point": "^5.0.0" } }, "sha512-p+CMKJ93HFmLkjXKlXiVGlMQEuRb6H0MokBSwUsX+S6BRX8eV5naFZpQJFfJHjRZY0Hmnqy1/r6UWl3x+19zYA=="],

@@ -181,6 +187,58 @@
 "@emnapi/runtime": ["@emnapi/runtime@1.9.2", "", { "dependencies": { "tslib": "^2.4.0" } }, "sha512-3U4+MIWHImeyu1wnmVygh5WlgfYDtyf0k8AbLhMFxOipihf6nrWC4syIm/SwEeec0mNSafiiNnMJwbza/Is6Lw=="],

+"@esbuild/aix-ppc64": ["@esbuild/aix-ppc64@0.27.7", "", { "os": "aix", "cpu": "ppc64" }, "sha512-EKX3Qwmhz1eMdEJokhALr0YiD0lhQNwDqkPYyPhiSwKrh7/4KRjQc04sZ8db+5DVVnZ1LmbNDI1uAMPEUBnQPg=="],

+"@esbuild/android-arm": ["@esbuild/android-arm@0.27.7", "", { "os": "android", "cpu": "arm" }, "sha512-jbPXvB4Yj2yBV7HUfE2KHe4GJX51QplCN1pGbYjvsyCZbQmies29EoJbkEc+vYuU5o45AfQn37vZlyXy4YJ8RQ=="],

+"@esbuild/android-arm64": ["@esbuild/android-arm64@0.27.7", "", { "os": "android", "cpu": "arm64" }, "sha512-62dPZHpIXzvChfvfLJow3q5dDtiNMkwiRzPylSCfriLvZeq0a1bWChrGx/BbUbPwOrsWKMn8idSllklzBy+dgQ=="],

+"@esbuild/android-x64": ["@esbuild/android-x64@0.27.7", "", { "os": "android", "cpu": "x64" }, "sha512-x5VpMODneVDb70PYV2VQOmIUUiBtY3D3mPBG8NxVk5CogneYhkR7MmM3yR/uMdITLrC1ml/NV1rj4bMJuy9MCg=="],

+"@esbuild/darwin-arm64": ["@esbuild/darwin-arm64@0.27.7", "", { "os": "darwin", "cpu": "arm64" }, "sha512-5lckdqeuBPlKUwvoCXIgI2D9/ABmPq3Rdp7IfL70393YgaASt7tbju3Ac+ePVi3KDH6N2RqePfHnXkaDtY9fkw=="],

+"@esbuild/darwin-x64": ["@esbuild/darwin-x64@0.27.7", "", { "os": "darwin", "cpu": "x64" }, "sha512-rYnXrKcXuT7Z+WL5K980jVFdvVKhCHhUwid+dDYQpH+qu+TefcomiMAJpIiC2EM3Rjtq0sO3StMV/+3w3MyyqQ=="],

+"@esbuild/freebsd-arm64": ["@esbuild/freebsd-arm64@0.27.7", "", { "os": "freebsd", "cpu": "arm64" }, "sha512-B48PqeCsEgOtzME2GbNM2roU29AMTuOIN91dsMO30t+Ydis3z/3Ngoj5hhnsOSSwNzS+6JppqWsuhTp6E82l2w=="],

+"@esbuild/freebsd-x64": ["@esbuild/freebsd-x64@0.27.7", "", { "os": "freebsd", "cpu": "x64" }, "sha512-jOBDK5XEjA4m5IJK3bpAQF9/Lelu/Z9ZcdhTRLf4cajlB+8VEhFFRjWgfy3M1O4rO2GQ/b2dLwCUGpiF/eATNQ=="],

+"@esbuild/linux-arm": ["@esbuild/linux-arm@0.27.7", "", { "os": "linux", "cpu": "arm" }, "sha512-RkT/YXYBTSULo3+af8Ib0ykH8u2MBh57o7q/DAs3lTJlyVQkgQvlrPTnjIzzRPQyavxtPtfg0EopvDyIt0j1rA=="],

+"@esbuild/linux-arm64": ["@esbuild/linux-arm64@0.27.7", "", { "os": "linux", "cpu": "arm64" }, "sha512-RZPHBoxXuNnPQO9rvjh5jdkRmVizktkT7TCDkDmQ0W2SwHInKCAV95GRuvdSvA7w4VMwfCjUiPwDi0ZO6Nfe9A=="],

+"@esbuild/linux-ia32": ["@esbuild/linux-ia32@0.27.7", "", { "os": "linux", "cpu": "ia32" }, "sha512-GA48aKNkyQDbd3KtkplYWT102C5sn/EZTY4XROkxONgruHPU72l+gW+FfF8tf2cFjeHaRbWpOYa/uRBz/Xq1Pg=="],

+"@esbuild/linux-loong64": ["@esbuild/linux-loong64@0.27.7", "", { "os": "linux", "cpu": "none" }, "sha512-a4POruNM2oWsD4WKvBSEKGIiWQF8fZOAsycHOt6JBpZ+JN2n2JH9WAv56SOyu9X5IqAjqSIPTaJkqN8F7XOQ5Q=="],

+"@esbuild/linux-mips64el": ["@esbuild/linux-mips64el@0.27.7", "", { "os": "linux", "cpu": "none" }, "sha512-KabT5I6StirGfIz0FMgl1I+R1H73Gp0ofL9A3nG3i/cYFJzKHhouBV5VWK1CSgKvVaG4q1RNpCTR2LuTVB3fIw=="],

+"@esbuild/linux-ppc64": ["@esbuild/linux-ppc64@0.27.7", "", { "os": "linux", "cpu": "ppc64" }, "sha512-gRsL4x6wsGHGRqhtI+ifpN/vpOFTQtnbsupUF5R5YTAg+y/lKelYR1hXbnBdzDjGbMYjVJLJTd2OFmMewAgwlQ=="],

+"@esbuild/linux-riscv64": ["@esbuild/linux-riscv64@0.27.7", "", { "os": "linux", "cpu": "none" }, "sha512-hL25LbxO1QOngGzu2U5xeXtxXcW+/GvMN3ejANqXkxZ/opySAZMrc+9LY/WyjAan41unrR3YrmtTsUpwT66InQ=="],

+"@esbuild/linux-s390x": ["@esbuild/linux-s390x@0.27.7", "", { "os": "linux", "cpu": "s390x" }, "sha512-2k8go8Ycu1Kb46vEelhu1vqEP+UeRVj2zY1pSuPdgvbd5ykAw82Lrro28vXUrRmzEsUV0NzCf54yARIK8r0fdw=="],

+"@esbuild/linux-x64": ["@esbuild/linux-x64@0.27.7", "", { "os": "linux", "cpu": "x64" }, "sha512-hzznmADPt+OmsYzw1EE33ccA+HPdIqiCRq7cQeL1Jlq2gb1+OyWBkMCrYGBJ+sxVzve2ZJEVeePbLM2iEIZSxA=="],

+"@esbuild/netbsd-arm64": ["@esbuild/netbsd-arm64@0.27.7", "", { "os": "none", "cpu": "arm64" }, "sha512-b6pqtrQdigZBwZxAn1UpazEisvwaIDvdbMbmrly7cDTMFnw/+3lVxxCTGOrkPVnsYIosJJXAsILG9XcQS+Yu6w=="],

+"@esbuild/netbsd-x64": ["@esbuild/netbsd-x64@0.27.7", "", { "os": "none", "cpu": "x64" }, "sha512-OfatkLojr6U+WN5EDYuoQhtM+1xco+/6FSzJJnuWiUw5eVcicbyK3dq5EeV/QHT1uy6GoDhGbFpprUiHUYggrw=="],

+"@esbuild/openbsd-arm64": ["@esbuild/openbsd-arm64@0.27.7", "", { "os": "openbsd", "cpu": "arm64" }, "sha512-AFuojMQTxAz75Fo8idVcqoQWEHIXFRbOc1TrVcFSgCZtQfSdc1RXgB3tjOn/krRHENUB4j00bfGjyl2mJrU37A=="],

+"@esbuild/openbsd-x64": ["@esbuild/openbsd-x64@0.27.7", "", { "os": "openbsd", "cpu": "x64" }, "sha512-+A1NJmfM8WNDv5CLVQYJ5PshuRm/4cI6WMZRg1by1GwPIQPCTs1GLEUHwiiQGT5zDdyLiRM/l1G0Pv54gvtKIg=="],

+"@esbuild/openharmony-arm64": ["@esbuild/openharmony-arm64@0.27.7", "", { "os": "none", "cpu": "arm64" }, "sha512-+KrvYb/C8zA9CU/g0sR6w2RBw7IGc5J2BPnc3dYc5VJxHCSF1yNMxTV5LQ7GuKteQXZtspjFbiuW5/dOj7H4Yw=="],

+"@esbuild/sunos-x64": ["@esbuild/sunos-x64@0.27.7", "", { "os": "sunos", "cpu": "x64" }, "sha512-ikktIhFBzQNt/QDyOL580ti9+5mL/YZeUPKU2ivGtGjdTYoqz6jObj6nOMfhASpS4GU4Q/Clh1QtxWAvcYKamA=="],

+"@esbuild/win32-arm64": ["@esbuild/win32-arm64@0.27.7", "", { "os": "win32", "cpu": "arm64" }, "sha512-7yRhbHvPqSpRUV7Q20VuDwbjW5kIMwTHpptuUzV+AA46kiPze5Z7qgt6CLCK3pWFrHeNfDd1VKgyP4O+ng17CA=="],

+"@esbuild/win32-ia32": ["@esbuild/win32-ia32@0.27.7", "", { "os": "win32", "cpu": "ia32" }, "sha512-SmwKXe6VHIyZYbBLJrhOoCJRB/Z1tckzmgTLfFYOfpMAx63BJEaL9ExI8x7v0oAO3Zh6D/Oi1gVxEYr5oUCFhw=="],

+"@esbuild/win32-x64": ["@esbuild/win32-x64@0.27.7", "", { "os": "win32", "cpu": "x64" }, "sha512-56hiAJPhwQ1R4i+21FVF7V8kSD5zZTdHcVuRFMW0hn753vVfQN8xlx4uOPT4xoGH0Z/oVATuR82AiqSTDIpaHg=="],

 "@growthbook/growthbook": ["@growthbook/growthbook@1.6.5", "", { "dependencies": { "dom-mutator": "^0.6.0" } }, "sha512-mUaMsgeUTpRIUOTn33EUXHRK6j7pxBjwqH4WpQyq+pukjd1AIzWlEa6w7i6bInJUcweGgP2beXZmaP6b6UPn7A=="],

+"@grpc/grpc-js": ["@grpc/grpc-js@1.14.3", "", { "dependencies": { "@grpc/proto-loader": "^0.8.0", "@js-sdsl/ordered-map": "^4.4.2" } }, "sha512-Iq8QQQ/7X3Sac15oB6p0FmUg/klxQvXLeileoqrTRGJYLV+/9tubbr9ipz0GKHjmXVsgFPo/+W+2cA8eNcR+XA=="],

@@ -453,7 +511,7 @@
 "cli-highlight": ["cli-highlight@2.1.11", "", { "dependencies": { "chalk": "^4.0.0", "highlight.js": "^10.7.1", "mz": "^2.4.0", "parse5": "^5.1.1", "parse5-htmlparser2-tree-adapter": "^6.0.0", "yargs": "^16.0.0" }, "bin": { "highlight": "bin/highlight" } }, "sha512-9KDcoEVwyUXrjcJNvHD0NFc/hiwe/WPVYIleQh2O1N2Zro5gWJZ/K+3DGn8w8P/F6FxOgzyC5bxDyHIgCSPhGg=="],

-"cliui": ["cliui@7.0.4", "", { "dependencies": { "string-width": "^4.2.0", "strip-ansi": "^6.0.0", "wrap-ansi": "^7.0.0" } }, "sha512-OcRE68cOsVMXp1Yvonl/fzkQOyjLSu/8bhPDfQt0e0/Eb283TKP20Fs2MqoPsr9SwA595rRCA+QMzYc9nBP+JQ=="],
+"cliui": ["cliui@8.0.1", "", { "dependencies": { "string-width": "^4.2.0", "strip-ansi": "^6.0.1", "wrap-ansi": "^7.0.0" } }, "sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ=="],

 "code-excerpt": ["code-excerpt@4.0.0", "", { "dependencies": { "convert-to-spaces": "^2.0.1" } }, "sha512-xxodCmBen3iy2i0WtAK8FlFNrRzjUqjRsMfho58xT/wvZU1YTM3fCnRjcy1gJPMepaRlgm/0e6w8SpWHpn3/cA=="],

@@ -521,6 +579,8 @@
 "es-set-tostringtag": ["es-set-tostringtag@2.1.0", "", { "dependencies": { "es-errors": "^1.3.0", "get-intrinsic": "^1.2.6", "has-tostringtag": "^1.0.2", "hasown": "^2.0.2" } }, "sha512-j6vWzfrGVfyXxge+O0x5sh6cvxAog0a/4Rdd2K36zCMV5eJ+/+tOAngRO8cODMNWbVRdVlmGZQL2YS3yR8bIUA=="],

+"esbuild": ["esbuild@0.27.7", "", { "optionalDependencies": { "@esbuild/aix-ppc64": "0.27.7", "@esbuild/android-arm": "0.27.7", "@esbuild/android-arm64": "0.27.7", "@esbuild/android-x64": "0.27.7", "@esbuild/darwin-arm64": "0.27.7", "@esbuild/darwin-x64": "0.27.7", "@esbuild/freebsd-arm64": "0.27.7", "@esbuild/freebsd-x64": "0.27.7", "@esbuild/linux-arm": "0.27.7", "@esbuild/linux-arm64": "0.27.7", "@esbuild/linux-ia32": "0.27.7", "@esbuild/linux-loong64": "0.27.7", "@esbuild/linux-mips64el": "0.27.7", "@esbuild/linux-ppc64": "0.27.7", "@esbuild/linux-riscv64": "0.27.7", "@esbuild/linux-s390x": "0.27.7", "@esbuild/linux-x64": "0.27.7", "@esbuild/netbsd-arm64": "0.27.7", "@esbuild/netbsd-x64": "0.27.7", "@esbuild/openbsd-arm64": "0.27.7", "@esbuild/openbsd-x64": "0.27.7", "@esbuild/openharmony-arm64": "0.27.7", "@esbuild/sunos-x64": "0.27.7", "@esbuild/win32-arm64": "0.27.7", "@esbuild/win32-ia32": "0.27.7", "@esbuild/win32-x64": "0.27.7" }, "bin": { "esbuild": "bin/esbuild" } }, "sha512-IxpibTjyVnmrIQo5aqNpCgoACA/dTKLTlhMHihVHhdkxKyPO1uBBthumT0rdHmcsk9uMonIWS0m4FljWzILh3w=="],

 "escalade": ["escalade@3.2.0", "", {}, "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA=="],

 "escape-html": ["escape-html@1.0.3", "", {}, "sha512-NiSupZ4OeuGwr68lGIeym/ksIZMJodUGOSCZ/FSnTxcrekbvqrgdUxlJOMpijaKZVjAJrWrGs/6Jy8OMuyj9ow=="],

@@ -567,6 +627,8 @@
 "fresh": ["fresh@2.0.0", "", {}, "sha512-Rx/WycZ60HOaqLKAi6cHRKKI7zxWbJ31MhntmtwMoaTeF7XFH9hhBp8vITaMidfljRQ6eYWCKkaTK+ykVJHP2A=="],

+"fsevents": ["fsevents@2.3.3", "", { "os": "darwin" }, "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw=="],

 "function-bind": ["function-bind@1.1.2", "", {}, "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA=="],

 "fuse.js": ["fuse.js@7.1.0", "", {}, "sha512-trLf4SzuuUxfusZADLINj+dE8clK1frKdmqiJNb1Es75fmI5oY6X2mxLVUciLLjxqw/xr72Dhy+lER6dGd02FQ=="],

@@ -585,6 +647,8 @@
 "get-stream": ["get-stream@9.0.1", "", { "dependencies": { "@sec-ant/readable-stream": "^0.4.1", "is-stream": "^4.0.1" } }, "sha512-kVCxPF3vQM/N0B1PmoqVUqgHP+EeVjmZSQn+1oCRPxd2P21P2F19lIgbR3HBosbB1PUhOAoctJnfEn2GbN2eZA=="],

+"get-tsconfig": ["get-tsconfig@4.13.7", "", { "dependencies": { "resolve-pkg-maps": "^1.0.0" } }, "sha512-7tN6rFgBlMgpBML5j8typ92BKFi2sFQvIdpAqLA2beia5avZDrMs0FLZiM5etShWq5irVyGcGMEA1jcDaK7A/Q=="],

 "google-auth-library": ["google-auth-library@9.15.1", "", { "dependencies": { "base64-js": "^1.3.0", "ecdsa-sig-formatter": "^1.0.11", "gaxios": "^6.1.1", "gcp-metadata": "^6.1.0", "gtoken": "^7.0.0", "jws": "^4.0.0" } }, "sha512-Jb6Z0+nvECVz+2lzSMt9u98UsoakXxA2HGHMCxh+so3n90XgYWkq5dur19JAJV7ONiJY22yBTyJB1TSkvPq9Ng=="],

 "google-logging-utils": ["google-logging-utils@0.0.2", "", {}, "sha512-NEgUnEcBiP5HrPzufUkBzJOD/Sxsco3rLNo1F1TNf7ieU8ryUzBhqba8r756CjLX7rn3fHl6iLEwPYuqpoKgQQ=="],

@@ -657,7 +721,7 @@
 "locate-path": ["locate-path@5.0.0", "", { "dependencies": { "p-locate": "^4.1.0" } }, "sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g=="],

-"lodash-es": ["lodash-es@4.18.0", "", {}, "sha512-koAgswPPA+UTaPN64Etp+PGP+WT6oqOS2NMi5yDkMaiGw9qY4VxQbQF0mtKMyr4BlTznWyzePV5UpECTJQmSUA=="],
+"lodash-es": ["lodash-es@4.18.1", "", {}, "sha512-J8xewKD/Gk22OZbhpOVSwcs60zhd95ESDwezOFuA3/099925PdHJ7OFHNTGtajL3AlZkykD32HykiMo+BIBI8A=="],

 "lodash.camelcase": ["lodash.camelcase@4.3.0", "", {}, "sha512-TwuEnCnxbc3rAvhf/LbG7tJUDzhqXyFnv3dtzLOPgCG/hODL7WFnsbwktkD7yUV0RrreP/l1PALq/YSg6VvjlA=="],

@@ -761,6 +825,8 @@
 "require-main-filename": ["require-main-filename@2.0.0", "", {}, "sha512-NKN5kMDylKuldxYLSUfrbo5Tuzh4hd+2E8NPPX02mZtn1VuREQToYe/ZdlJy+J3uCpfaiGF05e7B8W0iXbQHmg=="],

+"resolve-pkg-maps": ["resolve-pkg-maps@1.0.0", "", {}, "sha512-seS2Tj26TBVOC2NIc2rOe2y2ZO7efxITtLZcGSOnHHNOQ7CkiUBfw0Iw2ck6xkIhPwLhKNLS8BO+hEpngQlqzw=="],

 "retry": ["retry@0.12.0", "", {}, "sha512-9LkiTwjUh6rT555DtE9rTX+BKByPfrMzEAtnlEtdEwr3Nkffwiihqe2bWADg+OQRjt9gl6ICdmB/ZFDCGAtSow=="],

 "router": ["router@2.2.0", "", { "dependencies": { "debug": "^4.4.0", "depd": "^2.0.0", "is-promise": "^4.0.0", "parseurl": "^1.3.3", "path-to-regexp": "^8.0.0" } }, "sha512-nLTrUKm2UyiL7rlhapu/Zl45FwNgkZGaCpZbIHajDYgwlJCOzLSk+cIPAnsEqV955GjILJnKbdQC1nVPz+gAYQ=="],

@@ -831,6 +897,8 @@
 "tslib": ["tslib@1.14.1", "", {}, "sha512-Xni35NKzjgMrwevysHTCArtLDpPvye8zV/0E4EyYn43P7/7qvQwPh9BGkHewbMulVntbigmcT7rdX3BNo9wRJg=="],

+"tsx": ["tsx@4.21.0", "", { "dependencies": { "esbuild": "~0.27.0", "get-tsconfig": "^4.7.5" }, "optionalDependencies": { "fsevents": "~2.3.3" }, "bin": { "tsx": "dist/cli.mjs" } }, "sha512-5C1sg4USs1lfG0GFb2RLXsdpXqBSEhAaA/0kPL01wxzpMqLILNxIxIOKiILz+cdg/pLnOUxFYOR5yhHU666wbw=="],

 "turndown": ["turndown@7.2.2", "", { "dependencies": { "@mixmark-io/domino": "^2.2.0" } }, "sha512-1F7db8BiExOKxjSMU2b7if62D/XOyQyZbPKq/nUwopfgnHlqXHqQ0lvfUTeUIr1lZJzOPFn43dODyMSIfvWRKQ=="],

 "type-fest": ["type-fest@4.41.0", "", {}, "sha512-TeTSQ6H5YHvpqVwBRcnLDCBnDOHWYu7IvGbHT6N8AOymcr9PJGjc1GTtiWZTYg0NCgYwvnYWEkVChQAr9bjfwA=="],

@@ -881,9 +949,9 @@
 "yaml": ["yaml@2.8.3", "", { "bin": { "yaml": "bin.mjs" } }, "sha512-AvbaCLOO2Otw/lW5bmh9d/WEdcDFdQp2Z2ZUH3pX9U2ihyUY0nvLv7J6TrWowklRGPYbB/IuIMfYgxaCPg5Bpg=="],

-"yargs": ["yargs@16.2.0", "", { "dependencies": { "cliui": "^7.0.2", "escalade": "^3.1.1", "get-caller-file": "^2.0.5", "require-directory": "^2.1.1", "string-width": "^4.2.0", "y18n": "^5.0.5", "yargs-parser": "^20.2.2" } }, "sha512-D1mvvtDG0L5ft/jGWkLpG1+m0eQxOfaBvTNELraWj22wSVUMWxZUvYgJYcKh6jGGIkJFhH4IZPQhR4TKpc8mBw=="],
+"yargs": ["yargs@17.7.2", "", { "dependencies": { "cliui": "^8.0.1", "escalade": "^3.1.1", "get-caller-file": "^2.0.5", "require-directory": "^2.1.1", "string-width": "^4.2.3", "y18n": "^5.0.5", "yargs-parser": "^21.1.1" } }, "sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w=="],

-"yargs-parser": ["yargs-parser@20.2.9", "", {}, "sha512-y11nGElTIV+CT3Zv9t7VKl+Q3hTQoT9a1Qzezhhl6Rp21gJ/IVTW7Z3y9EWXhuUBC2Shnf+DX0antecpAwSP8w=="],
+"yargs-parser": ["yargs-parser@21.1.1", "", {}, "sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw=="],

 "yoctocolors": ["yoctocolors@2.1.2", "", {}, "sha512-CzhO+pFNo8ajLM2d2IW/R93ipy99LWjtwblvC1RsoSUMZgyLbYFr221TnSNT7GjGdYui6P459mw9JH/g/zW2ug=="],

@@ -891,8 +959,6 @@
 "zod-to-json-schema": ["zod-to-json-schema@3.25.2", "", { "peerDependencies": { "zod": "^3.25.28 || ^4" } }, "sha512-O/PgfnpT1xKSDeQYSCfRI5Gy3hPf91mKVDuYLUHZJMiDFptvP41MSnWofm8dnCm0256ZNfZIM7DSzuSMAFnjHA=="],

-"@anthropic-ai/sandbox-runtime/lodash-es": ["lodash-es@4.17.23", "", {}, "sha512-kVI48u3PZr38HdYz98UmfPnXl2DXrpdctLrFLCd3kOx1xUkOmpFPx7gCWWM5MPkL/fD8zb+Ph0QzjGFs4+hHWg=="],

 "@aws-crypto/crc32/@aws-crypto/util": ["@aws-crypto/util@5.2.0", "", { "dependencies": { "@aws-sdk/types": "^3.222.0", "@smithy/util-utf8": "^2.0.0", "tslib": "^2.6.2" } }, "sha512-4RkU9EsI6ZpBve5fseQlGNUWKMa1RLPQ1dnjnQoe07ldfIzcsGb5hC5W0Dm7u423KWzawlrpbjXBrXCEv9zazQ=="],

 "@aws-crypto/crc32/tslib": ["tslib@2.8.1", "", {}, "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w=="],

@@ -1085,8 +1151,6 @@
 "@emnapi/runtime/tslib": ["tslib@2.8.1", "", {}, "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w=="],

+"@grpc/proto-loader/yargs": ["yargs@17.7.2", "", { "dependencies": { "cliui": "^8.0.1", "escalade": "^3.1.1", "get-caller-file": "^2.0.5", "require-directory": "^2.1.1", "string-width": "^4.2.3", "y18n": "^5.0.5", "yargs-parser": "^21.1.1" } }, "sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w=="],

 "@opentelemetry/exporter-trace-otlp-grpc/@opentelemetry/core": ["@opentelemetry/core@1.30.1", "", { "dependencies": { "@opentelemetry/semantic-conventions": "1.28.0" }, "peerDependencies": { "@opentelemetry/api": ">=1.0.0 <1.10.0" } }, "sha512-OOCM2C/QIURhJMuKaekP3TRBxBKxG/TWWA0TL2J6nXUtDnuCtccy49LUJF8xPFXMX+0LMcxFpCo8M9cGY1W6rQ=="],

 "@opentelemetry/exporter-trace-otlp-grpc/@opentelemetry/otlp-exporter-base": ["@opentelemetry/otlp-exporter-base@0.57.2", "", { "dependencies": { "@opentelemetry/core": "1.30.1", "@opentelemetry/otlp-transformer": "0.57.2" }, "peerDependencies": { "@opentelemetry/api": "^1.3.0" } }, "sha512-XdxEzL23Urhidyebg5E6jZoaiW5ygP/mRjxLHixogbqwDy2Faduzb5N0o/Oi+XTIJu+iyxXdVORjXax+Qgfxag=="],

@@ -1305,6 +1369,8 @@
 "cli-highlight/chalk": ["chalk@4.1.2", "", { "dependencies": { "ansi-styles": "^4.1.0", "supports-color": "^7.1.0" } }, "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA=="],

+"cli-highlight/yargs": ["yargs@16.2.0", "", { "dependencies": { "cliui": "^7.0.2", "escalade": "^3.1.1", "get-caller-file": "^2.0.5", "require-directory": "^2.1.1", "string-width": "^4.2.0", "y18n": "^5.0.5", "yargs-parser": "^20.2.2" } }, "sha512-D1mvvtDG0L5ft/jGWkLpG1+m0eQxOfaBvTNELraWj22wSVUMWxZUvYgJYcKh6jGGIkJFhH4IZPQhR4TKpc8mBw=="],

 "cliui/string-width": ["string-width@4.2.3", "", { "dependencies": { "emoji-regex": "^8.0.0", "is-fullwidth-code-point": "^3.0.0", "strip-ansi": "^6.0.1" } }, "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g=="],

 "cliui/strip-ansi": ["strip-ansi@6.0.1", "", { "dependencies": { "ansi-regex": "^5.0.1" } }, "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A=="],

@@ -1359,12 +1425,6 @@
 "@aws-sdk/nested-clients/@smithy/util-base64/@smithy/util-buffer-from": ["@smithy/util-buffer-from@4.2.2", "", { "dependencies": { "@smithy/is-array-buffer": "^4.2.2", "tslib": "^2.6.2" } }, "sha512-FDXD7cvUoFWwN6vtQfEta540Y/YBe5JneK3SoZg9bThSoOAC/eGeYEua6RkBgKjGa/sz6Y+DuBZj3+YEY21y4Q=="],

+"@grpc/proto-loader/yargs/cliui": ["cliui@8.0.1", "", { "dependencies": { "string-width": "^4.2.0", "strip-ansi": "^6.0.1", "wrap-ansi": "^7.0.0" } }, "sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ=="],

+"@grpc/proto-loader/yargs/string-width": ["string-width@4.2.3", "", { "dependencies": { "emoji-regex": "^8.0.0", "is-fullwidth-code-point": "^3.0.0", "strip-ansi": "^6.0.1" } }, "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g=="],

+"@grpc/proto-loader/yargs/yargs-parser": ["yargs-parser@21.1.1", "", {}, "sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw=="],

 "@opentelemetry/exporter-trace-otlp-grpc/@opentelemetry/core/@opentelemetry/semantic-conventions": ["@opentelemetry/semantic-conventions@1.28.0", "", {}, "sha512-lp4qAiMTD4sNWW4DbKLBkfiMZ4jbAboJIGOQr5DvciMRI494OapieI9qiODpOt0XBr1LjIDy1xAGAnVs5supTA=="],

 "@opentelemetry/exporter-trace-otlp-grpc/@opentelemetry/otlp-transformer/@opentelemetry/api-logs": ["@opentelemetry/api-logs@0.57.2", "", { "dependencies": { "@opentelemetry/api": "^1.3.0" } }, "sha512-uIX52NnTM0iBh84MShlpouI7UKqkZ7MrUszTmaypHBu4r7NofznSnQRfJ+uUeDtQDj6w8eFGg5KBLDAwAPz1+A=="],

@@ -1431,6 +1491,12 @@
 "cli-highlight/chalk/ansi-styles": ["ansi-styles@4.3.0", "", { "dependencies": { "color-convert": "^2.0.1" } }, "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg=="],

+"cli-highlight/yargs/cliui": ["cliui@7.0.4", "", { "dependencies": { "string-width": "^4.2.0", "strip-ansi": "^6.0.0", "wrap-ansi": "^7.0.0" } }, "sha512-OcRE68cOsVMXp1Yvonl/fzkQOyjLSu/8bhPDfQt0e0/Eb283TKP20Fs2MqoPsr9SwA595rRCA+QMzYc9nBP+JQ=="],

+"cli-highlight/yargs/string-width": ["string-width@4.2.3", "", { "dependencies": { "emoji-regex": "^8.0.0", "is-fullwidth-code-point": "^3.0.0", "strip-ansi": "^6.0.1" } }, "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g=="],

+"cli-highlight/yargs/yargs-parser": ["yargs-parser@20.2.9", "", {}, "sha512-y11nGElTIV+CT3Zv9t7VKl+Q3hTQoT9a1Qzezhhl6Rp21gJ/IVTW7Z3y9EWXhuUBC2Shnf+DX0antecpAwSP8w=="],

 "cliui/string-width/emoji-regex": ["emoji-regex@8.0.0", "", {}, "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A=="],

 "cliui/string-width/is-fullwidth-code-point": ["is-fullwidth-code-point@3.0.0", "", {}, "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg=="],

@@ -1471,16 +1537,6 @@
 "@aws-sdk/nested-clients/@smithy/util-base64/@smithy/util-buffer-from/@smithy/is-array-buffer": ["@smithy/is-array-buffer@4.2.2", "", { "dependencies": { "tslib": "^2.6.2" } }, "sha512-n6rQ4N8Jj4YTQO3YFrlgZuwKodf4zUFs7EJIWH86pSCWBaAtAGBFfCM7Wx6D2bBJ2xqFNxGBSrUWswT3M0VJow=="],

+"@grpc/proto-loader/yargs/cliui/strip-ansi": ["strip-ansi@6.0.1", "", { "dependencies": { "ansi-regex": "^5.0.1" } }, "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A=="],

+"@grpc/proto-loader/yargs/cliui/wrap-ansi": ["wrap-ansi@7.0.0", "", { "dependencies": { "ansi-styles": "^4.0.0", "string-width": "^4.1.0", "strip-ansi": "^6.0.0" } }, "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q=="],

+"@grpc/proto-loader/yargs/string-width/emoji-regex": ["emoji-regex@8.0.0", "", {}, "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A=="],

+"@grpc/proto-loader/yargs/string-width/is-fullwidth-code-point": ["is-fullwidth-code-point@3.0.0", "", {}, "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg=="],

+"@grpc/proto-loader/yargs/string-width/strip-ansi": ["strip-ansi@6.0.1", "", { "dependencies": { "ansi-regex": "^5.0.1" } }, "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A=="],

 "@opentelemetry/otlp-grpc-exporter-base/@opentelemetry/otlp-transformer/@opentelemetry/resources/@opentelemetry/semantic-conventions": ["@opentelemetry/semantic-conventions@1.28.0", "", {}, "sha512-lp4qAiMTD4sNWW4DbKLBkfiMZ4jbAboJIGOQr5DvciMRI494OapieI9qiODpOt0XBr1LjIDy1xAGAnVs5supTA=="],

 "@opentelemetry/otlp-grpc-exporter-base/@opentelemetry/otlp-transformer/@opentelemetry/sdk-trace-base/@opentelemetry/semantic-conventions": ["@opentelemetry/semantic-conventions@1.28.0", "", {}, "sha512-lp4qAiMTD4sNWW4DbKLBkfiMZ4jbAboJIGOQr5DvciMRI494OapieI9qiODpOt0XBr1LjIDy1xAGAnVs5supTA=="],

@@ -1501,6 +1557,16 @@
 "@smithy/smithy-client/@smithy/util-stream/@smithy/node-http-handler/@smithy/querystring-builder": ["@smithy/querystring-builder@2.2.0", "", { "dependencies": { "@smithy/types": "^2.12.0", "@smithy/util-uri-escape": "^2.2.0", "tslib": "^2.6.2" } }, "sha512-L1kSeviUWL+emq3CUVSgdogoM/D9QMFaqxL/dd0X7PCNWmPXqt+ExtrBjqT0V7HLN03Vs9SuiLrG3zy3JGnE5A=="],

+"cli-highlight/yargs/cliui/strip-ansi": ["strip-ansi@6.0.1", "", { "dependencies": { "ansi-regex": "^5.0.1" } }, "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A=="],

+"cli-highlight/yargs/cliui/wrap-ansi": ["wrap-ansi@7.0.0", "", { "dependencies": { "ansi-styles": "^4.0.0", "string-width": "^4.1.0", "strip-ansi": "^6.0.0" } }, "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q=="],

+"cli-highlight/yargs/string-width/emoji-regex": ["emoji-regex@8.0.0", "", {}, "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A=="],

+"cli-highlight/yargs/string-width/is-fullwidth-code-point": ["is-fullwidth-code-point@3.0.0", "", {}, "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg=="],

+"cli-highlight/yargs/string-width/strip-ansi": ["strip-ansi@6.0.1", "", { "dependencies": { "ansi-regex": "^5.0.1" } }, "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A=="],

 "qrcode/yargs/cliui/strip-ansi": ["strip-ansi@6.0.1", "", { "dependencies": { "ansi-regex": "^5.0.1" } }, "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A=="],

 "qrcode/yargs/cliui/wrap-ansi": ["wrap-ansi@6.2.0", "", { "dependencies": { "ansi-styles": "^4.0.0", "string-width": "^4.1.0", "strip-ansi": "^6.0.0" } }, "sha512-r6lPcBGxZXlIcymEu7InxDMhdW0KDxpLgoFLcguasxCaJ/SOIZwINatK9KY/tf+ZrlywOKU0UDj3ATXUBfxJXA=="],

@@ -1513,16 +1579,16 @@
 "yargs/string-width/strip-ansi/ansi-regex": ["ansi-regex@5.0.1", "", {}, "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ=="],

+"@grpc/proto-loader/yargs/cliui/strip-ansi/ansi-regex": ["ansi-regex@5.0.1", "", {}, "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ=="],

+"@grpc/proto-loader/yargs/cliui/wrap-ansi/ansi-styles": ["ansi-styles@4.3.0", "", { "dependencies": { "color-convert": "^2.0.1" } }, "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg=="],

+"@grpc/proto-loader/yargs/string-width/strip-ansi/ansi-regex": ["ansi-regex@5.0.1", "", {}, "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ=="],

 "@smithy/smithy-client/@smithy/util-stream/@smithy/fetch-http-handler/@smithy/querystring-builder/@smithy/util-uri-escape": ["@smithy/util-uri-escape@2.2.0", "", { "dependencies": { "tslib": "^2.6.2" } }, "sha512-jtmJMyt1xMD/d8OtbVJ2gFZOSKc+ueYJZPW20ULW1GOp/q/YIM0wNh+u8ZFao9UaIGz4WoPW8hC64qlWLIfoDA=="],

 "@smithy/smithy-client/@smithy/util-stream/@smithy/node-http-handler/@smithy/querystring-builder/@smithy/util-uri-escape": ["@smithy/util-uri-escape@2.2.0", "", { "dependencies": { "tslib": "^2.6.2" } }, "sha512-jtmJMyt1xMD/d8OtbVJ2gFZOSKc+ueYJZPW20ULW1GOp/q/YIM0wNh+u8ZFao9UaIGz4WoPW8hC64qlWLIfoDA=="],

+"cli-highlight/yargs/cliui/strip-ansi/ansi-regex": ["ansi-regex@5.0.1", "", {}, "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ=="],

+"cli-highlight/yargs/cliui/wrap-ansi/ansi-styles": ["ansi-styles@4.3.0", "", { "dependencies": { "color-convert": "^2.0.1" } }, "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg=="],

+"cli-highlight/yargs/string-width/strip-ansi/ansi-regex": ["ansi-regex@5.0.1", "", {}, "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ=="],

 "qrcode/yargs/cliui/strip-ansi/ansi-regex": ["ansi-regex@5.0.1", "", {}, "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ=="],

 "qrcode/yargs/cliui/wrap-ansi/ansi-styles": ["ansi-styles@4.3.0", "", { "dependencies": { "color-convert": "^2.0.1" } }, "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg=="],
```
**docs/litellm-setup.md** (new file, 144 lines)

# LiteLLM Setup

OpenClaude can connect to LiteLLM through LiteLLM's OpenAI-compatible proxy.

## Overview

LiteLLM is an open-source LLM gateway that provides a unified API to 100+ model providers. By running the LiteLLM Proxy, you can route OpenClaude requests through LiteLLM to access any of its supported providers, all while using OpenClaude's existing OpenAI-compatible provider path.

## Prerequisites

- LiteLLM installed (`pip install litellm[proxy]`)
- A `litellm_config.yaml` or equivalent LiteLLM configuration
- LiteLLM Proxy running on a local or remote port

## 1. Start the LiteLLM Proxy

### Basic installation

```bash
pip install litellm[proxy]
```

### Configure LiteLLM

Create a `litellm_config.yaml` with your desired model aliases:

```yaml
model_list:
  - model_name: gpt-4o
    litellm_params:
      model: openai/gpt-4o
      api_key: os.environ/OPENAI_API_KEY

  - model_name: claude-sonnet-4
    litellm_params:
      model: anthropic/claude-sonnet-4-5-20250929
      api_key: os.environ/ANTHROPIC_API_KEY

  - model_name: gemini-2.5-flash
    litellm_params:
      model: gemini/gemini-2.5-flash
      api_key: os.environ/GEMINI_API_KEY

  - model_name: llama-3.3-70b
    litellm_params:
      model: together_ai/meta-llama/Llama-3.3-70B-Instruct-Turbo
      api_key: os.environ/TOGETHER_API_KEY
```

### Run the proxy

```bash
litellm --config litellm_config.yaml --port 4000
```

The proxy will start at `http://localhost:4000` by default.

## 2. Point OpenClaude to LiteLLM

### Option A: Environment Variables

```bash
export CLAUDE_CODE_USE_OPENAI=1
export OPENAI_BASE_URL=http://localhost:4000
export OPENAI_API_KEY=<your-master-key-or-placeholder>
export OPENAI_MODEL=<your-litellm-model-alias>
openclaude
```

Replace `<your-litellm-model-alias>` with a model name from your `litellm_config.yaml` (e.g., `gpt-4o`, `claude-sonnet-4`, `gemini-2.5-flash`).
### Option B: Using /provider

1. Run `openclaude`
2. Type `/provider` to open the provider setup flow
3. Choose the **OpenAI-compatible** option
4. When prompted for the API key, enter the key required by your LiteLLM proxy. If your local LiteLLM setup does not enforce auth, you may still need to enter a placeholder value
5. When prompted for the base URL, enter `http://localhost:4000`
6. When prompted for the model, enter the LiteLLM model name or alias you configured
7. Save the provider configuration
## 3. Example LiteLLM Configs

### Multi-provider routing with spend tracking

```yaml
model_list:
  - model_name: gpt-4o
    litellm_params:
      model: openai/gpt-4o
      api_key: os.environ/OPENAI_API_KEY

  - model_name: claude-sonnet-4
    litellm_params:
      model: anthropic/claude-sonnet-4-5-20250929
      api_key: os.environ/ANTHROPIC_API_KEY

  - model_name: deepseek-chat
    litellm_params:
      model: deepseek/deepseek-chat
      api_key: os.environ/DEEPSEEK_API_KEY

litellm_settings:
  set_verbose: false
  num_retries: 3
```

### With a master key for auth

```bash
# Start proxy with a master key
litellm --config litellm_config.yaml --port 4000 --master_key sk-my-master-key

# Connect OpenClaude
export CLAUDE_CODE_USE_OPENAI=1
export OPENAI_BASE_URL=http://localhost:4000
export OPENAI_API_KEY=sk-my-master-key
export OPENAI_MODEL=gpt-4o
openclaude
```

## 4. Notes

- `OPENAI_MODEL` must match the **LiteLLM model alias** defined in your config, not the raw upstream provider model name.
- If your proxy requires authentication, use the proxy key (or `master_key`) in `OPENAI_API_KEY`.
- LiteLLM's OpenAI-compatible endpoint accepts the same request format as OpenAI, so OpenClaude works without any code changes.
- You can switch between any provider configured in LiteLLM by simply changing the `OPENAI_MODEL` value, as shown below; no need to reconfigure OpenClaude.
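For example, hopping from `gpt-4o` to the Claude alias takes a one-variable change (alias names assume the sample config from section 1):

```bash
export OPENAI_MODEL=claude-sonnet-4
openclaude
```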
## 5. Troubleshooting

| Issue | Likely Cause | Fix |
|-------|--------------|-----|
| 404 or Model Not Found | Model alias doesn't exist in LiteLLM config | Verify the `model_name` in `litellm_config.yaml` matches `OPENAI_MODEL` |
| Connection Refused | LiteLLM proxy isn't running | Start the proxy with `litellm --config litellm_config.yaml --port 4000` |
| Auth Failed | Missing or wrong `master_key` | Set the correct key in `OPENAI_API_KEY` |
| Upstream provider error | The backend provider key is missing or invalid | Ensure the upstream API key (e.g., `OPENAI_API_KEY`) is set in your LiteLLM proxy process environment |
| Tools fail but chat works | The selected model has weak function/tool calling support | Switch to a model with strong tool support (e.g., GPT-4o, Claude Sonnet) |

## 6. Resources

- [LiteLLM Proxy Docs](https://docs.litellm.ai/docs/proxy/quick_start)
- [LiteLLM Provider List](https://docs.litellm.ai/docs/providers)
- [LiteLLM OpenAI-Compatible Endpoints](https://docs.litellm.ai/docs/proxy/openai_compatible_proxy)
**package.json** (12 changes)

```diff
@@ -1,6 +1,6 @@
 {
   "name": "@gitlawb/openclaude",
-  "version": "0.1.7",
+  "version": "0.1.8",
   "description": "Claude Code opened to any LLM — OpenAI, Gemini, DeepSeek, Ollama, and 200+ models",
   "type": "module",
   "bin": {
@@ -30,6 +30,8 @@
     "profile:code": "bun run profile:init -- --provider ollama --model qwen2.5-coder:7b",
     "dev:fast": "bun run profile:fast && bun run dev:ollama:fast",
     "dev:code": "bun run profile:code && bun run dev:profile",
+    "dev:grpc": "bun run scripts/start-grpc.ts",
+    "dev:grpc:cli": "bun run scripts/grpc-cli.ts",
     "start": "node dist/cli.mjs",
     "test": "bun test",
     "test:coverage": "bun test --coverage --coverage-reporter=lcov --coverage-dir=coverage --max-concurrency=1 && bun run scripts/render-coverage-heatmap.ts",
@@ -57,6 +59,8 @@
     "@anthropic-ai/vertex-sdk": "0.14.4",
     "@commander-js/extra-typings": "12.1.0",
     "@growthbook/growthbook": "1.6.5",
+    "@grpc/grpc-js": "^1.14.3",
+    "@grpc/proto-loader": "^0.8.0",
     "@mendable/firecrawl-js": "4.18.1",
     "@modelcontextprotocol/sdk": "1.29.0",
     "@opentelemetry/api": "1.9.1",
@@ -95,7 +99,7 @@
     "ignore": "7.0.5",
     "indent-string": "5.0.0",
     "jsonc-parser": "3.3.1",
-    "lodash-es": "4.18.0",
+    "lodash-es": "4.18.1",
     "lru-cache": "11.2.7",
     "marked": "15.0.12",
     "p-map": "7.0.4",
@@ -128,6 +132,7 @@
     "@types/bun": "1.3.11",
     "@types/node": "25.5.0",
     "@types/react": "19.2.14",
+    "tsx": "^4.21.0",
     "typescript": "5.9.3"
   },
   "engines": {
@@ -150,5 +155,8 @@
   "license": "SEE LICENSE FILE",
   "publishConfig": {
     "access": "public"
   },
+  "overrides": {
+    "lodash-es": "4.18.1"
+  }
 }
```
**scripts/grpc-cli.ts** (new file, 121 lines)

```ts
import * as grpc from '@grpc/grpc-js'
import * as protoLoader from '@grpc/proto-loader'
import path from 'path'
import * as readline from 'readline'

const PROTO_PATH = path.resolve(import.meta.dirname, '../src/proto/openclaude.proto')

const packageDefinition = protoLoader.loadSync(PROTO_PATH, {
  keepCase: true,
  longs: String,
  enums: String,
  defaults: true,
  oneofs: true,
})

const protoDescriptor = grpc.loadPackageDefinition(packageDefinition) as any
const openclaudeProto = protoDescriptor.openclaude.v1

const rl = readline.createInterface({
  input: process.stdin,
  output: process.stdout
})

function askQuestion(query: string): Promise<string> {
  return new Promise(resolve => {
    rl.question(query, resolve)
  })
}

async function main() {
  const host = process.env.GRPC_HOST || 'localhost'
  const port = process.env.GRPC_PORT || '50051'
  const client = new openclaudeProto.AgentService(
    `${host}:${port}`,
    grpc.credentials.createInsecure()
  )

  let call: grpc.ClientDuplexStream<any, any> | null = null

  const startStream = () => {
    call = client.Chat()
    let textStreamed = false

    call.on('data', async (serverMessage: any) => {
      if (serverMessage.text_chunk) {
        process.stdout.write(serverMessage.text_chunk.text)
        textStreamed = true
      } else if (serverMessage.tool_start) {
        console.log(`\n\x1b[36m[Tool Call]\x1b[0m \x1b[1m${serverMessage.tool_start.tool_name}\x1b[0m`)
        console.log(`\x1b[90m${serverMessage.tool_start.arguments_json}\x1b[0m\n`)
      } else if (serverMessage.tool_result) {
        console.log(`\n\x1b[32m[Tool Result]\x1b[0m \x1b[1m${serverMessage.tool_result.tool_name}\x1b[0m`)
        const out = serverMessage.tool_result.output
        if (out.length > 500) {
          console.log(`\x1b[90m${out.substring(0, 500)}...\n(Output truncated, total length: ${out.length})\x1b[0m`)
        } else {
          console.log(`\x1b[90m${out}\x1b[0m`)
        }
      } else if (serverMessage.action_required) {
        const action = serverMessage.action_required
        console.log(`\n\x1b[33m[Action Required]\x1b[0m`)
        const reply = await askQuestion(`\x1b[1m${action.question}\x1b[0m (y/n) > `)

        call?.write({
          input: {
            prompt_id: action.prompt_id,
            reply: reply.trim()
          }
        })
      } else if (serverMessage.done) {
        if (!textStreamed && serverMessage.done.full_text) {
          process.stdout.write(serverMessage.done.full_text)
        }
        textStreamed = false
        console.log('\n\x1b[32m[Generation Complete]\x1b[0m')
        promptUser()
      } else if (serverMessage.error) {
        console.error(`\n\x1b[31m[Server Error]\x1b[0m ${serverMessage.error.message}`)
        promptUser()
      }
    })

    call.on('end', () => {
      console.log('\n\x1b[90m[Stream closed by server]\x1b[0m')
      // Don't prompt user here, let 'done' or 'error' handlers do it
    })

    call.on('error', (err: Error) => {
      console.error('\n\x1b[31m[Stream Error]\x1b[0m', err.message)
      promptUser()
    })
  }

  const promptUser = async () => {
    const message = await askQuestion('\n\x1b[35m> \x1b[0m')

    if (message.trim().toLowerCase() === '/exit' || message.trim().toLowerCase() === '/quit') {
      console.log('Bye!')
      rl.close()
      process.exit(0)
    }

    if (!call || call.destroyed) {
      startStream()
    }

    call!.write({
      request: {
        session_id: 'cli-session-1',
        message: message,
        working_directory: process.cwd()
      }
    })
  }

  console.log('\x1b[32mOpenClaude gRPC CLI\x1b[0m')
  console.log('\x1b[90mType /exit to quit.\x1b[0m')
  promptUser()
}

main()
```
**scripts/start-grpc.ts** (new file, 50 lines)

```ts
import { GrpcServer } from '../src/grpc/server.ts'
import { init } from '../src/entrypoints/init.ts'

// Polyfill MACRO which is normally injected by the bundler
Object.assign(globalThis, {
  MACRO: {
    VERSION: '0.1.7',
    DISPLAY_VERSION: '0.1.7',
    PACKAGE_URL: '@gitlawb/openclaude',
  }
})

async function main() {
  console.log('Starting OpenClaude gRPC Server...')
  await init()

  // Mirror CLI bootstrap: hydrate secure tokens and resolve provider profile
  const { enableConfigs } = await import('../src/utils/config.js')
  enableConfigs()
  const { applySafeConfigEnvironmentVariables } = await import('../src/utils/managedEnv.js')
  applySafeConfigEnvironmentVariables()
  const { hydrateGeminiAccessTokenFromSecureStorage } = await import('../src/utils/geminiCredentials.js')
  hydrateGeminiAccessTokenFromSecureStorage()
  const { hydrateGithubModelsTokenFromSecureStorage } = await import('../src/utils/githubModelsCredentials.js')
  hydrateGithubModelsTokenFromSecureStorage()

  const { buildStartupEnvFromProfile, applyProfileEnvToProcessEnv } = await import('../src/utils/providerProfile.js')
  const { getProviderValidationError, validateProviderEnvOrExit } = await import('../src/utils/providerValidation.js')
  const startupEnv = await buildStartupEnvFromProfile({ processEnv: process.env })
  if (startupEnv !== process.env) {
    const startupProfileError = await getProviderValidationError(startupEnv)
    if (startupProfileError) {
      console.warn(`Warning: ignoring saved provider profile. ${startupProfileError}`)
    } else {
      applyProfileEnvToProcessEnv(process.env, startupEnv)
    }
  }
  await validateProviderEnvOrExit()

  const port = process.env.GRPC_PORT ? parseInt(process.env.GRPC_PORT, 10) : 50051
  const host = process.env.GRPC_HOST || 'localhost'
  const server = new GrpcServer()

  server.start(port, host)
}

main().catch((err) => {
  console.error('Fatal error starting gRPC server:', err)
  process.exit(1)
})
```
**src/commands/model/model.test.tsx** (new file, 42 lines)

```tsx
import { afterEach, expect, mock, test } from 'bun:test'

const originalEnv = {
  CLAUDE_CODE_USE_OPENAI: process.env.CLAUDE_CODE_USE_OPENAI,
  OPENAI_BASE_URL: process.env.OPENAI_BASE_URL,
  OPENAI_MODEL: process.env.OPENAI_MODEL,
}

afterEach(() => {
  mock.restore()
  process.env.CLAUDE_CODE_USE_OPENAI = originalEnv.CLAUDE_CODE_USE_OPENAI
  process.env.OPENAI_BASE_URL = originalEnv.OPENAI_BASE_URL
  process.env.OPENAI_MODEL = originalEnv.OPENAI_MODEL
})

test('opens the model picker without awaiting local model discovery refresh', async () => {
  process.env.CLAUDE_CODE_USE_OPENAI = '1'
  process.env.OPENAI_BASE_URL = 'http://127.0.0.1:8080/v1'
  process.env.OPENAI_MODEL = 'qwen2.5-coder-7b-instruct'

  let resolveDiscovery: (() => void) | undefined
  const discoverOpenAICompatibleModelOptions = mock(
    () =>
      new Promise<void>(resolve => {
        resolveDiscovery = resolve
      }),
  )

  mock.module('../../utils/model/openaiModelDiscovery.js', () => ({
    discoverOpenAICompatibleModelOptions,
  }))

  const { call } = await import(`./model.js?ts=${Date.now()}-${Math.random()}`)
  const result = await Promise.race([
    call(() => {}, {} as never, ''),
    new Promise(resolve => setTimeout(() => resolve('timeout'), 50)),
  ])

  resolveDiscovery?.()

  expect(result).not.toBe('timeout')
})
```
```diff
@@ -4,6 +4,7 @@ import * as React from 'react';
 import type { CommandResultDisplay } from '../../commands.js';
 import { ModelPicker } from '../../components/ModelPicker.js';
 import { COMMON_HELP_ARGS, COMMON_INFO_ARGS } from '../../constants/xml.js';
 import { fetchBootstrapData } from '../../services/api/bootstrap.js';
 import { type AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS, logEvent } from '../../services/analytics/index.js';
 import { useAppState, useSetAppState } from '../../state/AppState.js';
 import type { LocalJSXCommandCall } from '../../types/command.js';
@@ -19,6 +20,7 @@ import { getActiveOpenAIModelOptionsCache, setActiveOpenAIModelOptionsCache } fr
 import { getDefaultMainLoopModelSetting, isOpus1mMergeEnabled, renderDefaultModelSetting } from '../../utils/model/model.js';
 import { isModelAllowed } from '../../utils/model/modelAllowlist.js';
 import { validateModel } from '../../utils/model/validateModel.js';
+import { getAdditionalModelOptionsCacheScope } from '../../services/api/providerConfig.js';
 function ModelPickerWrapper(t0) {
   const $ = _c(17);
   const {
@@ -319,7 +321,9 @@ export const call: LocalJSXCommandCall = async (onDone, _context, args) => {
     });
     return <SetModelAndClose args={args} onDone={onDone} />;
   }
-  await refreshOpenAIModelOptionsCache();
+  if (getAdditionalModelOptionsCacheScope()?.startsWith('openai:')) {
+    void refreshOpenAIModelOptionsCache();
+  }
   return <ModelPickerWrapper onDone={onDone} />;
 };
 function renderModelLabel(model: string | null): string {
```
```diff
@@ -197,6 +197,21 @@ test('buildProfileSaveMessage maps provider fields without echoing secrets', ()
   expect(message).not.toContain('sk-secret-12345678')
 })

+test('buildProfileSaveMessage labels local openai-compatible profiles consistently', () => {
+  const message = buildProfileSaveMessage(
+    'openai',
+    {
+      OPENAI_MODEL: 'gpt-5.4',
+      OPENAI_BASE_URL: 'http://127.0.0.1:8080/v1',
+    },
+    'D:/codings/Opensource/openclaude/.openclaude-profile.json',
+  )
+
+  expect(message).toContain('Saved Local OpenAI-compatible profile.')
+  expect(message).toContain('Model: gpt-5.4')
+  expect(message).toContain('Endpoint: http://127.0.0.1:8080/v1')
+})
+
 test('buildProfileSaveMessage describes Gemini access token / ADC mode clearly', () => {
   const message = buildProfileSaveMessage(
     'gemini',
@@ -230,6 +245,36 @@ test('buildCurrentProviderSummary redacts poisoned model and endpoint values', (
   expect(summary.endpointLabel).toBe('sk-...5678')
 })

+test('buildCurrentProviderSummary labels generic local openai-compatible providers', () => {
+  const summary = buildCurrentProviderSummary({
+    processEnv: {
+      CLAUDE_CODE_USE_OPENAI: '1',
+      OPENAI_MODEL: 'qwen2.5-coder-7b-instruct',
+      OPENAI_BASE_URL: 'http://127.0.0.1:8080/v1',
+    },
+    persisted: null,
+  })
+
+  expect(summary.providerLabel).toBe('Local OpenAI-compatible')
+  expect(summary.modelLabel).toBe('qwen2.5-coder-7b-instruct')
+  expect(summary.endpointLabel).toBe('http://127.0.0.1:8080/v1')
+})
+
+test('buildCurrentProviderSummary does not relabel local gpt-5.4 providers as Codex', () => {
+  const summary = buildCurrentProviderSummary({
+    processEnv: {
+      CLAUDE_CODE_USE_OPENAI: '1',
+      OPENAI_MODEL: 'gpt-5.4',
+      OPENAI_BASE_URL: 'http://127.0.0.1:8080/v1',
+    },
+    persisted: null,
+  })
+
+  expect(summary.providerLabel).toBe('Local OpenAI-compatible')
+  expect(summary.modelLabel).toBe('gpt-5.4')
+  expect(summary.endpointLabel).toBe('http://127.0.0.1:8080/v1')
+})
+
 test('getProviderWizardDefaults ignores poisoned current provider values', () => {
   const defaults = getProviderWizardDefaults({
     OPENAI_API_KEY: 'sk-secret-12345678',
```
```diff
@@ -15,6 +15,7 @@ import { Box, Text } from '../../ink.js'
 import {
   DEFAULT_CODEX_BASE_URL,
   DEFAULT_OPENAI_BASE_URL,
+  isLocalProviderUrl,
   resolveCodexApiCredentials,
   resolveProviderRequest,
 } from '../../services/api/providerConfig.js'
@@ -52,7 +53,11 @@
   recommendOllamaModel,
   type RecommendationGoal,
 } from '../../utils/providerRecommendation.js'
-import { hasLocalOllama, listOllamaModels } from '../../utils/providerDiscovery.js'
+import {
+  getLocalOpenAICompatibleProviderLabel,
+  hasLocalOllama,
+  listOllamaModels,
+} from '../../utils/providerDiscovery.js'

 type ProviderChoice = 'auto' | ProviderProfile | 'clear'

@@ -182,10 +187,8 @@ export function buildCurrentProviderSummary(options?: {
   let providerLabel = 'OpenAI-compatible'
   if (request.transport === 'codex_responses') {
     providerLabel = 'Codex'
-  } else if (request.baseUrl.includes('localhost:11434')) {
-    providerLabel = 'Ollama'
-  } else if (request.baseUrl.includes('localhost:1234')) {
-    providerLabel = 'LM Studio'
+  } else if (isLocalProviderUrl(request.baseUrl)) {
+    providerLabel = getLocalOpenAICompatibleProviderLabel(request.baseUrl)
   }

   return {
```
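For orientation, here is a minimal sketch of what the new helper might look like. This is an assumption-laden illustration, not the actual implementation (which lives in `src/utils/providerDiscovery.ts`); the port-to-name mapping and the fallback label are inferred from the inline checks removed above and from the test expectations earlier in this diff.

```ts
// Hypothetical sketch of the helper from src/utils/providerDiscovery.ts;
// the real implementation may differ.
export function getLocalOpenAICompatibleProviderLabel(baseUrl: string): string {
  try {
    const { port } = new URL(baseUrl)
    if (port === '11434') return 'Ollama'    // Ollama's default port
    if (port === '1234') return 'LM Studio'  // LM Studio's default port
  } catch {
    // Unparsable URL: fall through to the generic label
  }
  // Matches the label asserted by the buildCurrentProviderSummary tests
  return 'Local OpenAI-compatible'
}
```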
```diff
@@ -272,16 +275,20 @@ function buildSavedProfileSummary(
       ),
     }
     case 'openai':
-    default:
+    default: {
+      const baseUrl = env.OPENAI_BASE_URL ?? DEFAULT_OPENAI_BASE_URL
+
       return {
-        providerLabel: 'OpenAI-compatible',
+        providerLabel: isLocalProviderUrl(baseUrl)
+          ? getLocalOpenAICompatibleProviderLabel(baseUrl)
+          : 'OpenAI-compatible',
         modelLabel: getSafeDisplayValue(
           env.OPENAI_MODEL ?? 'gpt-4o',
           process.env,
           env,
         ),
         endpointLabel: getSafeDisplayValue(
-          env.OPENAI_BASE_URL ?? DEFAULT_OPENAI_BASE_URL,
+          baseUrl,
           process.env,
           env,
         ),
@@ -290,6 +297,7 @@
           ? 'configured'
           : undefined,
       }
+    }
   }
 }
```
```diff
@@ -67,6 +67,7 @@ import { isBilledAsExtraUsage } from '../../utils/extraUsage.js';
 import { getFastModeUnavailableReason, isFastModeAvailable, isFastModeCooldown, isFastModeEnabled, isFastModeSupportedByModel } from '../../utils/fastMode.js';
 import { isFullscreenEnvEnabled } from '../../utils/fullscreen.js';
 import type { PromptInputHelpers } from '../../utils/handlePromptSubmit.js';
+import { extractDraggedFilePaths } from '../../utils/dragDropPaths.js';
 import { getImageFromClipboard, PASTE_THRESHOLD } from '../../utils/imagePaste.js';
 import type { ImageDimensions } from '../../utils/imageResizer.js';
 import { cacheImagePath, storeImage } from '../../utils/imageStore.js';
@@ -1204,6 +1205,22 @@ function PromptInput({
   // Clean up pasted text - strip ANSI escape codes and normalize line endings and tabs
   let text = stripAnsi(rawText).replace(/\r/g, '\n').replaceAll('\t', '  ');

+  // Detect file paths from drag-and-drop and convert to @mentions.
+  // When files are dragged into the terminal, the terminal sends their
+  // absolute paths via bracketed paste. Image files are handled by the
+  // image paste handler upstream; here we handle non-image files by
+  // converting them to @mentions so they get attached on submit.
+  const draggedPaths = extractDraggedFilePaths(text);
+  if (draggedPaths.length > 0) {
+    const mentions = draggedPaths
+      .map(p => (p.includes(' ') || p.includes(':') ? `@"${p}"` : `@${p}`))
+      .join(' ');
+    // Ensure spacing around the mention(s) relative to existing input
+    const charBefore = input[cursorOffset - 1];
+    const prefix = charBefore && !/\s/.test(charBefore) ? ' ' : '';
+    text = prefix + mentions + ' ';
+  }
+
   // Match typed/auto-suggest: `!cmd` pasted into empty input enters bash mode.
   if (input.length === 0) {
     const pastedMode = getModeFromInput(text);
```
```diff
@@ -1245,12 +1262,23 @@
     if (isNonSpacePrintable(input, key)) return ' ' + input;
     return input;
   }, []);
+  // Ref mirrors cursorOffset for use in synchronous loops (e.g. multi-image
+  // paste) where React batches state updates and the closure value is stale.
+  const cursorOffsetRef = useRef(cursorOffset);
+  cursorOffsetRef.current = cursorOffset;

   function insertTextAtCursor(text: string) {
-    // Push current state to buffer before inserting
-    pushToBuffer(input, cursorOffset, pastedContents);
-    const newInput = input.slice(0, cursorOffset) + text + input.slice(cursorOffset);
+    // Use refs for input/cursor so back-to-back calls in the same event
+    // (e.g. onImagePaste loop for multiple dragged images) chain correctly
+    // instead of each reading the same stale closure values.
+    const currentInput = lastInternalInputRef.current;
+    const currentOffset = cursorOffsetRef.current;
+    pushToBuffer(currentInput, currentOffset, pastedContents);
+    const newInput = currentInput.slice(0, currentOffset) + text + currentInput.slice(currentOffset);
     trackAndSetInput(newInput);
-    setCursorOffset(cursorOffset + text.length);
+    const newOffset = currentOffset + text.length;
+    cursorOffsetRef.current = newOffset;
+    setCursorOffset(newOffset);
   }
   const doublePressEscFromEmpty = useDoublePress(() => {}, () => onShowMessageSelector());
```
```diff
@@ -123,8 +123,6 @@ const SuggestionItemRow = memo(function SuggestionItemRow({
     maxColumnWidth ?? stringWidth(item.displayText) + 5,
     maxNameWidth,
   )
-  const displayTextColor = isSelected ? 'inverseText' : item.color
-  const shouldDim = !isSelected

   let displayText = item.displayText
   if (stringWidth(displayText) > displayTextWidth - 2) {
@@ -144,21 +142,17 @@ const SuggestionItemRow = memo(function SuggestionItemRow({
   const truncatedDescription = item.description
     ? truncateToWidth(item.description.replace(/\s+/g, ' '), descriptionWidth)
     : ''
+  const lineContent = `${paddedDisplayText}${tagText}${truncatedDescription}`

   return (
     <Box width="100%" opaque={true} backgroundColor={rowBackgroundColor}>
-      <Text wrap="truncate">
-        <Text color={displayTextColor} dimColor={shouldDim} bold={isSelected}>
-          {paddedDisplayText}
-        </Text>
-        {tagText ? (
-          <Text color={textColor} dimColor={!isSelected}>
-            {tagText}
-          </Text>
-        ) : null}
-        <Text color={textColor} dimColor={!isSelected}>
-          {truncatedDescription}
-        </Text>
+      <Text
+        color={textColor}
+        dimColor={!isSelected}
+        bold={isSelected}
+        wrap="truncate"
+      >
+        {lineContent}
       </Text>
     </Box>
   )
```
@@ -5,6 +5,9 @@
|
||||
* Addresses: https://github.com/Gitlawb/openclaude/issues/55
|
||||
*/
|
||||
|
||||
import { isLocalProviderUrl } from '../services/api/providerConfig.js'
|
||||
import { getLocalOpenAICompatibleProviderLabel } from '../utils/providerDiscovery.js'
|
||||
|
||||
declare const MACRO: { VERSION: string; DISPLAY_VERSION?: string }
|
||||
|
||||
const ESC = '\x1b['
|
||||
@@ -99,7 +102,7 @@ function detectProvider(): { name: string; model: string; baseUrl: string; isLoc
|
||||
if (useOpenAI) {
|
||||
const rawModel = process.env.OPENAI_MODEL || 'gpt-4o'
|
||||
const baseUrl = process.env.OPENAI_BASE_URL || 'https://api.openai.com/v1'
|
||||
const isLocal = /localhost|127\.0\.0\.1|0\.0\.0\.0/.test(baseUrl)
|
||||
const isLocal = isLocalProviderUrl(baseUrl)
|
||||
let name = 'OpenAI'
|
||||
if (/deepseek/i.test(baseUrl) || /deepseek/i.test(rawModel)) name = 'DeepSeek'
|
||||
else if (/openrouter/i.test(baseUrl)) name = 'OpenRouter'
|
||||
@@ -107,10 +110,8 @@ function detectProvider(): { name: string; model: string; baseUrl: string; isLoc
|
||||
else if (/groq/i.test(baseUrl)) name = 'Groq'
|
||||
else if (/mistral/i.test(baseUrl) || /mistral/i.test(rawModel)) name = 'Mistral'
|
||||
else if (/azure/i.test(baseUrl)) name = 'Azure OpenAI'
|
||||
else if (/localhost:11434/i.test(baseUrl)) name = 'Ollama'
|
||||
else if (/localhost:1234/i.test(baseUrl)) name = 'LM Studio'
|
||||
else if (/llama/i.test(rawModel)) name = 'Meta Llama'
|
||||
else if (isLocal) name = 'Local'
|
||||
else if (isLocal) name = getLocalOpenAICompatibleProviderLabel(baseUrl)
|
||||
|
||||
// Resolve model alias to actual model name + reasoning effort
|
||||
let displayModel = rawModel
|
||||
|
||||
113
src/components/ThemePicker.test.tsx
Normal file
@@ -0,0 +1,113 @@
import { describe, expect, it, mock } from 'bun:test'

// We can't fully render ThemePicker due to complex dependencies
// But we can test the theme options generation logic
describe('ThemePicker', () => {
describe('theme options', () => {
it('generates correct theme options without AUTO_THEME feature flag', () => {
// Since we can't easily mock bun:bundle, test the options structure
// The real test would require integration testing
const expectedOptions = [
{ label: "Dark mode", value: "dark" },
{ label: "Light mode", value: "light" },
{ label: "Dark mode (colorblind-friendly)", value: "dark-daltonized" },
{ label: "Light mode (colorblind-friendly)", value: "light-daltonized" },
{ label: "Dark mode (ANSI colors only)", value: "dark-ansi" },
{ label: "Light mode (ANSI colors only)", value: "light-ansi" },
]
expect(expectedOptions.length).toBe(6)
})

it('includes auto theme when AUTO_THEME feature is enabled', () => {
// Test the structure when auto is present
const optionsWithAuto = [
{ label: "Auto (match terminal)", value: "auto" },
{ label: "Dark mode", value: "dark" },
]
expect(optionsWithAuto[0].value).toBe('auto')
})
})

describe('handleRowFocus callback', () => {
it('setPreviewTheme is called with theme setting', () => {
const setPreviewTheme = mock()
const handleRowFocus = (setting: string) => setPreviewTheme(setting)

handleRowFocus('dark')
expect(setPreviewTheme).toHaveBeenCalledWith('dark')
})
})

describe('handleSelect callback', () => {
it('calls savePreview and onThemeSelect', () => {
const savePreview = mock()
const onThemeSelect = mock()
const handleSelect = (setting: string) => {
savePreview()
onThemeSelect(setting)
}

handleSelect('light')
expect(savePreview).toHaveBeenCalled()
expect(onThemeSelect).toHaveBeenCalledWith('light')
})
})

describe('handleCancel callback', () => {
it('calls cancelPreview and gracefulShutdown when not skipExitHandling', () => {
const cancelPreview = mock()
const gracefulShutdown = mock()
const handleCancel = (skipExitHandling: boolean, onCancelProp?: () => void) => {
cancelPreview()
if (skipExitHandling) {
onCancelProp?.()
} else {
gracefulShutdown(0)
}
}

handleCancel(false)
expect(cancelPreview).toHaveBeenCalled()
expect(gracefulShutdown).toHaveBeenCalledWith(0)
})

it('calls onCancelProp when skipExitHandling is true', () => {
const cancelPreview = mock()
const onCancelProp = mock()
const handleCancel = (skipExitHandling: boolean, onCancelProp?: () => void) => {
cancelPreview()
if (skipExitHandling) {
onCancelProp?.()
}
}

handleCancel(true, onCancelProp)
expect(cancelPreview).toHaveBeenCalled()
expect(onCancelProp).toHaveBeenCalled()
})
})

describe('syntax hint logic', () => {
it('shows disabled hint when syntax highlighting is disabled', () => {
const syntaxHighlightingDisabled = true
const syntaxToggleShortcut = 'Ctrl+T'

const hint = syntaxHighlightingDisabled
? `Syntax highlighting disabled (${syntaxToggleShortcut} to enable)`
: `Syntax highlighting enabled (${syntaxToggleShortcut} to disable)`

expect(hint).toContain('disabled')
})

it('shows enabled hint when syntax highlighting is active', () => {
const syntaxHighlightingDisabled = false
const syntaxToggleShortcut = 'Ctrl+T'

const hint = !syntaxHighlightingDisabled
? `Syntax highlighting enabled (${syntaxToggleShortcut} to disable)`
: `Syntax highlighting disabled (${syntaxToggleShortcut} to enable)`

expect(hint).toContain('enabled')
})
})
})
@@ -1,13 +1,14 @@
import { c as _c } from "react-compiler-runtime";
import { feature } from 'bun:bundle';
import type { StructuredPatchHunk } from 'diff';
import * as React from 'react';
import { useExitOnCtrlCDWithKeybindings } from '../hooks/useExitOnCtrlCDWithKeybindings.js';
import { useExitOnCtrlCDWithKeybindings } from '../hooks/useExitOnCtrlCDWithKeybindings.js'
import { useTerminalSize } from '../hooks/useTerminalSize.js';
import { Box, Text, usePreviewTheme, useTheme, useThemeSetting } from '../ink.js';
import { useRegisterKeybindingContext } from '../keybindings/KeybindingContext.js';
import { useKeybinding } from '../keybindings/useKeybinding.js';
import { useShortcutDisplay } from '../keybindings/useShortcutDisplay.js';
import { useAppState, useSetAppState } from '../state/AppState.js';
import type { AppState } from '../state/AppStateStore.js';
import { gracefulShutdown } from '../utils/gracefulShutdown.js';
import { updateSettingsForSource } from '../utils/settings/settings.js';
import type { ThemeSetting } from '../utils/theme.js';
@@ -16,6 +17,17 @@ import { Byline } from './design-system/Byline.js';
import { KeyboardShortcutHint } from './design-system/KeyboardShortcutHint.js';
import { getColorModuleUnavailableReason, getSyntaxTheme } from './StructuredDiff/colorDiff.js';
import { StructuredDiff } from './StructuredDiff.js';

type StructuredDiffComponent = React.ComponentType<{
patch: StructuredPatchHunk
dim: boolean
filePath: string
firstLine: string | null
width: number
skipHighlighting?: boolean
}>
const StructuredDiffView = StructuredDiff as StructuredDiffComponent

export type ThemePickerProps = {
onThemeSelect: (setting: ThemeSetting) => void;
showIntroText?: boolean;
@@ -26,307 +38,224 @@ export type ThemePickerProps = {
skipExitHandling?: boolean;
/** Called when the user cancels (presses Escape). If skipExitHandling is true and this is provided, it will be called instead of just saving the preview. */
onCancel?: () => void;
};
export function ThemePicker(t0) {
const $ = _c(59);
const {
onThemeSelect,
showIntroText: t1,
helpText: t2,
showHelpTextBelow: t3,
hideEscToCancel: t4,
skipExitHandling: t5,
onCancel: onCancelProp
} = t0;
const showIntroText = t1 === undefined ? false : t1;
const helpText = t2 === undefined ? "" : t2;
const showHelpTextBelow = t3 === undefined ? false : t3;
const hideEscToCancel = t4 === undefined ? false : t4;
const skipExitHandling = t5 === undefined ? false : t5;
}

const DEMO_PATCH: StructuredPatchHunk = {
oldStart: 1,
newStart: 1,
oldLines: 3,
newLines: 3,
lines: [
' function greet() {',
'- console.log("Hello, World!");',
'+ console.log("Hello, Claude!");',
' }',
],
}

/**
* Theme chooser with live preview. Implemented without react-compiler `_c` memo
* caches so preview/subtree reconciliation cannot stick on stale element refs when
* `setPreviewTheme` updates the resolved palette.
*/
export function ThemePicker({
onThemeSelect,
showIntroText = false,
helpText = '',
showHelpTextBelow = false,
hideEscToCancel = false,
skipExitHandling = false,
onCancel: onCancelProp,
}: ThemePickerProps) {
const [theme] = useTheme();
const themeSetting = useThemeSetting();
const {
columns
} = useTerminalSize();
let t6;
if ($[0] === Symbol.for("react.memo_cache_sentinel")) {
t6 = getColorModuleUnavailableReason();
$[0] = t6;
} else {
t6 = $[0];
}
const colorModuleUnavailableReason = t6;
let t7;
if ($[1] !== theme) {
t7 = colorModuleUnavailableReason === null ? getSyntaxTheme(theme) : null;
$[1] = theme;
$[2] = t7;
} else {
t7 = $[2];
}
const syntaxTheme = t7;
const {
setPreviewTheme,
savePreview,
cancelPreview
} = usePreviewTheme();
const syntaxHighlightingDisabled = useAppState(_temp) ?? false;
const { columns } = useTerminalSize();
const colorModuleUnavailableReason = React.useMemo(
() => getColorModuleUnavailableReason(),
[],
)
const syntaxTheme =
colorModuleUnavailableReason === null ? getSyntaxTheme(theme) : null
const { setPreviewTheme, savePreview, cancelPreview } = usePreviewTheme()
const syntaxHighlightingDisabled = useAppState(
(s: AppState) => s.settings.syntaxHighlightingDisabled ?? false
);
const setAppState = useSetAppState();
useRegisterKeybindingContext("ThemePicker");
useRegisterKeybindingContext("ThemePicker", true);
const syntaxToggleShortcut = useShortcutDisplay("theme:toggleSyntaxHighlighting", "ThemePicker", "ctrl+t");
let t8;
if ($[3] !== setAppState || $[4] !== syntaxHighlightingDisabled) {
t8 = () => {
if (colorModuleUnavailableReason === null) {
const newValue = !syntaxHighlightingDisabled;
updateSettingsForSource("userSettings", {

const toggleSyntax = React.useCallback(() => {
if (colorModuleUnavailableReason === null) {
const newValue = !syntaxHighlightingDisabled
updateSettingsForSource("userSettings", {
syntaxHighlightingDisabled: newValue
});
setAppState(prev => ({
...prev,
settings: {
...prev.settings,
syntaxHighlightingDisabled: newValue
});
setAppState(prev => ({
...prev,
settings: {
...prev.settings,
syntaxHighlightingDisabled: newValue
}
}));
}
};
$[3] = setAppState;
$[4] = syntaxHighlightingDisabled;
$[5] = t8;
} else {
t8 = $[5];
}
let t9;
if ($[6] === Symbol.for("react.memo_cache_sentinel")) {
t9 = {
context: "ThemePicker"
};
$[6] = t9;
} else {
t9 = $[6];
}
useKeybinding("theme:toggleSyntaxHighlighting", t8, t9);
const exitState = useExitOnCtrlCDWithKeybindings(skipExitHandling ? _temp2 : undefined);
let t10;
if ($[7] === Symbol.for("react.memo_cache_sentinel")) {
t10 = [...(feature("AUTO_THEME") ? [{
label: "Auto (match terminal)",
value: "auto" as const
}] : []), {
label: "Dark mode",
value: "dark"
}, {
label: "Light mode",
value: "light"
}, {
label: "Dark mode (colorblind-friendly)",
value: "dark-daltonized"
}, {
label: "Light mode (colorblind-friendly)",
value: "light-daltonized"
}, {
label: "Dark mode (ANSI colors only)",
value: "dark-ansi"
}, {
label: "Light mode (ANSI colors only)",
value: "light-ansi"
}];
$[7] = t10;
} else {
t10 = $[7];
}
const themeOptions = t10;
let t11;
if ($[8] !== showIntroText) {
t11 = showIntroText ? <Text>Let's get started.</Text> : <Text bold={true} color="permission">Theme</Text>;
$[8] = showIntroText;
$[9] = t11;
} else {
t11 = $[9];
}
let t12;
if ($[10] === Symbol.for("react.memo_cache_sentinel")) {
t12 = <Text bold={true}>Choose the text style that looks best with your terminal</Text>;
$[10] = t12;
} else {
t12 = $[10];
}
let t13;
if ($[11] !== helpText || $[12] !== showHelpTextBelow) {
t13 = helpText && !showHelpTextBelow && <Text dimColor={true}>{helpText}</Text>;
$[11] = helpText;
$[12] = showHelpTextBelow;
$[13] = t13;
} else {
t13 = $[13];
}
let t14;
if ($[14] !== t13) {
t14 = <Box flexDirection="column">{t12}{t13}</Box>;
$[14] = t13;
$[15] = t14;
} else {
t14 = $[15];
}
let t15;
if ($[16] !== setPreviewTheme) {
t15 = setting => {
setPreviewTheme(setting as ThemeSetting);
};
$[16] = setPreviewTheme;
$[17] = t15;
} else {
t15 = $[17];
}
let t16;
if ($[18] !== onThemeSelect || $[19] !== savePreview) {
t16 = setting_0 => {
savePreview();
onThemeSelect(setting_0 as ThemeSetting);
};
$[18] = onThemeSelect;
$[19] = savePreview;
$[20] = t16;
} else {
t16 = $[20];
}
let t17;
if ($[21] !== cancelPreview || $[22] !== onCancelProp || $[23] !== skipExitHandling) {
t17 = skipExitHandling ? () => {
cancelPreview();
onCancelProp?.();
} : async () => {
cancelPreview();
await gracefulShutdown(0);
};
$[21] = cancelPreview;
$[22] = onCancelProp;
$[23] = skipExitHandling;
$[24] = t17;
} else {
t17 = $[24];
}
let t18;
if ($[25] !== t15 || $[26] !== t16 || $[27] !== t17 || $[28] !== themeSetting) {
t18 = <Select options={themeOptions} onFocus={t15} onChange={t16} onCancel={t17} visibleOptionCount={themeOptions.length} defaultValue={themeSetting} defaultFocusValue={themeSetting} />;
$[25] = t15;
$[26] = t16;
$[27] = t17;
$[28] = themeSetting;
$[29] = t18;
} else {
t18 = $[29];
}
let t19;
if ($[30] !== t11 || $[31] !== t14 || $[32] !== t18) {
t19 = <Box flexDirection="column" gap={1}>{t11}{t14}{t18}</Box>;
$[30] = t11;
$[31] = t14;
$[32] = t18;
$[33] = t19;
} else {
t19 = $[33];
}
let t20;
if ($[34] === Symbol.for("react.memo_cache_sentinel")) {
t20 = {
oldStart: 1,
newStart: 1,
oldLines: 3,
newLines: 3,
lines: [" function greet() {", "- console.log(\"Hello, World!\");", "+ console.log(\"Hello, Claude!\");", " }"]
};
$[34] = t20;
} else {
t20 = $[34];
}
let t21;
if ($[35] !== columns) {
t21 = <Box flexDirection="column" borderTop={true} borderBottom={true} borderLeft={false} borderRight={false} borderStyle="dashed" borderColor="subtle"><StructuredDiff patch={t20} dim={false} filePath="demo.js" firstLine={null} width={columns} /></Box>;
$[35] = columns;
$[36] = t21;
} else {
t21 = $[36];
}
const t22 = colorModuleUnavailableReason === "env" ? `Syntax highlighting disabled (via CLAUDE_CODE_SYNTAX_HIGHLIGHT=${process.env.CLAUDE_CODE_SYNTAX_HIGHLIGHT})` : syntaxHighlightingDisabled ? `Syntax highlighting disabled (${syntaxToggleShortcut} to enable)` : syntaxTheme ? `Syntax theme: ${syntaxTheme.theme}${syntaxTheme.source ? ` (from ${syntaxTheme.source})` : ""} (${syntaxToggleShortcut} to disable)` : `Syntax highlighting enabled (${syntaxToggleShortcut} to disable)`;
let t23;
if ($[37] !== t22) {
t23 = <Text dimColor={true}>{" "}{t22}</Text>;
$[37] = t22;
$[38] = t23;
} else {
t23 = $[38];
}
let t24;
if ($[39] !== t21 || $[40] !== t23) {
t24 = <Box flexDirection="column" width="100%">{t21}{t23}</Box>;
$[39] = t21;
$[40] = t23;
$[41] = t24;
} else {
t24 = $[41];
}
let t25;
if ($[42] !== t19 || $[43] !== t24) {
t25 = <Box flexDirection="column" gap={1}>{t19}{t24}</Box>;
$[42] = t19;
$[43] = t24;
$[44] = t25;
} else {
t25 = $[44];
}
const content = t25;
}
}));
}
}, [
colorModuleUnavailableReason,
syntaxHighlightingDisabled,
setAppState,
])

useKeybinding("theme:toggleSyntaxHighlighting", toggleSyntax, {
context: "ThemePicker",
})

const exitState = useExitOnCtrlCDWithKeybindings(
skipExitHandling ? () => {} : undefined,
)

const themeOptions = React.useMemo(
() => [
...(feature("AUTO_THEME")
? [{ label: "Auto (match terminal)", value: "auto" as const }]
: []), {
label: "Dark mode",
value: "dark" as const
}, {
label: "Light mode",
value: "light" as const
}, {
label: "Dark mode (colorblind-friendly)",
value: "dark-daltonized" as const,
}, {
label: "Light mode (colorblind-friendly)",
value: "light-daltonized" as const,
}, {
label: "Dark mode (ANSI colors only)",
value: "dark-ansi" as const
}, {
label: "Light mode (ANSI colors only)",
value: "light-ansi" as const
},],
[],
)

const handleRowFocus = React.useCallback(
(setting: ThemeSetting) => {
setPreviewTheme(setting)
},
[setPreviewTheme],
)

const handleSelect = React.useCallback(
(setting: ThemeSetting) => {
savePreview()
onThemeSelect(setting)
},
[savePreview, onThemeSelect],
)

const handleCancel = React.useCallback(() => {
cancelPreview()
if (skipExitHandling) {
onCancelProp?.()
} else {
void gracefulShutdown(0)
}
}, [cancelPreview, onCancelProp, skipExitHandling])

const syntaxHint =
colorModuleUnavailableReason === 'env'
? `Syntax highlighting disabled (via CLAUDE_CODE_SYNTAX_HIGHLIGHT=${process.env.CLAUDE_CODE_SYNTAX_HIGHLIGHT})`
: syntaxHighlightingDisabled
? `Syntax highlighting disabled (${syntaxToggleShortcut} to enable)`
: syntaxTheme
? `Syntax theme: ${syntaxTheme.theme}${syntaxTheme.source ? ` (from ${syntaxTheme.source})` : ''} (${syntaxToggleShortcut} to disable)`
: `Syntax highlighting enabled (${syntaxToggleShortcut} to disable)`

const header = showIntroText ? (
<Text>{"Let's get started."}</Text>
) : (
<Text bold color="permission">
Theme
</Text>
)

const introBlock = (
<Box flexDirection="column">
<Text bold>Choose the text style that looks best with your terminal</Text>
{helpText && !showHelpTextBelow ? (
<Text dimColor>{helpText}</Text>
) : null}
</Box>
)

const content = (
<Box flexDirection="column" gap={1}>
<Box flexDirection="column" gap={1}>
{header}
{introBlock}
<Select
options={themeOptions}
onFocus={handleRowFocus}
onChange={handleSelect}
onCancel={handleCancel}
visibleOptionCount={themeOptions.length}
defaultValue={themeSetting}
defaultFocusValue={themeSetting}
/>
</Box>
<Box flexDirection="column" width="100%">
<Box
key={theme}
flexDirection="column"
borderTop
borderBottom
borderLeft={false}
borderRight={false}
borderStyle="dashed"
borderColor="subtle"
>
<StructuredDiffView
patch={DEMO_PATCH}
dim={false}
filePath="demo.js"
firstLine={null}
width={columns}
/>
</Box>
<Text dimColor>
{' '}
{syntaxHint}
</Text>
</Box>
</Box>
)

if (!showIntroText) {
let t26;
if ($[45] !== content) {
t26 = <Box flexDirection="column">{content}</Box>;
$[45] = content;
$[46] = t26;
} else {
t26 = $[46];
}
let t27;
if ($[47] !== helpText || $[48] !== showHelpTextBelow) {
t27 = showHelpTextBelow && helpText && <Box marginLeft={3}><Text dimColor={true}>{helpText}</Text></Box>;
$[47] = helpText;
$[48] = showHelpTextBelow;
$[49] = t27;
} else {
t27 = $[49];
}
let t28;
if ($[50] !== exitState || $[51] !== hideEscToCancel) {
t28 = !hideEscToCancel && <Box><Text dimColor={true} italic={true}>{exitState.pending ? <>Press {exitState.keyName} again to exit</> : <Byline><KeyboardShortcutHint shortcut="Enter" action="select" /><KeyboardShortcutHint shortcut="Esc" action="cancel" /></Byline>}</Text></Box>;
$[50] = exitState;
$[51] = hideEscToCancel;
$[52] = t28;
} else {
t28 = $[52];
}
let t29;
if ($[53] !== t27 || $[54] !== t28) {
t29 = <Box marginTop={1}>{t27}{t28}</Box>;
$[53] = t27;
$[54] = t28;
$[55] = t29;
} else {
t29 = $[55];
}
let t30;
if ($[56] !== t26 || $[57] !== t29) {
t30 = <>{t26}{t29}</>;
$[56] = t26;
$[57] = t29;
$[58] = t30;
} else {
t30 = $[58];
}
return t30;
return (
<>
<Box flexDirection="column">{content}</Box>
{showHelpTextBelow && helpText ? (
<Box marginLeft={3}>
<Text dimColor>{helpText}</Text>
</Box>
) : null}
{!hideEscToCancel ? (
<Box marginTop={1}>
<Text dimColor italic>
{exitState.pending ? (
<>Press {exitState.keyName} again to exit</>
) : (
<Byline>
<KeyboardShortcutHint shortcut="Enter" action="select" />
<KeyboardShortcutHint shortcut="Esc" action="cancel" />
</Byline>
)}
</Text>
</Box>
) : null}
</>
)
}
return content;
}
function _temp2() {}
function _temp(s) {
return s.settings.syntaxHighlightingDisabled;

return content
}

252
src/grpc/server.ts
Normal file
@@ -0,0 +1,252 @@
import * as grpc from '@grpc/grpc-js'
import * as protoLoader from '@grpc/proto-loader'
import path from 'path'
import { randomUUID } from 'crypto'
import { QueryEngine } from '../QueryEngine.js'
import { getTools } from '../tools.js'
import { getDefaultAppState } from '../state/AppStateStore.js'
import { AppState } from '../state/AppState.js'
import { FileStateCache, READ_FILE_STATE_CACHE_SIZE } from '../utils/fileStateCache.js'

const PROTO_PATH = path.resolve(import.meta.dirname, '../proto/openclaude.proto')

const packageDefinition = protoLoader.loadSync(PROTO_PATH, {
keepCase: true,
longs: String,
enums: String,
defaults: true,
oneofs: true,
})

const protoDescriptor = grpc.loadPackageDefinition(packageDefinition) as any
const openclaudeProto = protoDescriptor.openclaude.v1

const MAX_SESSIONS = 1000

export class GrpcServer {
private server: grpc.Server
private sessions: Map<string, any[]> = new Map()

constructor() {
this.server = new grpc.Server()
this.server.addService(openclaudeProto.AgentService.service, {
Chat: this.handleChat.bind(this),
})
}

start(port: number = 50051, host: string = 'localhost') {
this.server.bindAsync(
`${host}:${port}`,
grpc.ServerCredentials.createInsecure(),
(error, boundPort) => {
if (error) {
console.error('Failed to start gRPC server', error)
return
}
console.log(`gRPC Server running at ${host}:${boundPort}`)
}
)
}

private handleChat(call: grpc.ServerDuplexStream<any, any>) {
let engine: QueryEngine | null = null
let appState: AppState = getDefaultAppState()
const fileCache: FileStateCache = new FileStateCache(READ_FILE_STATE_CACHE_SIZE, 25 * 1024 * 1024)

// To handle ActionRequired (ask user for permission)
const pendingRequests = new Map<string, (reply: string) => void>()

// Accumulated messages from previous turns for multi-turn context
let previousMessages: any[] = []
let sessionId = ''
let interrupted = false

call.on('data', async (clientMessage) => {
try {
if (clientMessage.request) {
if (engine) {
call.write({
error: {
message: 'A request is already in progress on this stream',
code: 'ALREADY_EXISTS'
}
})
return
}
interrupted = false
const req = clientMessage.request
sessionId = req.session_id || ''
previousMessages = []

// Load previous messages from session store (cross-stream persistence)
if (sessionId && this.sessions.has(sessionId)) {
previousMessages = [...this.sessions.get(sessionId)!]
}

const toolNameById = new Map<string, string>()

engine = new QueryEngine({
cwd: req.working_directory || process.cwd(),
tools: getTools(appState.toolPermissionContext), // Gets all available tools
commands: [], // Slash commands
mcpClients: [],
agents: [],
...(previousMessages.length > 0 ? { initialMessages: previousMessages } : {}),
includePartialMessages: true,
canUseTool: async (tool, input, context, assistantMsg, toolUseID) => {
if (toolUseID) {
toolNameById.set(toolUseID, tool.name)
}
// Notify client of the tool call first
call.write({
tool_start: {
tool_name: tool.name,
arguments_json: JSON.stringify(input),
tool_use_id: toolUseID
}
})

// Ask user for permission
const promptId = randomUUID()
const question = `Approve ${tool.name}?`
call.write({
action_required: {
prompt_id: promptId,
question,
type: 'CONFIRM_COMMAND'
}
})

return new Promise((resolve) => {
pendingRequests.set(promptId, (reply) => {
if (reply.toLowerCase() === 'yes' || reply.toLowerCase() === 'y') {
resolve({ behavior: 'allow' })
} else {
resolve({ behavior: 'deny', reason: 'User denied via gRPC' })
}
})
})
},
getAppState: () => appState,
setAppState: (updater) => { appState = updater(appState) },
readFileCache: fileCache,
userSpecifiedModel: req.model,
fallbackModel: req.model,
})

// Track accumulated response data for FinalResponse
let fullText = ''
let promptTokens = 0
let completionTokens = 0

const generator = engine.submitMessage(req.message)

for await (const msg of generator) {
if (msg.type === 'stream_event') {
if (msg.event.type === 'content_block_delta' && msg.event.delta.type === 'text_delta') {
call.write({
text_chunk: {
text: msg.event.delta.text
}
})
fullText += msg.event.delta.text
}
} else if (msg.type === 'user') {
// Extract tool results
const content = msg.message.content
if (Array.isArray(content)) {
for (const block of content) {
if (block.type === 'tool_result') {
let outputStr = ''
if (typeof block.content === 'string') {
outputStr = block.content
} else if (Array.isArray(block.content)) {
outputStr = block.content.map(c => c.type === 'text' ? c.text : '').join('\n')
}
call.write({
tool_result: {
tool_name: toolNameById.get(block.tool_use_id) ?? block.tool_use_id,
tool_use_id: block.tool_use_id,
output: outputStr,
is_error: block.is_error || false
}
})
}
}
}
} else if (msg.type === 'result') {
// Extract real token counts and final text from the result
if (msg.subtype === 'success') {
if (msg.result) {
fullText = msg.result
}
promptTokens = msg.usage?.input_tokens ?? 0
completionTokens = msg.usage?.output_tokens ?? 0
}
}
}

if (!interrupted) {
// Save messages for multi-turn context in subsequent requests
previousMessages = [...engine.getMessages()]

// Persist to session store for cross-stream resumption
if (sessionId) {
if (!this.sessions.has(sessionId) && this.sessions.size >= MAX_SESSIONS) {
// Evict oldest session (Map preserves insertion order)
this.sessions.delete(this.sessions.keys().next().value)
}
this.sessions.set(sessionId, previousMessages)
}

call.write({
done: {
full_text: fullText,
prompt_tokens: promptTokens,
completion_tokens: completionTokens
}
})
}

engine = null

} else if (clientMessage.input) {
const promptId = clientMessage.input.prompt_id
const reply = clientMessage.input.reply
if (pendingRequests.has(promptId)) {
pendingRequests.get(promptId)!(reply)
pendingRequests.delete(promptId)
}
} else if (clientMessage.cancel) {
interrupted = true
if (engine) {
engine.interrupt()
}
call.end()
}
} catch (err: any) {
console.error("Error processing stream:", err)
call.write({
error: {
message: err.message || "Internal server error",
code: "INTERNAL"
}
})
call.end()
}
})

call.on('end', () => {
interrupted = true
// Unblock any pending permission prompts so canUseTool can return
for (const resolve of pendingRequests.values()) {
resolve('no')
}
if (engine) {
engine.interrupt()
}
engine = null
pendingRequests.clear()
})
}
}
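
For reference, a minimal entrypoint that wires this class to the `GRPC_PORT`/`GRPC_HOST` variables could look like the sketch below. This is an illustration, not a file from this change set; the import path and script wiring are assumptions.

```ts
// Hypothetical bootstrap script (e.g. what `npm run dev:grpc` might invoke).
import { GrpcServer } from './server.js'

const port = Number(process.env.GRPC_PORT ?? 50051)
const host = process.env.GRPC_HOST ?? 'localhost'

// start() binds asynchronously and logs once the port is bound.
new GrpcServer().start(port, host)
```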

101
src/proto/openclaude.proto
Normal file
@@ -0,0 +1,101 @@
syntax = "proto3";

package openclaude.v1;

// Main Agent Service
service AgentService {
// Bidirectional stream: client sends tasks and answers to agent prompts,
// server streams text tokens, tool states, and requests permissions.
rpc Chat(stream ClientMessage) returns (stream ServerMessage);
}

// ---------------------------------------------------------
// MESSAGES FROM CLIENT (Input)
// ---------------------------------------------------------
message ClientMessage {
oneof payload {
// 1. Initial request (first message in the stream)
ChatRequest request = 2;

// 2. User response to an agent prompt (e.g., command confirmation)
UserInput input = 3;

// 3. Interrupt signal (if the user clicks "Stop generation")
CancelSignal cancel = 4;
}
}

message ChatRequest {
string message = 1;
string working_directory = 2; // Where the agent should execute commands
reserved 3; // Reserved to prevent accidental reuse
optional string model = 4;
string session_id = 5; // Non-empty = cross-stream session persistence
}

message UserInput {
string reply = 1; // Text response (e.g., "y", "no", or clarification)
string prompt_id = 2; // ID of the prompt we are responding to
}

message CancelSignal {
string reason = 1;
}

// ---------------------------------------------------------
// MESSAGES FROM SERVER (Output / Events)
// ---------------------------------------------------------
message ServerMessage {
// Using oneof guarantees that only one type of event arrives at a time
oneof event {
TextChunk text_chunk = 1; // Chunk of text from LLM
ToolCallStart tool_start = 2; // Agent started using a tool
ToolCallResult tool_result = 3; // Tool returned a result
ActionRequired action_required = 4;// Agent requires human intervention
FinalResponse done = 5; // Generation successfully completed
ErrorResponse error = 6; // A critical error occurred
}
}

// Stream text chunk
message TextChunk {
string text = 1;
}

// Agent decided to use a tool (bash, read_file, etc.)
message ToolCallStart {
string tool_name = 1;
string arguments_json = 2; // Arguments in JSON format
string tool_use_id = 3; // Correlation ID matching ToolCallResult
}

// Result of tool execution
message ToolCallResult {
string tool_name = 1;
string output = 2; // stdout/stderr or file contents
bool is_error = 3; // Did the command itself fail
string tool_use_id = 4; // Correlation ID matching ToolCallStart
}

// Agent paused work and is waiting for user decision
message ActionRequired {
string prompt_id = 1; // Client must return this ID in UserInput
string question = 2; // Question text (e.g., "Execute 'rm -rf /'?")
enum ActionType {
CONFIRM_COMMAND = 0; // Yes/No
REQUEST_INFORMATION = 1; // Text input
}
ActionType type = 3;
}

// Final statistics
message FinalResponse {
string full_text = 1; // The entire generated text
int32 prompt_tokens = 2;
int32 completion_tokens = 3;
}

message ErrorResponse {
string message = 1;
string code = 2;
}
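
Because the server loads this same file with `@grpc/proto-loader` and `keepCase: true`, a client can be generated from it at runtime. The sketch below is a minimal illustration, not project code; the proto path, the `session_id` value, and the auto-approve reply policy are all assumptions.

```ts
// Minimal bidirectional client against openclaude.proto (illustrative only).
import * as grpc from '@grpc/grpc-js'
import * as protoLoader from '@grpc/proto-loader'

const def = protoLoader.loadSync('src/proto/openclaude.proto', {
  keepCase: true, longs: String, enums: String, defaults: true, oneofs: true,
})
const proto = grpc.loadPackageDefinition(def) as any

const client = new proto.openclaude.v1.AgentService(
  'localhost:50051',
  grpc.credentials.createInsecure(),
)

const stream = client.Chat()

stream.on('data', (msg: any) => {
  if (msg.text_chunk) process.stdout.write(msg.text_chunk.text)
  // WARNING: auto-approves every tool call; a real client should ask the user.
  if (msg.action_required) {
    stream.write({ input: { prompt_id: msg.action_required.prompt_id, reply: 'y' } })
  }
  if (msg.done) stream.end()
})

stream.write({
  request: { message: 'List the files here', working_directory: process.cwd(), session_id: 'demo' },
})
```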

@@ -237,6 +237,7 @@ import { useOfficialMarketplaceNotification } from 'src/hooks/useOfficialMarketp
import { usePromptsFromClaudeInChrome } from 'src/hooks/usePromptsFromClaudeInChrome.js';
import { getTipToShowOnSpinner, recordShownTip } from 'src/services/tips/tipScheduler.js';
import type { Theme } from 'src/utils/theme.js';
import { isPromptTypingSuppressionActive } from './replInputSuppression.js';
import { checkAndDisableBypassPermissionsIfNeeded, checkAndDisableAutoModeIfNeeded, useKickOffCheckAndDisableBypassPermissionsIfNeeded, useKickOffCheckAndDisableAutoModeIfNeeded } from 'src/utils/permissions/bypassPermissionsKillswitch.js';
import { SandboxManager } from 'src/utils/sandbox/sandbox-adapter.js';
import { SANDBOX_NETWORK_ACCESS_TOOL_NAME } from 'src/cli/structuredIO.js';
@@ -1336,6 +1337,7 @@ export function REPL({
const [inputValue, setInputValueRaw] = useState(() => consumeEarlyInput());
const inputValueRef = useRef(inputValue);
inputValueRef.current = inputValue;
const promptTypingSuppressionActive = isPromptTypingSuppressionActive(isPromptInputActive, inputValue);
const insertTextRef = useRef<{
insert: (text: string) => void;
setInputWithCursor: (value: string, cursor: number) => void;
@@ -2028,7 +2030,7 @@ export function REPL({
if (isMessageSelectorVisible) return 'message-selector';

// Suppress interrupt dialogs while user is actively typing
if (isPromptInputActive) return undefined;
if (promptTypingSuppressionActive) return undefined;
if (sandboxPermissionRequestQueue[0]) return 'sandbox-permission';

// Permission/interactive dialogs (show unless blocked by toolJSX)
@@ -2071,7 +2073,7 @@ export function REPL({
const focusedInputDialog = getFocusedInputDialog();

// True when permission prompts exist but are hidden because the user is typing
const hasSuppressedDialogs = isPromptInputActive && (sandboxPermissionRequestQueue[0] || toolUseConfirmQueue[0] || promptQueue[0] || workerSandboxPermissions.queue[0] || elicitation.queue[0] || showingCostDialog);
const hasSuppressedDialogs = promptTypingSuppressionActive && (sandboxPermissionRequestQueue[0] || toolUseConfirmQueue[0] || promptQueue[0] || workerSandboxPermissions.queue[0] || elicitation.queue[0] || showingCostDialog);

// Keep ref in sync so timer callbacks can read the current value
focusedInputDialogRef.current = focusedInputDialog;

18
src/screens/replInputSuppression.test.ts
Normal file
@@ -0,0 +1,18 @@
import { describe, expect, it } from 'bun:test'

import { isPromptTypingSuppressionActive } from './replInputSuppression.js'

describe('isPromptTypingSuppressionActive', () => {
it('suppresses dialogs when early input already exists', () => {
expect(isPromptTypingSuppressionActive(false, 'hello')).toBe(true)
})

it('does not suppress dialogs for empty or whitespace-only input', () => {
expect(isPromptTypingSuppressionActive(false, '')).toBe(false)
expect(isPromptTypingSuppressionActive(false, '   ')).toBe(false)
})

it('keeps suppression active while the typing flag is set', () => {
expect(isPromptTypingSuppressionActive(true, '')).toBe(true)
})
})
6
src/screens/replInputSuppression.ts
Normal file
@@ -0,0 +1,6 @@
export function isPromptTypingSuppressionActive(
isPromptInputActive: boolean,
inputValue: string,
): boolean {
return isPromptInputActive || inputValue.trim().length > 0
}
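
This helper gives the REPL's dialog-gating checks a single definition of "the user is typing or has a pending draft". A simplified sketch of the call pattern it enables (the function and parameter names here are illustrative, not repo code):

```ts
// Illustrative gating pattern, mirroring the REPL change above.
import { isPromptTypingSuppressionActive } from './replInputSuppression.js'

function pickFocusedDialog(
  isPromptInputActive: boolean,
  inputValue: string,
  dialogQueue: string[],
): string | undefined {
  // Hide permission prompts while input is active or a draft is pending,
  // so a dialog cannot steal keystrokes mid-word.
  if (isPromptTypingSuppressionActive(isPromptInputActive, inputValue)) return undefined
  return dialogQueue[0]
}
```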

@@ -14,7 +14,16 @@ import { lazySchema } from '../../utils/lazySchema.js'
import { logError } from '../../utils/log.js'
import { getAPIProvider } from '../../utils/model/providers.js'
import { isEssentialTrafficOnly } from '../../utils/privacyLevel.js'
import type { ModelOption } from '../../utils/model/modelOptions.js'
import {
getLocalOpenAICompatibleProviderLabel,
listOpenAICompatibleModels,
} from '../../utils/providerDiscovery.js'
import { getClaudeCodeUserAgent } from '../../utils/userAgent.js'
import {
getAdditionalModelOptionsCacheScope,
resolveProviderRequest,
} from './providerConfig.js'

const bootstrapResponseSchema = lazySchema(() =>
z.object({
@@ -39,6 +48,12 @@ const bootstrapResponseSchema = lazySchema(() =>

type BootstrapResponse = z.infer<ReturnType<typeof bootstrapResponseSchema>>

type BootstrapCachePayload = {
clientData: Record<string, unknown> | null
additionalModelOptions: ModelOption[]
additionalModelOptionsScope: string
}

async function fetchBootstrapAPI(): Promise<BootstrapResponse | null> {
if (isEssentialTrafficOnly()) {
logForDebugging('[Bootstrap] Skipped: Nonessential traffic disabled')
@@ -108,22 +123,70 @@ async function fetchBootstrapAPI(): Promise<BootstrapResponse | null> {
}
}

async function fetchLocalOpenAIModelOptions(): Promise<BootstrapCachePayload | null> {
const scope = getAdditionalModelOptionsCacheScope()
if (!scope?.startsWith('openai:')) {
return null
}

const { baseUrl } = resolveProviderRequest()
const models = await listOpenAICompatibleModels({
baseUrl,
apiKey: process.env.OPENAI_API_KEY,
})

if (models === null) {
logForDebugging('[Bootstrap] Local OpenAI model discovery failed')
return null
}

const providerLabel = getLocalOpenAICompatibleProviderLabel(baseUrl)

return {
clientData: getGlobalConfig().clientDataCache ?? null,
additionalModelOptionsScope: scope,
additionalModelOptions: models.map(model => ({
value: model,
label: model,
description: `Detected from ${providerLabel}`,
})),
}
}

/**
* Fetch bootstrap data from the API and persist to disk cache.
*/
export async function fetchBootstrapData(): Promise<void> {
try {
const response = await fetchBootstrapAPI()
if (!response) return
const scope = getAdditionalModelOptionsCacheScope()
let payload: BootstrapCachePayload | null = null

const clientData = response.client_data ?? null
const additionalModelOptions = response.additional_model_options ?? []
if (scope === 'firstParty') {
const response = await fetchBootstrapAPI()
if (!response) return

payload = {
clientData: response.client_data ?? null,
additionalModelOptions: response.additional_model_options ?? [],
additionalModelOptionsScope: scope,
}
} else if (scope?.startsWith('openai:')) {
payload = await fetchLocalOpenAIModelOptions()
if (!payload) return
} else {
logForDebugging('[Bootstrap] Skipped: no additional model source')
return
}

const { clientData, additionalModelOptions, additionalModelOptionsScope } =
payload

// Only persist if data actually changed — avoids a config write on every startup.
const config = getGlobalConfig()
if (
isEqual(config.clientDataCache, clientData) &&
isEqual(config.additionalModelOptionsCache, additionalModelOptions)
isEqual(config.additionalModelOptionsCache, additionalModelOptions) &&
config.additionalModelOptionsCacheScope === additionalModelOptionsScope
) {
logForDebugging('[Bootstrap] Cache unchanged, skipping write')
return
@@ -134,6 +197,7 @@ export async function fetchBootstrapData(): Promise<void> {
...current,
clientDataCache: clientData,
additionalModelOptionsCache: additionalModelOptions,
additionalModelOptionsCacheScope: additionalModelOptionsScope,
}))
} catch (error) {
logError(error)

@@ -14,12 +14,19 @@ import {
} from './providerConfig.js'

const tempDirs: string[] = []
const originalEnv = {
OPENAI_BASE_URL: process.env.OPENAI_BASE_URL,
OPENAI_API_BASE: process.env.OPENAI_API_BASE,
}

afterEach(() => {
while (tempDirs.length > 0) {
const dir = tempDirs.pop()
if (dir) rmSync(dir, { recursive: true, force: true })
}

process.env.OPENAI_BASE_URL = originalEnv.OPENAI_BASE_URL
process.env.OPENAI_API_BASE = originalEnv.OPENAI_API_BASE
})

function createTempAuthJson(payload: Record<string, unknown>): string {
@@ -62,12 +69,26 @@ describe('Codex provider config', () => {
})

test('resolves codexplan alias to Codex transport with reasoning', () => {
delete process.env.OPENAI_BASE_URL
delete process.env.OPENAI_API_BASE

const resolved = resolveProviderRequest({ model: 'codexplan' })
expect(resolved.transport).toBe('codex_responses')
expect(resolved.resolvedModel).toBe('gpt-5.4')
expect(resolved.reasoning).toEqual({ effort: 'high' })
})

test('does not force Codex transport when a local non-Codex base URL is explicit', () => {
const resolved = resolveProviderRequest({
model: 'codexplan',
baseUrl: 'http://127.0.0.1:8080/v1',
})

expect(resolved.transport).toBe('chat_completions')
expect(resolved.baseUrl).toBe('http://127.0.0.1:8080/v1')
expect(resolved.resolvedModel).toBe('gpt-5.4')
})

test('resolves codexplan to Codex transport even when OPENAI_BASE_URL is the string "undefined"', () => {
// On Windows, env vars can leak as the literal string "undefined" instead of
// the JS value undefined when not properly unset (issue #336).

@@ -557,8 +557,12 @@ export function getAssistantMessageFromError(
const stripped = error.message.replace(/^429\s+/, '')
const innerMessage = stripped.match(/"message"\s*:\s*"([^"]*)"/)?.[1]
const detail = innerMessage || stripped
const retryAfter = (error as APIError).headers?.get?.('retry-after')
const retryHint = retryAfter && !isNaN(Number(retryAfter))
? `Try again in ${retryAfter} seconds.`
: 'Try again in a few seconds.'
return createAssistantAPIErrorMessage({
content: `${API_ERROR_MESSAGE_PREFIX}: Request rejected (429) · ${detail || `this may be a temporary capacity issue${getAPIProvider() === 'firstParty' ? ' — check status.anthropic.com' : ''}`}`,
content: `${API_ERROR_MESSAGE_PREFIX}: Request rejected (429) · ${detail || 'this may be a temporary capacity issue'} — ${retryHint}`,
error: 'rate_limit',
})
}
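
A standalone illustration of the detail extraction this branch relies on; the error-message shape is an assumption for the demo, while the regexes come from the code above.

```ts
// How the inner message is pulled out of a 429 error string.
const stripped = '429 {"type":"error","message":"Overloaded"}'.replace(/^429\s+/, '')
const innerMessage = stripped.match(/"message"\s*:\s*"([^"]*)"/)?.[1]
console.log(innerMessage) // -> Overloaded
// With a numeric retry-after header of "12", the hint becomes
// "Try again in 12 seconds."; otherwise "Try again in a few seconds."
```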
|
||||
|
||||
@@ -573,3 +573,80 @@ test('sanitizes malformed MCP tool schemas before sending them to OpenAI', async
|
||||
expect(properties?.priority?.enum).toEqual([0, 1, 2, 3])
|
||||
expect(properties?.priority).not.toHaveProperty('default')
|
||||
})
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Issue #202 — consecutive role coalescing (Devstral, Mistral strict templates)
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
function makeNonStreamResponse(content = 'ok'): Response {
|
||||
return new Response(
|
||||
JSON.stringify({
|
||||
id: 'chatcmpl-test',
|
||||
model: 'test-model',
|
||||
choices: [{ message: { role: 'assistant', content }, finish_reason: 'stop' }],
|
||||
usage: { prompt_tokens: 5, completion_tokens: 1, total_tokens: 6 },
|
||||
}),
|
||||
{ headers: { 'Content-Type': 'application/json' } },
|
||||
)
|
||||
}
|
||||
|
||||
test('coalesces consecutive user messages to avoid alternation errors (issue #202)', async () => {
|
||||
let sentMessages: Array<{ role: string; content: unknown }> | undefined
|
||||
|
||||
globalThis.fetch = (async (_input: unknown, init: RequestInit | undefined) => {
|
||||
sentMessages = JSON.parse(String(init?.body)).messages
|
||||
return makeNonStreamResponse()
|
||||
}) as FetchType
|
||||
|
||||
const client = createOpenAIShimClient({}) as OpenAIShimClient
|
||||
|
||||
await client.beta.messages.create({
|
||||
model: 'test-model',
|
||||
system: 'sys',
|
||||
messages: [
|
||||
{ role: 'user', content: 'first message' },
|
||||
{ role: 'user', content: 'second message' },
|
||||
],
|
||||
max_tokens: 64,
|
||||
stream: false,
|
||||
})
|
||||
|
||||
expect(sentMessages?.length).toBe(2) // system + 1 merged user
|
||||
expect(sentMessages?.[0]?.role).toBe('system')
|
||||
expect(sentMessages?.[1]?.role).toBe('user')
|
||||
const userContent = sentMessages?.[1]?.content as string
|
||||
expect(userContent).toContain('first message')
|
||||
expect(userContent).toContain('second message')
|
||||
})
|
||||
|
||||
test('coalesces consecutive assistant messages preserving tool_calls (issue #202)', async () => {
|
||||
let sentMessages: Array<{ role: string; content: unknown; tool_calls?: unknown[] }> | undefined
|
||||
|
||||
globalThis.fetch = (async (_input: unknown, init: RequestInit | undefined) => {
|
||||
sentMessages = JSON.parse(String(init?.body)).messages
|
||||
return makeNonStreamResponse()
|
||||
}) as FetchType
|
||||
|
||||
const client = createOpenAIShimClient({}) as OpenAIShimClient
|
||||
|
||||
await client.beta.messages.create({
|
||||
model: 'test-model',
|
||||
system: 'sys',
|
||||
messages: [
|
||||
{ role: 'user', content: 'go' },
|
||||
{ role: 'assistant', content: 'thinking...' },
|
||||
{
|
||||
role: 'assistant',
|
||||
content: [{ type: 'tool_use', id: 'call_1', name: 'Bash', input: { command: 'ls' } }],
|
||||
},
|
||||
{ role: 'user', content: [{ type: 'tool_result', tool_use_id: 'call_1', content: 'file.txt' }] },
|
||||
],
|
||||
max_tokens: 64,
|
||||
stream: false,
|
||||
})
|
||||
|
||||
// system + user + merged assistant + tool
|
||||
const assistantMsgs = sentMessages?.filter(m => m.role === 'assistant')
|
||||
expect(assistantMsgs?.length).toBe(1) // two assistant turns merged into one
|
||||
expect(assistantMsgs?.[0]?.tool_calls?.length).toBeGreaterThan(0)
|
||||
})
|
||||
|
||||
@@ -295,7 +295,41 @@ function convertMessages(
|
||||
}
|
||||
}
|
||||
|
||||
return result
|
||||
// Coalescing pass: merge consecutive messages of the same role.
|
||||
// OpenAI/vLLM/Ollama require strict user↔assistant alternation.
|
||||
// Multiple consecutive tool messages are allowed (assistant → tool* → user).
|
||||
// Consecutive user or assistant messages must be merged to avoid Jinja
|
||||
// template errors like "roles must alternate" (Devstral, Mistral models).
|
||||
const coalesced: OpenAIMessage[] = []
|
||||
for (const msg of result) {
|
||||
const prev = coalesced[coalesced.length - 1]
|
||||
|
||||
if (prev && prev.role === msg.role && msg.role !== 'tool' && msg.role !== 'system') {
|
||||
const prevContent = prev.content
|
||||
const curContent = msg.content
|
||||
|
||||
if (typeof prevContent === 'string' && typeof curContent === 'string') {
|
||||
prev.content = prevContent + (prevContent && curContent ? '\n' : '') + curContent
|
||||
} else {
|
||||
const toArray = (
|
||||
c: string | Array<{ type: string; text?: string; image_url?: { url: string } }> | undefined,
|
||||
): Array<{ type: string; text?: string; image_url?: { url: string } }> => {
|
||||
if (!c) return []
|
||||
if (typeof c === 'string') return c ? [{ type: 'text', text: c }] : []
|
||||
return c
|
||||
}
|
||||
prev.content = [...toArray(prevContent), ...toArray(curContent)]
|
||||
}
|
||||
|
||||
if (msg.tool_calls?.length) {
|
||||
prev.tool_calls = [...(prev.tool_calls ?? []), ...msg.tool_calls]
|
||||
}
|
||||
} else {
|
||||
coalesced.push(msg)
|
||||
}
|
||||
}
|
||||
|
||||
return coalesced
|
||||
}
|
||||
|
||||
/**
|
||||
|
||||
@@ -1,6 +1,22 @@
|
||||
import { expect, test } from 'bun:test'
|
||||
import { afterEach, expect, test } from 'bun:test'
|
||||
|
||||
import { isLocalProviderUrl } from './providerConfig.js'
|
||||
import {
|
||||
getAdditionalModelOptionsCacheScope,
|
||||
isLocalProviderUrl,
|
||||
resolveProviderRequest,
|
||||
} from './providerConfig.js'
|
||||
|
||||
const originalEnv = {
|
||||
CLAUDE_CODE_USE_OPENAI: process.env.CLAUDE_CODE_USE_OPENAI,
|
||||
OPENAI_BASE_URL: process.env.OPENAI_BASE_URL,
|
||||
OPENAI_MODEL: process.env.OPENAI_MODEL,
|
||||
}
|
||||
|
||||
afterEach(() => {
|
||||
process.env.CLAUDE_CODE_USE_OPENAI = originalEnv.CLAUDE_CODE_USE_OPENAI
|
||||
process.env.OPENAI_BASE_URL = originalEnv.OPENAI_BASE_URL
|
||||
process.env.OPENAI_MODEL = originalEnv.OPENAI_MODEL
|
||||
})
|
||||
|
||||
test('treats localhost endpoints as local', () => {
|
||||
expect(isLocalProviderUrl('http://localhost:11434/v1')).toBe(true)
|
||||
@@ -33,3 +49,37 @@ test('treats public hosts as remote', () => {
|
||||
expect(isLocalProviderUrl('https://example.com/v1')).toBe(false)
|
||||
expect(isLocalProviderUrl('http://[2001:4860:4860::8888]:11434/v1')).toBe(false)
|
||||
})
|
||||
|
||||
test('creates a cache scope for local openai-compatible providers', () => {
|
||||
process.env.CLAUDE_CODE_USE_OPENAI = '1'
|
||||
process.env.OPENAI_BASE_URL = 'http://localhost:1234/v1'
|
||||
process.env.OPENAI_MODEL = 'llama-3.2-3b-instruct'
|
||||
|
||||
expect(getAdditionalModelOptionsCacheScope()).toBe(
|
||||
'openai:http://localhost:1234/v1',
|
||||
)
|
||||
})
|
||||
|
||||
test('keeps codex alias models on chat completions for local openai-compatible providers', () => {
|
||||
process.env.CLAUDE_CODE_USE_OPENAI = '1'
|
||||
process.env.OPENAI_BASE_URL = 'http://127.0.0.1:8080/v1'
|
||||
process.env.OPENAI_MODEL = 'gpt-5.4'
|
||||
|
||||
expect(resolveProviderRequest()).toMatchObject({
|
||||
transport: 'chat_completions',
|
||||
requestedModel: 'gpt-5.4',
|
||||
resolvedModel: 'gpt-5.4',
|
||||
baseUrl: 'http://127.0.0.1:8080/v1',
|
||||
})
|
||||
expect(getAdditionalModelOptionsCacheScope()).toBe(
|
||||
'openai:http://127.0.0.1:8080/v1',
|
||||
)
|
||||
})
|
||||
|
||||
test('skips local model cache scope for remote openai-compatible providers', () => {
|
||||
process.env.CLAUDE_CODE_USE_OPENAI = '1'
|
||||
process.env.OPENAI_BASE_URL = 'https://api.openai.com/v1'
|
||||
process.env.OPENAI_MODEL = 'gpt-4o'
|
||||
|
||||
expect(getAdditionalModelOptionsCacheScope()).toBeNull()
|
||||
})
|
||||
|
||||
@@ -219,6 +219,14 @@ export function isCodexAlias(model: string): boolean {
  return base in CODEX_ALIAS_MODELS
}

export function shouldUseCodexTransport(
  model: string,
  baseUrl: string | undefined,
): boolean {
  const explicitBaseUrl = asEnvUrl(baseUrl)
  return isCodexBaseUrl(explicitBaseUrl) || (!explicitBaseUrl && isCodexAlias(model))
}

export function isLocalProviderUrl(baseUrl: string | undefined): boolean {
  if (!baseUrl) return false
  try {

@@ -302,13 +310,8 @@ export function resolveProviderRequest(options?: {
    asEnvUrl(options?.baseUrl) ??
    asEnvUrl(process.env.OPENAI_BASE_URL) ??
    asEnvUrl(process.env.OPENAI_API_BASE)
-  // Use Codex transport only when:
-  // - the base URL is explicitly the Codex endpoint, OR
-  // - the model is a Codex alias AND no custom base URL has been set
-  // A custom OPENAI_BASE_URL (e.g. Azure, OpenRouter) always wins over
-  // model-name-based Codex detection to prevent auth failures (#200, #203).
  const transport: ProviderTransport =
-    isCodexBaseUrl(rawBaseUrl) || (!rawBaseUrl && isCodexAlias(requestedModel))
    shouldUseCodexTransport(requestedModel, rawBaseUrl)
      ? 'codex_responses'
      : 'chat_completions'

@@ -337,6 +340,30 @@ export function resolveProviderRequest(options?: {
  }
}

export function getAdditionalModelOptionsCacheScope(): string | null {
  if (!isEnvTruthy(process.env.CLAUDE_CODE_USE_OPENAI)) {
    if (!isEnvTruthy(process.env.CLAUDE_CODE_USE_GEMINI) &&
      !isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB) &&
      !isEnvTruthy(process.env.CLAUDE_CODE_USE_BEDROCK) &&
      !isEnvTruthy(process.env.CLAUDE_CODE_USE_VERTEX) &&
      !isEnvTruthy(process.env.CLAUDE_CODE_USE_FOUNDRY)) {
      return 'firstParty'
    }
    return null
  }

  const request = resolveProviderRequest()
  if (request.transport !== 'chat_completions') {
    return null
  }

  if (!isLocalProviderUrl(request.baseUrl)) {
    return null
  }

  return `openai:${request.baseUrl.toLowerCase()}`
}

export function resolveCodexAuthPath(
  env: NodeJS.ProcessEnv = process.env,
): string {
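For reference, the transport rule above reduces to a few lines. This is a standalone sketch, not the real implementation: `CODEX_ALIASES` and `isCodexBase` are simplified stand-ins for the actual `CODEX_ALIAS_MODELS` table and `isCodexBaseUrl` (the endpoint check here is an assumption), and the `'codexplan'` alias is taken from the provider tests later in this diff.

```ts
// Standalone sketch of shouldUseCodexTransport, under simplified assumptions.
const CODEX_ALIASES = new Set(['codexplan']) // stand-in for CODEX_ALIAS_MODELS
const isCodexBase = (url?: string): boolean =>
  url !== undefined && url.includes('chatgpt.com') // assumed Codex endpoint check

function shouldUseCodexTransportSketch(model: string, baseUrl?: string): boolean {
  // An explicit Codex base URL wins; alias detection applies only when no
  // custom base URL is set, so Azure/OpenRouter/local URLs stay on chat.
  return isCodexBase(baseUrl) || (!baseUrl && CODEX_ALIASES.has(model))
}

console.log(shouldUseCodexTransportSketch('codexplan')) // true: codex_responses
console.log(shouldUseCodexTransportSketch('codexplan', 'http://127.0.0.1:8080/v1')) // false: chat_completions
```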
85 src/utils/attachments.extractors.test.ts Normal file
@@ -0,0 +1,85 @@
import { describe, expect, test } from 'bun:test'
import {
  extractAtMentionedFiles,
  extractMcpResourceMentions,
} from './attachments.js'

// Contract tests for the two @-mention extractors.
//
// Scope: the narrow contract between `extractAtMentionedFiles` and
// `extractMcpResourceMentions` where both are called on the same input
// and must not both claim the same token. The motivating bug is that
// `extractMcpResourceMentions`'s `\b` anchor lets it backtrack over the
// closing quote of a quoted file mention, producing a ghost match for
// `@"C:\Users\..."`. These tests pin the boundary so any regression in
// the MCP regex is caught immediately.
describe('extractor contract', () => {
  describe('extractMcpResourceMentions must return empty for', () => {
    const cases: Array<[string, string]> = [
      // Primary bug: the quoted form that PromptInput emits for Windows
      // paths today. `\b` backtracks past the trailing `"` and produces
      // a ghost MCP match on current HEAD.
      ['a quoted Windows drive-letter path', '@"C:\\Users\\me\\file.txt"'],
      // Even if the quote layer were stripped, a bare drive letter
      // followed by a path separator is never an MCP resource.
      ['an unquoted Windows drive-letter path', '@C:\\Users\\me\\file.txt'],
      // Sanity: quoted POSIX paths with no `:` at all never matched the
      // MCP regex and must keep not matching after the fix.
      ['a quoted POSIX path with a space', '@"/Users/foo/my file.ts"'],
      ['an unquoted POSIX path', '@/Users/foo/bar.ts'],
      // Quoted POSIX path that embeds a `:` in the filename — the quote
      // layer must shield it from MCP matching, same as the Windows case.
      ['a quoted POSIX path with a colon in the name', '@"/tmp/weird:name.txt"'],
    ]
    test.each(cases)('%s', (_label, input) => {
      expect(extractMcpResourceMentions(input)).toEqual([])
    })
  })

  describe('extractMcpResourceMentions still matches legitimate MCP mentions', () => {
    // Regression guard for the fix. If someone tightens the MCP regex
    // too aggressively, these break and the intent is clear.
    const cases: Array<[string, string, string[]]> = [
      [
        'a simple server:resource token',
        '@server:resource/path',
        ['server:resource/path'],
      ],
      [
        'a plugin-scoped server name with a dash',
        '@asana-plugin:project-status/123',
        ['asana-plugin:project-status/123'],
      ],
      [
        'an MCP mention inline in prose',
        'please check @server:res here',
        ['server:res'],
      ],
    ]
    test.each(cases)('%s', (_label, input, expected) => {
      expect(extractMcpResourceMentions(input)).toEqual(expected)
    })
  })

  describe('extractAtMentionedFiles extracts the file paths it should', () => {
    // Asserted separately from the MCP side: the bug is purely in the
    // MCP extractor over-matching, so these assertions are the
    // "baseline still works" half of the contract.
    const cases: Array<[string, string, string[]]> = [
      [
        'a quoted Windows drive-letter path',
        '@"C:\\Users\\me\\file.txt"',
        ['C:\\Users\\me\\file.txt'],
      ],
      [
        'a quoted POSIX path with a space',
        '@"/Users/foo/my file.ts"',
        ['/Users/foo/my file.ts'],
      ],
      ['an unquoted POSIX path', '@/Users/foo/bar.ts', ['/Users/foo/bar.ts']],
    ]
    test.each(cases)('%s', (_label, input, expected) => {
      expect(extractAtMentionedFiles(input)).toEqual(expected)
    })
  })
})
@@ -2793,11 +2793,30 @@ export function extractAtMentionedFiles(content: string): string[] {
export function extractMcpResourceMentions(content: string): string[] {
  // Extract MCP resources mentioned with @ symbol in format @server:uri
  // Example: "@server1:resource/path" would extract "server1:resource/path"
-  const atMentionRegex = /(^|\s)@([^\s]+:[^\s]+)\b/g
  //
  // Two guards against Windows-path / quoted-file collisions (see
  // `attachments.extractors.test.ts`):
  //
  // 1. `(?!")` right after `@` drops quoted tokens entirely. The earlier
  //    form (without the lookahead and with `[^\s]` character classes)
  //    backtracked past the closing `"` at the `\b` anchor and produced
  //    ghost matches like `"C:\Users\...\file.txt` for any quoted file
  //    mention containing a colon.
  // 2. The `"` added to the character classes is belt-and-braces: even
  //    if the lookahead were later removed or bypassed, the engine can
  //    no longer consume a quote character mid-match.
  const atMentionRegex = /(^|\s)@(?!")([^\s"]+:[^\s"]+)\b/g
  const matches = content.match(atMentionRegex) || []

  // Remove the prefix (everything before @) from each match
-  return uniq(matches.map(match => match.slice(match.indexOf('@') + 1)))
  return uniq(
    matches
      .map(match => match.slice(match.indexOf('@') + 1))
      // Post-match filter: a single-letter "server" followed by `:\` or
      // `:/` is always a Windows drive-letter prefix, never a real MCP
      // resource. This covers the unquoted `@C:\Users\...` case that
      // the regex alone cannot disambiguate from `@server:resource`.
      .filter(m => !/^[A-Za-z]:[\\/]/.test(m)),
  )
}

export function extractAgentMentions(content: string): string[] {
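The ghost match is easy to reproduce outside the test suite. Both regexes below are copied verbatim from the hunk above; running this in Bun or Node shows the old pattern consuming a quoted Windows path while the fixed pattern skips it and still accepts real MCP mentions.

```ts
const oldRegex = /(^|\s)@([^\s]+:[^\s]+)\b/g
const newRegex = /(^|\s)@(?!")([^\s"]+:[^\s"]+)\b/g
const quoted = '@"C:\\Users\\me\\file.txt"'

console.log(quoted.match(oldRegex)) // [ '@"C:\\Users\\me\\file.txt' ]: \b backtracks past the closing quote
console.log(quoted.match(newRegex)) // null: the (?!") lookahead drops the quoted token
console.log('@server:resource/path'.match(newRegex)) // [ '@server:resource/path' ]: MCP mentions still match
```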
@@ -576,6 +576,7 @@ export type GlobalConfig = {

  // Additional model options for the model picker (fetched during bootstrap).
  additionalModelOptionsCache?: ModelOption[]
  additionalModelOptionsCacheScope?: string

  // Additional model options discovered from OpenAI-compatible endpoints.
  openaiAdditionalModelOptionsCache?: ModelOption[]

100 src/utils/dragDropPaths.test.ts Normal file
@@ -0,0 +1,100 @@
import { afterAll, describe, expect, test } from 'bun:test'
import { mkdirSync, mkdtempSync, rmSync, writeFileSync } from 'fs'
import { tmpdir } from 'os'
import { join } from 'path'
import { extractDraggedFilePaths } from './dragDropPaths.js'

describe('extractDraggedFilePaths', () => {
  // Paths that exist on any system.
  const thisFile = import.meta.path
  const packageJson = `${process.cwd()}/package.json`

  // Fixtures created synchronously at describe-load time (not in
  // `beforeAll`) so their paths are available to `test.each` tables,
  // which are built before any hook runs.
  const tmpDir = mkdtempSync(join(tmpdir(), 'dragdrop-test-'))
  const spacedFile = join(tmpDir, 'my file.txt')
  writeFileSync(spacedFile, 'test')
  const scopedDir = join(tmpDir, '@types')
  mkdirSync(scopedDir)
  const atSignFile = join(scopedDir, 'index.d.ts')
  writeFileSync(atSignFile, 'test')

  afterAll(() => {
    rmSync(tmpDir, { recursive: true, force: true })
  })

  describe('returns an empty array', () => {
    const emptyCases: Array<[string, string]> = [
      ['a non-absolute path', 'relative/path/file.ts'],
      ['a plain image path', '/Users/foo/image.png'],
      ['an uppercase image extension', '/Users/foo/SHOT.PNG'],
      ['a double-quoted image path', '"/Users/foo/shot.png"'],
      ['a single-quoted image path', "'/Users/foo/shot.jpg'"],
      ['regular prose text', 'hello world this is text'],
      ['a nonexistent absolute path', '/definitely/nonexistent/file.ts'],
      ['a single-quoted nonexistent path', "'/definitely/nonexistent.ts'"],
      ['an empty string', ''],
      ['whitespace only', ' \n '],
      // Mixed-segment cases: all-or-nothing policy means a single bad
      // entry disqualifies the whole paste.
      ['a mix where one path does not exist', `${thisFile}\n/nonexistent/file.ts`],
      ['a mix where one segment is an image', `${thisFile}\n/Users/foo/shot.png`],
    ]
    test.each(emptyCases)('for %s', (_label, input) => {
      expect(extractDraggedFilePaths(input)).toEqual([])
    })
  })

  describe('resolves a single path', () => {
    const singleCases: Array<[string, string, string]> = [
      ['a plain absolute path', thisFile, thisFile],
      ['a double-quoted path', `"${thisFile}"`, thisFile],
      ['a single-quoted path', `'${thisFile}'`, thisFile],
      ['a path with leading/trailing whitespace', ` ${thisFile} `, thisFile],
      // Realistic: dragging something under `node_modules/@types/...`.
      // `@` inside the path must not collide with the mention prefix
      // that the caller prepends downstream.
      ['a path containing an `@` segment', atSignFile, atSignFile],
    ]
    test.each(singleCases)('from %s', (_label, input, expected) => {
      expect(extractDraggedFilePaths(input)).toEqual([expected])
    })
  })

  describe('resolves multiple paths', () => {
    const multiCases: Array<[string, string, string[]]> = [
      [
        'newline-separated',
        `${thisFile}\n${packageJson}`,
        [thisFile, packageJson],
      ],
      [
        'space-separated (Finder drag)',
        `${thisFile} ${packageJson}`,
        [thisFile, packageJson],
      ],
    ]
    test.each(multiCases)('when input is %s', (_label, input, expected) => {
      expect(extractDraggedFilePaths(input)).toEqual(expected)
    })
  })

  // Backslash-escaped paths are a Finder/macOS + Linux convention — on
  // Windows the shell-escape step is skipped, so these cases do not apply.
  if (process.platform !== 'win32') {
    describe('handles backslash-escaped paths', () => {
      test('returns empty for an escaped image path', () => {
        // The image check must apply after escape stripping so Finder
        // image drags still route to the image paste handler.
        expect(extractDraggedFilePaths('/Users/foo/my\\ shot.png')).toEqual([])
      })

      test('resolves an escaped real file with a space in its name', () => {
        // Raw form matches what a terminal delivers on Finder drag.
        const escaped = spacedFile.replace(/ /g, '\\ ')
        expect(extractDraggedFilePaths(escaped)).toEqual([spacedFile])
      })
    })
  }
})

55 src/utils/dragDropPaths.ts Normal file
@@ -0,0 +1,55 @@
import { existsSync } from 'fs'
import { isAbsolute } from 'path'

// Inlined to avoid pulling the full `imagePaste.ts` module (which imports
// `bun:bundle`) into this file's dependency graph. Must stay in sync with
// `IMAGE_EXTENSION_REGEX` in `./imagePaste.ts`.
const IMAGE_EXTENSION_REGEX = /\.(png|jpe?g|gif|webp)$/i

/**
 * Detect absolute file paths in pasted text (typically from drag-and-drop).
 * Returns the cleaned paths if ALL segments are existing non-image files,
 * or an empty array otherwise.
 *
 * Splitting logic mirrors usePasteHandler: space preceding `/` or a Windows
 * drive letter, plus newline separators.
 */
export function extractDraggedFilePaths(text: string): string[] {
  const segments = text
    .split(/ (?=\/|[A-Za-z]:\\)/)
    .flatMap(part => part.split('\n'))
    .map(s => s.trim())
    .filter(Boolean)

  if (segments.length === 0) return []

  const cleaned: string[] = []

  for (const raw of segments) {
    // Strip outer quotes and shell-escape backslashes
    let p = raw
    if (
      (p.startsWith('"') && p.endsWith('"')) ||
      (p.startsWith("'") && p.endsWith("'"))
    ) {
      p = p.slice(1, -1)
    }
    if (process.platform !== 'win32') {
      p = p.replace(/\\(.)/g, '$1')
    }

    // Image files are handled by the upstream image paste handler.
    // Check against the cleaned path so quoted/escaped image paths like
    // `"/foo/shot.png"` or `/foo/my\ shot.png` are reliably excluded.
    if (IMAGE_EXTENSION_REGEX.test(p)) return []
    if (!isAbsolute(p)) return []
    // Verify the path actually exists on disk. Plain `fs.existsSync` is
    // used intentionally here instead of the wrapped `getFsImplementation`
    // to keep this module free of the heavy `fsOperations` dependency
    // chain — this is a pure existence check with no permission semantics.
    if (!existsSync(p)) return []
    cleaned.push(p)
  }

  return cleaned
}
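A quick usage sketch of the all-or-nothing contract. The paths here are hypothetical; the first call only returns them when both files actually exist on disk.

```ts
import { extractDraggedFilePaths } from './dragDropPaths.js'

// Two existing files, one quoted because of the space in its name:
extractDraggedFilePaths('"/Users/foo/my file.ts" /Users/foo/bar.ts')
// -> ['/Users/foo/my file.ts', '/Users/foo/bar.ts'] when both exist

// Any image segment routes the whole paste to the image handler instead:
extractDraggedFilePaths('/Users/foo/bar.ts /Users/foo/shot.png') // -> []
```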
@@ -1,4 +1,5 @@
import { feature } from 'bun:bundle'
import { getAPIProvider } from './model/providers.js'
import type { BetaUsage as Usage } from '@anthropic-ai/sdk/resources/beta/messages/messages.mjs'
import type {
  ContentBlock,

@@ -1765,6 +1766,7 @@ export function stripCallerFieldFromAssistantMessage(
          id: block.id,
          name: block.name,
          input: block.input,
          ...(getAPIProvider() === 'gemini' && (block as any).extra_content ? { extra_content: (block as any).extra_content } : {})
        }
      }),
    },

@@ -2221,21 +2223,24 @@ export function normalizeMessagesForAPI(

      // When tool search is enabled, preserve all fields including 'caller'
      if (toolSearchEnabled) {
        const { extra_content, ...restBlock } = block as any
        return {
-          ...block,
          ...restBlock,
          name: canonicalName,
          input: normalizedInput,
          ...(getAPIProvider() === 'gemini' && extra_content ? { extra_content } : {})
        }
      }

      // When tool search is NOT enabled, explicitly construct tool_use
      // block with only standard API fields to avoid sending fields like
      // 'caller' that may be stored in sessions from tool search runs
      return {
        type: 'tool_use' as const,
        id: block.id,
        name: canonicalName,
        input: normalizedInput,
        ...(getAPIProvider() === 'gemini' && (block as any).extra_content ? { extra_content: (block as any).extra_content } : {})
      }
    }
    return block
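Isolated, the rest-pattern in the tool-search branch works like this. A minimal sketch; the block literal and the `'gemini'` stand-in for `getAPIProvider()` are hypothetical.

```ts
const block = {
  type: 'tool_use',
  id: 't1',
  name: 'bash',
  input: {},
  caller: 'tool_search', // preserved when tool search is enabled
  extra_content: { note: 'gemini-only' },
}

// extra_content is peeled off, everything else (including caller) survives,
// and extra_content is re-attached only when the provider is gemini:
const { extra_content, ...restBlock } = block
const provider = 'gemini' as string // stand-in for getAPIProvider()
const normalized = {
  ...restBlock,
  ...(provider === 'gemini' && extra_content ? { extra_content } : {}),
}
console.log(normalized)
```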
@@ -1,5 +1,6 @@
// biome-ignore-all assist/source/organizeImports: internal-only import markers must not be reordered
import { getInitialMainLoopModel } from '../../bootstrap/state.js'
import { getAdditionalModelOptionsCacheScope } from '../../services/api/providerConfig.js'
import {
  isClaudeAISubscriber,
  isMaxSubscriber,

@@ -44,6 +45,25 @@ export type ModelOption = {
  descriptionForModel?: string
}

function getScopedAdditionalModelOptions(): ModelOption[] {
  const config = getGlobalConfig()
  const activeScope = getAdditionalModelOptionsCacheScope()

  if (!activeScope) {
    return []
  }

  if (config.additionalModelOptionsCacheScope !== undefined) {
    return config.additionalModelOptionsCacheScope === activeScope
      ? (config.additionalModelOptionsCache ?? [])
      : []
  }

  return activeScope === 'firstParty'
    ? (config.additionalModelOptionsCache ?? [])
    : []
}

export function getDefaultOptionForUser(fastMode = false): ModelOption {
  if (process.env.USER_TYPE === 'ant') {
    const currentModel = renderDefaultModelSetting(
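The three-way decision in getScopedAdditionalModelOptions is easiest to read as a table. A compact restatement of the same logic, with types narrowed to strings for brevity:

```ts
// activeScope                        config.…CacheScope                served
// null                               anything                          []    (no scope active)
// 'openai:http://localhost:1234/v1'  same string                       cache
// 'openai:http://localhost:1234/v1'  different (stale) scope           []
// 'firstParty'                       undefined (legacy, pre-scoping)   cache
function scopedOptionsSketch(
  activeScope: string | null,
  cachedScope: string | undefined,
  cache: string[],
): string[] {
  if (!activeScope) return []
  if (cachedScope !== undefined) return cachedScope === activeScope ? cache : []
  // Legacy caches predate scoping and were only ever written for first-party.
  return activeScope === 'firstParty' ? cache : []
}
```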
@@ -408,6 +428,16 @@ function getModelOptionsBase(fastMode = false): ModelOption[] {
    return standardOptions
  }

  if (getAdditionalModelOptionsCacheScope()?.startsWith('openai:')) {
    const activeOpenAIOptions = getActiveOpenAIModelOptionsCache()
    return [
      getDefaultOptionForUser(fastMode),
      ...(activeOpenAIOptions.length > 0
        ? activeOpenAIOptions
        : getScopedAdditionalModelOptions()),
    ]
  }

  // PAYG 1P API: Default (Sonnet) + Sonnet 1M + Opus 4.6 + Opus 1M + Haiku
  if (getAPIProvider() === 'firstParty') {
    const payg1POptions = [getDefaultOptionForUser(fastMode)]

@@ -566,13 +596,8 @@ export function getModelOptions(fastMode = false): ModelOption[] {
    })
  }

-  const additionalOptions =
-    getAPIProvider() === 'openai'
-      ? getActiveOpenAIModelOptionsCache()
-      : getGlobalConfig().additionalModelOptionsCache ?? []
-
-  // Append additional model options fetched during bootstrap/endpoints.
-  for (const opt of additionalOptions) {
  // Append additional model options fetched during bootstrap
  for (const opt of getScopedAdditionalModelOptions()) {
    if (!options.some(existing => existing.value === opt.value)) {
      options.push(opt)
    }
@@ -23,9 +23,13 @@ const OPENAI_CONTEXT_WINDOWS: Record<string, number> = {
  'gpt-4.1-nano': 1_047_576,
  'gpt-4-turbo': 128_000,
  'gpt-4': 8_192,
  'o1': 200_000,
  'o1-mini': 128_000,
  'o1-preview': 128_000,
  'o1-pro': 200_000,
  'o3': 200_000,
  'o3-mini': 200_000,
  'o4-mini': 200_000,
-  'o3': 200_000,

  // DeepSeek (V3: 128k context per official docs)
  'deepseek-chat': 128_000,

@@ -63,6 +67,9 @@ const OPENAI_CONTEXT_WINDOWS: Record<string, number> = {
  'phi4:14b': 16_384,
  'gemma2:27b': 8_192,
  'codellama:13b': 16_384,
  'llama3.2:1b': 128_000,
  'qwen3:8b': 128_000,
  'codestral': 32_768,
}

/**
@@ -82,9 +89,13 @@ const OPENAI_MAX_OUTPUT_TOKENS: Record<string, number> = {
  'gpt-4.1-nano': 32_768,
  'gpt-4-turbo': 4_096,
  'gpt-4': 4_096,
  'o1': 100_000,
  'o1-mini': 65_536,
  'o1-preview': 32_768,
  'o1-pro': 100_000,
  'o3': 100_000,
  'o3-mini': 100_000,
  'o4-mini': 100_000,
-  'o3': 100_000,

  // DeepSeek
  'deepseek-chat': 8_192,

@@ -120,6 +131,9 @@ const OPENAI_MAX_OUTPUT_TOKENS: Record<string, number> = {
  'phi4:14b': 4_096,
  'gemma2:27b': 4_096,
  'codellama:13b': 4_096,
  'llama3.2:1b': 4_096,
  'qwen3:8b': 8_192,
  'codestral': 8_192,
}

function lookupByModel<T>(table: Record<string, T>, model: string): T | undefined {
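lookupByModel's body is cut off by the diff context here. A plausible sketch, assuming an exact match is tried first and then the bare name before any `:tag` (consistent with keys like `'phi4:14b'` sitting next to bare `'codestral'` above); the real implementation may differ.

```ts
function lookupByModelSketch<T>(table: Record<string, T>, model: string): T | undefined {
  if (model in table) return table[model] // e.g. 'phi4:14b'
  const base = model.split(':')[0]        // 'codestral:latest' -> 'codestral'
  return base in table ? table[base] : undefined
}
```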
@@ -7,6 +7,9 @@ const originalEnv = {
  CLAUDE_CODE_USE_BEDROCK: process.env.CLAUDE_CODE_USE_BEDROCK,
  CLAUDE_CODE_USE_VERTEX: process.env.CLAUDE_CODE_USE_VERTEX,
  CLAUDE_CODE_USE_FOUNDRY: process.env.CLAUDE_CODE_USE_FOUNDRY,
  OPENAI_BASE_URL: process.env.OPENAI_BASE_URL,
  OPENAI_API_BASE: process.env.OPENAI_API_BASE,
  OPENAI_MODEL: process.env.OPENAI_MODEL,
}

afterEach(() => {

@@ -16,6 +19,9 @@ afterEach(() => {
  process.env.CLAUDE_CODE_USE_BEDROCK = originalEnv.CLAUDE_CODE_USE_BEDROCK
  process.env.CLAUDE_CODE_USE_VERTEX = originalEnv.CLAUDE_CODE_USE_VERTEX
  process.env.CLAUDE_CODE_USE_FOUNDRY = originalEnv.CLAUDE_CODE_USE_FOUNDRY
  process.env.OPENAI_BASE_URL = originalEnv.OPENAI_BASE_URL
  process.env.OPENAI_API_BASE = originalEnv.OPENAI_API_BASE
  process.env.OPENAI_MODEL = originalEnv.OPENAI_MODEL
})

async function importFreshProvidersModule() {

@@ -29,6 +35,9 @@ function clearProviderEnv(): void {
  delete process.env.CLAUDE_CODE_USE_BEDROCK
  delete process.env.CLAUDE_CODE_USE_VERTEX
  delete process.env.CLAUDE_CODE_USE_FOUNDRY
  delete process.env.OPENAI_BASE_URL
  delete process.env.OPENAI_API_BASE
  delete process.env.OPENAI_MODEL
}

test('first-party provider keeps Anthropic account setup flow enabled', () => {

@@ -69,3 +78,32 @@ test('GEMINI takes precedence over GitHub when both are set', async () => {

  expect(getAPIProvider()).toBe('gemini')
})

test('explicit local openai-compatible base URLs stay on the openai provider', async () => {
  clearProviderEnv()
  process.env.CLAUDE_CODE_USE_OPENAI = '1'
  process.env.OPENAI_BASE_URL = 'http://127.0.0.1:8080/v1'
  process.env.OPENAI_MODEL = 'gpt-5.4'

  const { getAPIProvider } = await importFreshProvidersModule()
  expect(getAPIProvider()).toBe('openai')
})

test('codex aliases still resolve to the codex provider without a non-codex base URL', async () => {
  clearProviderEnv()
  process.env.CLAUDE_CODE_USE_OPENAI = '1'
  process.env.OPENAI_MODEL = 'codexplan'

  const { getAPIProvider } = await importFreshProvidersModule()
  expect(getAPIProvider()).toBe('codex')
})

test('official OpenAI base URLs now keep provider detection on openai for aliases', async () => {
  clearProviderEnv()
  process.env.CLAUDE_CODE_USE_OPENAI = '1'
  process.env.OPENAI_BASE_URL = 'https://api.openai.com/v1'
  process.env.OPENAI_MODEL = 'gpt-5.4'

  const { getAPIProvider } = await importFreshProvidersModule()
  expect(getAPIProvider()).toBe('openai')
})
@@ -1,5 +1,5 @@
import type { AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS } from '../../services/analytics/index.js'
-import { isCodexAlias } from '../../services/api/providerConfig.js'
import { shouldUseCodexTransport } from '../../services/api/providerConfig.js'
import { isEnvTruthy } from '../envUtils.js'

export type APIProvider =

@@ -34,11 +34,10 @@ export function usesAnthropicAccountFlow(): boolean {
  return getAPIProvider() === 'firstParty'
}

function isCodexModel(): boolean {
-  const model = (process.env.OPENAI_MODEL || '').trim()
-  if (!model) return false
-  // Delegate to the canonical alias table in providerConfig to keep
-  // the two Codex detection systems (provider type + transport) in sync.
-  return isCodexAlias(model)
  return shouldUseCodexTransport(
    process.env.OPENAI_MODEL || '',
    process.env.OPENAI_BASE_URL ?? process.env.OPENAI_API_BASE,
  )
}

export function getAPIProviderForStatsig(): AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS {
71 src/utils/plugins/pluginLoader.test.ts Normal file
@@ -0,0 +1,71 @@
import { describe, expect, test } from 'bun:test'

import type { LoadedPlugin } from '../../types/plugin.js'
import { mergePluginSources } from './pluginLoader.js'

function marketplacePlugin(
  name: string,
  marketplace: string,
  enabled: boolean,
): LoadedPlugin {
  const pluginId = `${name}@${marketplace}`
  return {
    name,
    manifest: { name } as LoadedPlugin['manifest'],
    path: `/tmp/${pluginId}`,
    source: pluginId,
    repository: pluginId,
    enabled,
  }
}

describe('mergePluginSources', () => {
  test('keeps the enabled copy when duplicate marketplace plugins disagree on enabled state', () => {
    const enabledOfficial = marketplacePlugin(
      'frontend-design',
      'claude-plugins-official',
      true,
    )
    const disabledLegacy = marketplacePlugin(
      'frontend-design',
      'claude-code-plugins',
      false,
    )

    const result = mergePluginSources({
      session: [],
      marketplace: [disabledLegacy, enabledOfficial],
      builtin: [],
    })

    expect(result.plugins).toEqual([enabledOfficial])
    expect(result.errors).toEqual([])
  })

  test('keeps the later copy when duplicate marketplace plugins are both enabled', () => {
    const legacy = marketplacePlugin(
      'frontend-design',
      'claude-code-plugins',
      true,
    )
    const official = marketplacePlugin(
      'frontend-design',
      'claude-plugins-official',
      true,
    )

    const result = mergePluginSources({
      session: [],
      marketplace: [legacy, official],
      builtin: [],
    })

    expect(result.plugins).toEqual([official])
    expect(result.errors).toHaveLength(1)
    expect(result.errors[0]).toMatchObject({
      type: 'generic-error',
      source: legacy.source,
      plugin: legacy.name,
    })
  })
})
@@ -3045,24 +3045,63 @@ export function mergePluginSources(sources: {
  })

  const sessionNames = new Set(sessionPlugins.map(p => p.name))
-  const marketplacePlugins = sources.marketplace.filter(p => {
-    if (sessionNames.has(p.name)) {
  // Different marketplaces can enable the same short plugin name, but
  // downstream command/skill loading scopes by plugin.name.
  const marketplacePluginsByName = new Map<string, LoadedPlugin>()
  for (const plugin of sources.marketplace) {
    if (sessionNames.has(plugin.name)) {
      logForDebugging(
-        `Plugin "${p.name}" from --plugin-dir overrides installed version`,
        `Plugin "${plugin.name}" from --plugin-dir overrides installed version`,
      )
-      return false
      continue
    }
-    return true
-  })
    const existing = marketplacePluginsByName.get(plugin.name)
    if (!existing) {
      marketplacePluginsByName.set(plugin.name, plugin)
      continue
    }

    const winner = selectMarketplacePlugin(existing, plugin)
    const dropped = winner === existing ? plugin : existing
    marketplacePluginsByName.set(plugin.name, winner)

    logForDebugging(
      `Ignoring duplicate marketplace plugin "${plugin.name}" from ${dropped.source}; using ${winner.source}`,
      { level: 'warn' },
    )
    if (existing.enabled && plugin.enabled) {
      errors.push({
        type: 'generic-error',
        source: dropped.source,
        plugin: plugin.name,
        error: `Duplicate marketplace plugin "${plugin.name}" ignored: using "${winner.source}" and skipping "${dropped.source}" to avoid short-name collisions`,
      })
    }
  }
  // Session first, then non-overridden marketplace, then builtin.
  // Downstream first-match consumers see session plugins before
  // installed ones for any that slipped past the name filter.
  return {
-    plugins: [...sessionPlugins, ...marketplacePlugins, ...sources.builtin],
    plugins: [
      ...sessionPlugins,
      ...marketplacePluginsByName.values(),
      ...sources.builtin,
    ],
    errors,
  }
}

function selectMarketplacePlugin(
  current: LoadedPlugin,
  candidate: LoadedPlugin,
): LoadedPlugin {
  if (current.enabled !== candidate.enabled) {
    return candidate.enabled ? candidate : current
  }

  return candidate
}

/**
 * Main plugin loading function that discovers and loads all plugins.
 *
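Condensed, the tie-break the tests above pin down is just this. A reduction of selectMarketplacePlugin plus the both-enabled error path, not the real types:

```ts
type Candidate = { source: string; enabled: boolean }

function pick(current: Candidate, candidate: Candidate): Candidate {
  if (current.enabled !== candidate.enabled) {
    return candidate.enabled ? candidate : current // the enabled copy wins
  }
  return candidate // both enabled (or both disabled): the later marketplace wins
}

console.log(
  pick(
    { source: 'claude-code-plugins', enabled: true },
    { source: 'claude-plugins-official', enabled: true },
  ).source,
) // 'claude-plugins-official'; a generic-error is surfaced for the dropped copy
```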
78 src/utils/providerDiscovery.test.ts Normal file
@@ -0,0 +1,78 @@
import { afterEach, expect, mock, test } from 'bun:test'

import {
  getLocalOpenAICompatibleProviderLabel,
  listOpenAICompatibleModels,
} from './providerDiscovery.js'

const originalFetch = globalThis.fetch
const originalEnv = {
  OPENAI_BASE_URL: process.env.OPENAI_BASE_URL,
}

afterEach(() => {
  globalThis.fetch = originalFetch
  process.env.OPENAI_BASE_URL = originalEnv.OPENAI_BASE_URL
})

test('lists models from a local openai-compatible /models endpoint', async () => {
  globalThis.fetch = mock((input, init) => {
    const url = typeof input === 'string' ? input : input.url
    expect(url).toBe('http://localhost:1234/v1/models')
    expect(init?.headers).toEqual({ Authorization: 'Bearer local-key' })

    return Promise.resolve(
      new Response(
        JSON.stringify({
          data: [
            { id: 'qwen2.5-coder-7b-instruct' },
            { id: 'llama-3.2-3b-instruct' },
            { id: 'qwen2.5-coder-7b-instruct' },
          ],
        }),
        { status: 200 },
      ),
    )
  }) as typeof globalThis.fetch

  await expect(
    listOpenAICompatibleModels({
      baseUrl: 'http://localhost:1234/v1',
      apiKey: 'local-key',
    }),
  ).resolves.toEqual([
    'qwen2.5-coder-7b-instruct',
    'llama-3.2-3b-instruct',
  ])
})

test('returns null when a local openai-compatible /models request fails', async () => {
  globalThis.fetch = mock(() =>
    Promise.resolve(new Response('not available', { status: 503 })),
  ) as typeof globalThis.fetch

  await expect(
    listOpenAICompatibleModels({ baseUrl: 'http://localhost:1234/v1' }),
  ).resolves.toBeNull()
})

test('detects LM Studio from the default localhost port', () => {
  expect(getLocalOpenAICompatibleProviderLabel('http://localhost:1234/v1')).toBe(
    'LM Studio',
  )
})

test('detects common local openai-compatible providers by hostname', () => {
  expect(
    getLocalOpenAICompatibleProviderLabel('http://localai.local:8080/v1'),
  ).toBe('LocalAI')
  expect(
    getLocalOpenAICompatibleProviderLabel('http://vllm.local:8000/v1'),
  ).toBe('vLLM')
})

test('falls back to a generic local openai-compatible label', () => {
  expect(
    getLocalOpenAICompatibleProviderLabel('http://127.0.0.1:8080/v1'),
  ).toBe('Local OpenAI-compatible')
})
@@ -1,4 +1,5 @@
import type { OllamaModelDescriptor } from './providerRecommendation.ts'
import { DEFAULT_OPENAI_BASE_URL } from '../services/api/providerConfig.js'

export const DEFAULT_OLLAMA_BASE_URL = 'http://localhost:11434'
export const DEFAULT_ATOMIC_CHAT_BASE_URL = 'http://127.0.0.1:1337'

@@ -53,6 +54,64 @@ export function getAtomicChatChatBaseUrl(baseUrl?: string): string {
  return `${getAtomicChatApiBaseUrl(baseUrl)}/v1`
}

export function getOpenAICompatibleModelsBaseUrl(baseUrl?: string): string {
  return (
    baseUrl || process.env.OPENAI_BASE_URL || DEFAULT_OPENAI_BASE_URL
  ).replace(/\/+$/, '')
}

export function getLocalOpenAICompatibleProviderLabel(baseUrl?: string): string {
  try {
    const parsed = new URL(getOpenAICompatibleModelsBaseUrl(baseUrl))
    const host = parsed.host.toLowerCase()
    const hostname = parsed.hostname.toLowerCase()
    const path = parsed.pathname.toLowerCase()
    const haystack = `${hostname} ${path}`

    if (
      host.endsWith(':1234') ||
      haystack.includes('lmstudio') ||
      haystack.includes('lm-studio')
    ) {
      return 'LM Studio'
    }
    if (host.endsWith(':11434') || haystack.includes('ollama')) {
      return 'Ollama'
    }
    if (haystack.includes('localai')) {
      return 'LocalAI'
    }
    if (haystack.includes('jan')) {
      return 'Jan'
    }
    if (haystack.includes('kobold')) {
      return 'KoboldCpp'
    }
    if (haystack.includes('llama.cpp') || haystack.includes('llamacpp')) {
      return 'llama.cpp'
    }
    if (haystack.includes('vllm')) {
      return 'vLLM'
    }
    if (
      haystack.includes('open-webui') ||
      haystack.includes('openwebui')
    ) {
      return 'Open WebUI'
    }
    if (
      haystack.includes('text-generation-webui') ||
      haystack.includes('oobabooga')
    ) {
      return 'text-generation-webui'
    }
  } catch {
    // Fall back to the generic label when the base URL is malformed.
  }

  return 'Local OpenAI-compatible'
}

export async function hasLocalOllama(baseUrl?: string): Promise<boolean> {
  const { signal, clear } = withTimeoutSignal(1200)
  try {
@@ -111,6 +170,46 @@ export async function listOllamaModels(
  }
}

export async function listOpenAICompatibleModels(options?: {
  baseUrl?: string
  apiKey?: string
}): Promise<string[] | null> {
  const { signal, clear } = withTimeoutSignal(5000)
  try {
    const response = await fetch(
      `${getOpenAICompatibleModelsBaseUrl(options?.baseUrl)}/models`,
      {
        method: 'GET',
        headers: options?.apiKey
          ? {
              Authorization: `Bearer ${options.apiKey}`,
            }
          : undefined,
        signal,
      },
    )
    if (!response.ok) {
      return null
    }

    const data = (await response.json()) as {
      data?: Array<{ id?: string }>
    }

    return Array.from(
      new Set(
        (data.data ?? [])
          .filter(model => Boolean(model.id))
          .map(model => model.id!),
      ),
    )
  } catch {
    return null
  } finally {
    clear()
  }
}

export async function hasLocalAtomicChat(baseUrl?: string): Promise<boolean> {
  const { signal, clear } = withTimeoutSignal(1200)
  try {