Compare commits

..

1 Commits

Author SHA1 Message Date
gnanam1990
cce4b5afa4 fix(ripgrep): use @vscode/ripgrep package as the builtin source (#911)
The vendored-binary lookup at vendor/ripgrep/<arch>-<platform>/rg never
resolved in this fork — that directory does not ship — so users without
a system rg had no working fallback. Switch to the @vscode/ripgrep
package so Microsoft maintains the platform/arch matrix and the binary
is delivered via npm.

- src/utils/ripgrep.ts: replace hand-rolled vendor-path resolution with
  rgPath from @vscode/ripgrep. Lazy require so a missing package falls
  through to the system rg branch instead of throwing at import.
  Drop builtinExists from the config args; builtinCommand is now a
  string-or-null. The system override (USE_BUILTIN_RIPGREP=0), the
  Bun-compiled standalone embedded mode, the macOS codesign hook, and
  all retry/timeout/error logic are preserved untouched.
- scripts/build.ts: mark @vscode/ripgrep as external. The package
  resolves rgPath via __dirname at runtime, so bundling would freeze
  the build host's absolute path into dist/cli.mjs.
- src/utils/ripgrep.test.ts: update for the new config shape and add
  tests covering USE_BUILTIN_RIPGREP=0, embedded mode, last-resort
  fallback, and null builtin path.

Tested locally on Linux (Bun 1.3.13). macOS (codesign hook) and
Windows (rg.exe extension) need contributor verification.
2026-04-28 09:21:07 +05:30
10 changed files with 115 additions and 159 deletions

View File

@@ -28,6 +28,7 @@
"@opentelemetry/sdk-trace-base": "2.6.1", "@opentelemetry/sdk-trace-base": "2.6.1",
"@opentelemetry/sdk-trace-node": "2.6.1", "@opentelemetry/sdk-trace-node": "2.6.1",
"@opentelemetry/semantic-conventions": "1.40.0", "@opentelemetry/semantic-conventions": "1.40.0",
"@vscode/ripgrep": "^1.17.1",
"ajv": "8.18.0", "ajv": "8.18.0",
"auto-bind": "5.0.1", "auto-bind": "5.0.1",
"axios": "1.15.0", "axios": "1.15.0",
@@ -461,6 +462,8 @@
"@types/react": ["@types/react@19.2.14", "", { "dependencies": { "csstype": "^3.2.2" } }, "sha512-ilcTH/UniCkMdtexkoCN0bI7pMcJDvmQFPvuPvmEaYA/NSfFTAgdUSLAoVjaRJm7+6PvcM+q1zYOwS4wTYMF9w=="], "@types/react": ["@types/react@19.2.14", "", { "dependencies": { "csstype": "^3.2.2" } }, "sha512-ilcTH/UniCkMdtexkoCN0bI7pMcJDvmQFPvuPvmEaYA/NSfFTAgdUSLAoVjaRJm7+6PvcM+q1zYOwS4wTYMF9w=="],
"@vscode/ripgrep": ["@vscode/ripgrep@1.17.1", "", { "dependencies": { "https-proxy-agent": "^7.0.2", "proxy-from-env": "^1.1.0", "yauzl": "^2.9.2" } }, "sha512-xTs7DGyAO3IsJYOCTBP8LnTvPiYVKEuyv8s0xyJDBXfs8rhBfqnZPvb6xDT+RnwWzcXqW27xLS/aGrkjX7lNWw=="],
"accepts": ["accepts@2.0.0", "", { "dependencies": { "mime-types": "^3.0.0", "negotiator": "^1.0.0" } }, "sha512-5cvg6CtKwfgdmVqY1WIiXKc3Q1bkRqGLi+2W/6ao+6Y7gu/RCwRuAhGEzh5B4KlszSuTLgZYuqFqo5bImjNKng=="], "accepts": ["accepts@2.0.0", "", { "dependencies": { "mime-types": "^3.0.0", "negotiator": "^1.0.0" } }, "sha512-5cvg6CtKwfgdmVqY1WIiXKc3Q1bkRqGLi+2W/6ao+6Y7gu/RCwRuAhGEzh5B4KlszSuTLgZYuqFqo5bImjNKng=="],
"agent-base": ["agent-base@7.1.4", "", {}, "sha512-MnA+YT8fwfJPgBx3m60MNqakm30XOkyIoH1y6huTQvC0PwZG7ki8NacLBcrPbNoo8vEZy7Jpuk7+jMO+CUovTQ=="], "agent-base": ["agent-base@7.1.4", "", {}, "sha512-MnA+YT8fwfJPgBx3m60MNqakm30XOkyIoH1y6huTQvC0PwZG7ki8NacLBcrPbNoo8vEZy7Jpuk7+jMO+CUovTQ=="],
@@ -491,6 +494,8 @@
"bowser": ["bowser@2.14.1", "", {}, "sha512-tzPjzCxygAKWFOJP011oxFHs57HzIhOEracIgAePE4pqB3LikALKnSzUyU4MGs9/iCEUuHlAJTjTc5M+u7YEGg=="], "bowser": ["bowser@2.14.1", "", {}, "sha512-tzPjzCxygAKWFOJP011oxFHs57HzIhOEracIgAePE4pqB3LikALKnSzUyU4MGs9/iCEUuHlAJTjTc5M+u7YEGg=="],
"buffer-crc32": ["buffer-crc32@0.2.13", "", {}, "sha512-VO9Ht/+p3SN7SKWqcrgEzjGbRSJYTx+Q1pTQC0wrWqHx0vpJraQ6GtHx8tvcg1rlK1byhU5gccxgOgj7B0TDkQ=="],
"buffer-equal-constant-time": ["buffer-equal-constant-time@1.0.1", "", {}, "sha512-zRpUiDwd/xk6ADqPMATG8vc9VPrkck7T07OIx0gnjmJAnHnTVXNQG3vfvWNuiZIkwu9KrKdA1iJKfsfTVxE6NA=="], "buffer-equal-constant-time": ["buffer-equal-constant-time@1.0.1", "", {}, "sha512-zRpUiDwd/xk6ADqPMATG8vc9VPrkck7T07OIx0gnjmJAnHnTVXNQG3vfvWNuiZIkwu9KrKdA1iJKfsfTVxE6NA=="],
"bun-types": ["bun-types@1.3.11", "", { "dependencies": { "@types/node": "*" } }, "sha512-1KGPpoxQWl9f6wcZh57LvrPIInQMn2TQ7jsgxqpRzg+l0QPOFvJVH7HmvHo/AiPgwXy+/Thf6Ov3EdVn1vOabg=="], "bun-types": ["bun-types@1.3.11", "", { "dependencies": { "@types/node": "*" } }, "sha512-1KGPpoxQWl9f6wcZh57LvrPIInQMn2TQ7jsgxqpRzg+l0QPOFvJVH7HmvHo/AiPgwXy+/Thf6Ov3EdVn1vOabg=="],
@@ -609,6 +614,8 @@
"fast-xml-parser": ["fast-xml-parser@5.5.8", "", { "dependencies": { "fast-xml-builder": "^1.1.4", "path-expression-matcher": "^1.2.0", "strnum": "^2.2.0" }, "bin": { "fxparser": "src/cli/cli.js" } }, "sha512-Z7Fh2nVQSb2d+poDViM063ix2ZGt9jmY1nWhPfHBOK2Hgnb/OW3P4Et3P/81SEej0J7QbWtJqxO05h8QYfK7LQ=="], "fast-xml-parser": ["fast-xml-parser@5.5.8", "", { "dependencies": { "fast-xml-builder": "^1.1.4", "path-expression-matcher": "^1.2.0", "strnum": "^2.2.0" }, "bin": { "fxparser": "src/cli/cli.js" } }, "sha512-Z7Fh2nVQSb2d+poDViM063ix2ZGt9jmY1nWhPfHBOK2Hgnb/OW3P4Et3P/81SEej0J7QbWtJqxO05h8QYfK7LQ=="],
"fd-slicer": ["fd-slicer@1.1.0", "", { "dependencies": { "pend": "~1.2.0" } }, "sha512-cE1qsB/VwyQozZ+q1dGxR8LBYNZeofhEdUNGSMbQD3Gw2lAzX9Zb3uIU6Ebc/Fmyjo9AWWfnn0AUCHqtevs/8g=="],
"fflate": ["fflate@0.8.2", "", {}, "sha512-cPJU47OaAoCbg0pBvzsgpTPhmhqI5eJjh/JIu8tPj5q+T7iLvW/JAYUqmE7KOB4R1ZyEhzBaIQpQpardBF5z8A=="], "fflate": ["fflate@0.8.2", "", {}, "sha512-cPJU47OaAoCbg0pBvzsgpTPhmhqI5eJjh/JIu8tPj5q+T7iLvW/JAYUqmE7KOB4R1ZyEhzBaIQpQpardBF5z8A=="],
"figures": ["figures@6.1.0", "", { "dependencies": { "is-unicode-supported": "^2.0.0" } }, "sha512-d+l3qxjSesT4V7v2fh+QnmFnUWv9lSpjarhShNTgBOfA0ttejbQUAlHLitbjkoRiDulW0OPoQPYIGhIC8ohejg=="], "figures": ["figures@6.1.0", "", { "dependencies": { "is-unicode-supported": "^2.0.0" } }, "sha512-d+l3qxjSesT4V7v2fh+QnmFnUWv9lSpjarhShNTgBOfA0ttejbQUAlHLitbjkoRiDulW0OPoQPYIGhIC8ohejg=="],
@@ -787,6 +794,8 @@
"path-to-regexp": ["path-to-regexp@8.4.1", "", {}, "sha512-fvU78fIjZ+SBM9YwCknCvKOUKkLVqtWDVctl0s7xIqfmfb38t2TT4ZU2gHm+Z8xGwgW+QWEU3oQSAzIbo89Ggw=="], "path-to-regexp": ["path-to-regexp@8.4.1", "", {}, "sha512-fvU78fIjZ+SBM9YwCknCvKOUKkLVqtWDVctl0s7xIqfmfb38t2TT4ZU2gHm+Z8xGwgW+QWEU3oQSAzIbo89Ggw=="],
"pend": ["pend@1.2.0", "", {}, "sha512-F3asv42UuXchdzt+xXqfW1OGlVBe+mxa2mqI0pg5yAHZPvFmY3Y6drSf/GQ1A86WgWEN9Kzh/WrgKa6iGcHXLg=="],
"picomatch": ["picomatch@4.0.4", "", {}, "sha512-QP88BAKvMam/3NxH6vj2o21R6MjxZUAd6nlwAS/pnGvN9IVLocLHxGYIzFhg6fUQ+5th6P4dv4eW9jX3DSIj7A=="], "picomatch": ["picomatch@4.0.4", "", {}, "sha512-QP88BAKvMam/3NxH6vj2o21R6MjxZUAd6nlwAS/pnGvN9IVLocLHxGYIzFhg6fUQ+5th6P4dv4eW9jX3DSIj7A=="],
"pkce-challenge": ["pkce-challenge@5.0.1", "", {}, "sha512-wQ0b/W4Fr01qtpHlqSqspcj3EhBvimsdh0KlHhH8HRZnMsEa0ea2fTULOXOS9ccQr3om+GcGRk4e+isrZWV8qQ=="], "pkce-challenge": ["pkce-challenge@5.0.1", "", {}, "sha512-wQ0b/W4Fr01qtpHlqSqspcj3EhBvimsdh0KlHhH8HRZnMsEa0ea2fTULOXOS9ccQr3om+GcGRk4e+isrZWV8qQ=="],
@@ -801,7 +810,7 @@
"proxy-addr": ["proxy-addr@2.0.7", "", { "dependencies": { "forwarded": "0.2.0", "ipaddr.js": "1.9.1" } }, "sha512-llQsMLSUDUPT44jdrU/O37qlnifitDP+ZwrmmZcoSKyLKvtZxpyV0n2/bD/N4tBAAZ/gJEdZU7KMraoK1+XYAg=="], "proxy-addr": ["proxy-addr@2.0.7", "", { "dependencies": { "forwarded": "0.2.0", "ipaddr.js": "1.9.1" } }, "sha512-llQsMLSUDUPT44jdrU/O37qlnifitDP+ZwrmmZcoSKyLKvtZxpyV0n2/bD/N4tBAAZ/gJEdZU7KMraoK1+XYAg=="],
"proxy-from-env": ["proxy-from-env@2.1.0", "", {}, "sha512-cJ+oHTW1VAEa8cJslgmUZrc+sjRKgAKl3Zyse6+PV38hZe/V6Z14TbCuXcan9F9ghlz4QrFr2c92TNF82UkYHA=="], "proxy-from-env": ["proxy-from-env@1.1.0", "", {}, "sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg=="],
"qrcode": ["qrcode@1.5.4", "", { "dependencies": { "dijkstrajs": "^1.0.1", "pngjs": "^5.0.0", "yargs": "^15.3.1" }, "bin": { "qrcode": "bin/qrcode" } }, "sha512-1ca71Zgiu6ORjHqFBDpnSMTR2ReToX4l1Au1VFLyVeBTFavzQnv5JxMFr3ukHVKpSrSA2MCk0lNJSykjUfz7Zg=="], "qrcode": ["qrcode@1.5.4", "", { "dependencies": { "dijkstrajs": "^1.0.1", "pngjs": "^5.0.0", "yargs": "^15.3.1" }, "bin": { "qrcode": "bin/qrcode" } }, "sha512-1ca71Zgiu6ORjHqFBDpnSMTR2ReToX4l1Au1VFLyVeBTFavzQnv5JxMFr3ukHVKpSrSA2MCk0lNJSykjUfz7Zg=="],
@@ -953,6 +962,8 @@
"yargs-parser": ["yargs-parser@21.1.1", "", {}, "sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw=="], "yargs-parser": ["yargs-parser@21.1.1", "", {}, "sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw=="],
"yauzl": ["yauzl@2.10.0", "", { "dependencies": { "buffer-crc32": "~0.2.3", "fd-slicer": "~1.1.0" } }, "sha512-p4a9I6X6nu6IhoGmBqAcbJy1mlC4j27vEPZX9F4L4/vZT3Lyq1VkFHw/V/PUcB9Buo+DG3iHkT0x3Qya58zc3g=="],
"yoctocolors": ["yoctocolors@2.1.2", "", {}, "sha512-CzhO+pFNo8ajLM2d2IW/R93ipy99LWjtwblvC1RsoSUMZgyLbYFr221TnSNT7GjGdYui6P459mw9JH/g/zW2ug=="], "yoctocolors": ["yoctocolors@2.1.2", "", {}, "sha512-CzhO+pFNo8ajLM2d2IW/R93ipy99LWjtwblvC1RsoSUMZgyLbYFr221TnSNT7GjGdYui6P459mw9JH/g/zW2ug=="],
"zod": ["zod@3.25.76", "", {}, "sha512-gzUt/qt81nXsFGKIFcC3YnfEAx5NkunCfnDlvuBSSFS02bcXu4Lmea0AFIUwbLWxWPx3d9p8S5QoaujKcNQxcQ=="], "zod": ["zod@3.25.76", "", {}, "sha512-gzUt/qt81nXsFGKIFcC3YnfEAx5NkunCfnDlvuBSSFS02bcXu4Lmea0AFIUwbLWxWPx3d9p8S5QoaujKcNQxcQ=="],
@@ -1369,6 +1380,8 @@
"@smithy/uuid/tslib": ["tslib@2.8.1", "", {}, "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w=="], "@smithy/uuid/tslib": ["tslib@2.8.1", "", {}, "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w=="],
"axios/proxy-from-env": ["proxy-from-env@2.1.0", "", {}, "sha512-cJ+oHTW1VAEa8cJslgmUZrc+sjRKgAKl3Zyse6+PV38hZe/V6Z14TbCuXcan9F9ghlz4QrFr2c92TNF82UkYHA=="],
"cli-highlight/chalk": ["chalk@4.1.2", "", { "dependencies": { "ansi-styles": "^4.1.0", "supports-color": "^7.1.0" } }, "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA=="], "cli-highlight/chalk": ["chalk@4.1.2", "", { "dependencies": { "ansi-styles": "^4.1.0", "supports-color": "^7.1.0" } }, "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA=="],
"cli-highlight/yargs": ["yargs@16.2.0", "", { "dependencies": { "cliui": "^7.0.2", "escalade": "^3.1.1", "get-caller-file": "^2.0.5", "require-directory": "^2.1.1", "string-width": "^4.2.0", "y18n": "^5.0.5", "yargs-parser": "^20.2.2" } }, "sha512-D1mvvtDG0L5ft/jGWkLpG1+m0eQxOfaBvTNELraWj22wSVUMWxZUvYgJYcKh6jGGIkJFhH4IZPQhR4TKpc8mBw=="], "cli-highlight/yargs": ["yargs@16.2.0", "", { "dependencies": { "cliui": "^7.0.2", "escalade": "^3.1.1", "get-caller-file": "^2.0.5", "require-directory": "^2.1.1", "string-width": "^4.2.0", "y18n": "^5.0.5", "yargs-parser": "^20.2.2" } }, "sha512-D1mvvtDG0L5ft/jGWkLpG1+m0eQxOfaBvTNELraWj22wSVUMWxZUvYgJYcKh6jGGIkJFhH4IZPQhR4TKpc8mBw=="],
@@ -1429,6 +1442,8 @@
"@aws-sdk/nested-clients/@smithy/util-base64/@smithy/util-buffer-from": ["@smithy/util-buffer-from@4.2.2", "", { "dependencies": { "@smithy/is-array-buffer": "^4.2.2", "tslib": "^2.6.2" } }, "sha512-FDXD7cvUoFWwN6vtQfEta540Y/YBe5JneK3SoZg9bThSoOAC/eGeYEua6RkBgKjGa/sz6Y+DuBZj3+YEY21y4Q=="], "@aws-sdk/nested-clients/@smithy/util-base64/@smithy/util-buffer-from": ["@smithy/util-buffer-from@4.2.2", "", { "dependencies": { "@smithy/is-array-buffer": "^4.2.2", "tslib": "^2.6.2" } }, "sha512-FDXD7cvUoFWwN6vtQfEta540Y/YBe5JneK3SoZg9bThSoOAC/eGeYEua6RkBgKjGa/sz6Y+DuBZj3+YEY21y4Q=="],
"@mendable/firecrawl-js/axios/proxy-from-env": ["proxy-from-env@2.1.0", "", {}, "sha512-cJ+oHTW1VAEa8cJslgmUZrc+sjRKgAKl3Zyse6+PV38hZe/V6Z14TbCuXcan9F9ghlz4QrFr2c92TNF82UkYHA=="],
"@opentelemetry/exporter-trace-otlp-grpc/@opentelemetry/core/@opentelemetry/semantic-conventions": ["@opentelemetry/semantic-conventions@1.28.0", "", {}, "sha512-lp4qAiMTD4sNWW4DbKLBkfiMZ4jbAboJIGOQr5DvciMRI494OapieI9qiODpOt0XBr1LjIDy1xAGAnVs5supTA=="], "@opentelemetry/exporter-trace-otlp-grpc/@opentelemetry/core/@opentelemetry/semantic-conventions": ["@opentelemetry/semantic-conventions@1.28.0", "", {}, "sha512-lp4qAiMTD4sNWW4DbKLBkfiMZ4jbAboJIGOQr5DvciMRI494OapieI9qiODpOt0XBr1LjIDy1xAGAnVs5supTA=="],
"@opentelemetry/exporter-trace-otlp-grpc/@opentelemetry/otlp-transformer/@opentelemetry/api-logs": ["@opentelemetry/api-logs@0.57.2", "", { "dependencies": { "@opentelemetry/api": "^1.3.0" } }, "sha512-uIX52NnTM0iBh84MShlpouI7UKqkZ7MrUszTmaypHBu4r7NofznSnQRfJ+uUeDtQDj6w8eFGg5KBLDAwAPz1+A=="], "@opentelemetry/exporter-trace-otlp-grpc/@opentelemetry/otlp-transformer/@opentelemetry/api-logs": ["@opentelemetry/api-logs@0.57.2", "", { "dependencies": { "@opentelemetry/api": "^1.3.0" } }, "sha512-uIX52NnTM0iBh84MShlpouI7UKqkZ7MrUszTmaypHBu4r7NofznSnQRfJ+uUeDtQDj6w8eFGg5KBLDAwAPz1+A=="],
@@ -1509,6 +1524,8 @@
"cliui/wrap-ansi/ansi-styles": ["ansi-styles@4.3.0", "", { "dependencies": { "color-convert": "^2.0.1" } }, "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg=="], "cliui/wrap-ansi/ansi-styles": ["ansi-styles@4.3.0", "", { "dependencies": { "color-convert": "^2.0.1" } }, "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg=="],
"firecrawl/axios/proxy-from-env": ["proxy-from-env@2.1.0", "", {}, "sha512-cJ+oHTW1VAEa8cJslgmUZrc+sjRKgAKl3Zyse6+PV38hZe/V6Z14TbCuXcan9F9ghlz4QrFr2c92TNF82UkYHA=="],
"form-data/mime-types/mime-db": ["mime-db@1.52.0", "", {}, "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg=="], "form-data/mime-types/mime-db": ["mime-db@1.52.0", "", {}, "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg=="],
"qrcode/yargs/cliui": ["cliui@6.0.0", "", { "dependencies": { "string-width": "^4.2.0", "strip-ansi": "^6.0.0", "wrap-ansi": "^6.2.0" } }, "sha512-t6wbgtoCXvAzst7QgXxJYqPt0usEfbgQdftEPbLL/cvv6HPE5VgvqCuAIDR0NgU52ds6rFwqrgakNLrHEjCbrQ=="], "qrcode/yargs/cliui": ["cliui@6.0.0", "", { "dependencies": { "string-width": "^4.2.0", "strip-ansi": "^6.0.0", "wrap-ansi": "^6.2.0" } }, "sha512-t6wbgtoCXvAzst7QgXxJYqPt0usEfbgQdftEPbLL/cvv6HPE5VgvqCuAIDR0NgU52ds6rFwqrgakNLrHEjCbrQ=="],

View File

@@ -74,6 +74,7 @@
"@opentelemetry/sdk-trace-base": "2.6.1", "@opentelemetry/sdk-trace-base": "2.6.1",
"@opentelemetry/sdk-trace-node": "2.6.1", "@opentelemetry/sdk-trace-node": "2.6.1",
"@opentelemetry/semantic-conventions": "1.40.0", "@opentelemetry/semantic-conventions": "1.40.0",
"@vscode/ripgrep": "^1.17.1",
"ajv": "8.18.0", "ajv": "8.18.0",
"auto-bind": "5.0.1", "auto-bind": "5.0.1",
"axios": "1.15.0", "axios": "1.15.0",

View File

@@ -472,6 +472,11 @@ ${exports}
'@aws-sdk/credential-providers', '@aws-sdk/credential-providers',
'@azure/identity', '@azure/identity',
'google-auth-library', 'google-auth-library',
// @vscode/ripgrep ships a platform-specific binary alongside its
// index.js and resolves the path via __dirname at runtime. Bundling
// would freeze the build host's absolute path into dist/cli.mjs, so we
// keep it external and rely on the npm package being installed.
'@vscode/ripgrep',
], ],
}) })

View File

@@ -28,38 +28,6 @@ test('maps endpoint_not_found category markers to actionable setup guidance', ()
expect(text).toContain('/v1') expect(text).toContain('/v1')
}) })
test('endpoint_not_found from a remote host shows the actual host, not Ollama (issue #926)', () => {
const error = APIError.generate(
404,
undefined,
'OpenAI API error 404: Not Found [openai_category=endpoint_not_found,host=integrate.api.nvidia.com] Hint: Endpoint at integrate.api.nvidia.com returned 404.',
new Headers(),
)
const message = getAssistantMessageFromError(error, 'moonshotai/kimi-k2.5-thinking')
const text = getFirstText(message)
expect(text).toContain('integrate.api.nvidia.com')
expect(text).toContain('moonshotai/kimi-k2.5-thinking')
expect(text).not.toContain('Ollama')
expect(text).not.toContain('11434')
})
test('endpoint_not_found without a host falls back to the Ollama-aware message', () => {
const error = APIError.generate(
404,
undefined,
'OpenAI API error 404: Not Found [openai_category=endpoint_not_found] Hint: Confirm OPENAI_BASE_URL includes /v1.',
new Headers(),
)
const message = getAssistantMessageFromError(error, 'qwen2.5-coder:7b')
const text = getFirstText(message)
expect(text).toContain('Provider endpoint was not found')
expect(text).toContain('Ollama')
})
test('maps tool_call_incompatible category markers to model/tool guidance', () => { test('maps tool_call_incompatible category markers to model/tool guidance', () => {
const error = APIError.generate( const error = APIError.generate(
400, 400,

View File

@@ -51,9 +51,7 @@ import {
import { shouldProcessRateLimits } from '../rateLimitMocking.js' // Used for /mock-limits command import { shouldProcessRateLimits } from '../rateLimitMocking.js' // Used for /mock-limits command
import { extractConnectionErrorDetails, formatAPIError } from './errorUtils.js' import { extractConnectionErrorDetails, formatAPIError } from './errorUtils.js'
import { import {
extractOpenAICategoryHost,
extractOpenAICategoryMarker, extractOpenAICategoryMarker,
isLocalhostLikeHost,
type OpenAICompatibilityFailureCategory, type OpenAICompatibilityFailureCategory,
} from './openaiErrorClassification.js' } from './openaiErrorClassification.js'
@@ -70,29 +68,25 @@ function mapOpenAICompatibilityFailureToAssistantMessage(options: {
category: OpenAICompatibilityFailureCategory category: OpenAICompatibilityFailureCategory
model: string model: string
rawMessage: string rawMessage: string
host?: string
}): AssistantMessage { }): AssistantMessage {
const switchCmd = getIsNonInteractiveSession() ? '--model' : '/model' const switchCmd = getIsNonInteractiveSession() ? '--model' : '/model'
const compactHint = getIsNonInteractiveSession() const compactHint = getIsNonInteractiveSession()
? 'Reduce prompt size or start a new session.' ? 'Reduce prompt size or start a new session.'
: 'Run /compact or start a new session with /new.' : 'Run /compact or start a new session with /new.'
const isLocalhost = options.host === undefined || isLocalhostLikeHost(options.host)
switch (options.category) { switch (options.category) {
case 'localhost_resolution_failed': case 'localhost_resolution_failed':
case 'connection_refused': case 'connection_refused':
return createAssistantAPIErrorMessage({ return createAssistantAPIErrorMessage({
content: isLocalhost content:
? 'Could not connect to the local OpenAI-compatible provider. Ensure the local server is running, then use OPENAI_BASE_URL=http://127.0.0.1:11434/v1 for Ollama.' 'Could not connect to the local OpenAI-compatible provider. Ensure the local server is running, then use OPENAI_BASE_URL=http://127.0.0.1:11434/v1 for Ollama.',
: `Could not connect to the provider at ${options.host}. Verify OPENAI_BASE_URL is correct and that the host is reachable.`,
error: 'unknown', error: 'unknown',
}) })
case 'endpoint_not_found': case 'endpoint_not_found':
return createAssistantAPIErrorMessage({ return createAssistantAPIErrorMessage({
content: isLocalhost content:
? 'Provider endpoint was not found. Confirm OPENAI_BASE_URL targets an OpenAI-compatible /v1 endpoint (for Ollama: http://127.0.0.1:11434/v1).' 'Provider endpoint was not found. Confirm OPENAI_BASE_URL targets an OpenAI-compatible /v1 endpoint (for Ollama: http://127.0.0.1:11434/v1).',
: `Provider endpoint at ${options.host} returned 404. Verify OPENAI_BASE_URL is correct and that the selected model (${options.model}) is supported by this provider.`,
error: 'invalid_request', error: 'invalid_request',
}) })
@@ -573,7 +567,6 @@ export function getAssistantMessageFromError(
category: openaiCategory, category: openaiCategory,
model, model,
rawMessage: error.message, rawMessage: error.message,
host: extractOpenAICategoryHost(error.message),
}) })
} }
} }

View File

@@ -4,10 +4,8 @@ import {
buildOpenAICompatibilityErrorMessage, buildOpenAICompatibilityErrorMessage,
classifyOpenAIHttpFailure, classifyOpenAIHttpFailure,
classifyOpenAINetworkFailure, classifyOpenAINetworkFailure,
extractOpenAICategoryHost,
extractOpenAICategoryMarker, extractOpenAICategoryMarker,
formatOpenAICategoryMarker, formatOpenAICategoryMarker,
isLocalhostLikeHost,
} from './openaiErrorClassification.js' } from './openaiErrorClassification.js'
test('classifies localhost ECONNREFUSED as connection_refused', () => { test('classifies localhost ECONNREFUSED as connection_refused', () => {
@@ -97,58 +95,3 @@ test('ignores unknown category markers during extraction', () => {
const malformed = 'OpenAI API error 500 [openai_category=totally_fake_category]' const malformed = 'OpenAI API error 500 [openai_category=totally_fake_category]'
expect(extractOpenAICategoryMarker(malformed)).toBeUndefined() expect(extractOpenAICategoryMarker(malformed)).toBeUndefined()
}) })
test('endpoint_not_found 404 from a remote host gets a host-aware hint (issue #926)', () => {
const failure = classifyOpenAIHttpFailure({
status: 404,
body: 'Not Found',
url: 'https://integrate.api.nvidia.com/v1/chat/completions',
})
expect(failure.category).toBe('endpoint_not_found')
expect(failure.requestUrl).toBe('https://integrate.api.nvidia.com/v1/chat/completions')
expect(failure.hint).toContain('integrate.api.nvidia.com')
expect(failure.hint).not.toContain('local providers')
})
test('endpoint_not_found 404 from localhost keeps the Ollama-flavored hint', () => {
const failure = classifyOpenAIHttpFailure({
status: 404,
body: 'Not Found',
url: 'http://127.0.0.1:11434/v1/chat/completions',
})
expect(failure.category).toBe('endpoint_not_found')
expect(failure.hint).toContain('local providers')
})
test('marker round-trip preserves host segment', () => {
const formatted = buildOpenAICompatibilityErrorMessage(
'OpenAI API error 404: Not Found',
{
category: 'endpoint_not_found',
hint: 'Endpoint at integrate.api.nvidia.com returned 404.',
requestUrl: 'https://integrate.api.nvidia.com/v1/chat/completions',
},
)
expect(formatted).toContain('[openai_category=endpoint_not_found,host=integrate.api.nvidia.com]')
expect(extractOpenAICategoryMarker(formatted)).toBe('endpoint_not_found')
expect(extractOpenAICategoryHost(formatted)).toBe('integrate.api.nvidia.com')
})
test('marker without host stays backward-compatible', () => {
const marker = formatOpenAICategoryMarker('endpoint_not_found')
expect(marker).toBe('[openai_category=endpoint_not_found]')
expect(extractOpenAICategoryMarker(marker)).toBe('endpoint_not_found')
expect(extractOpenAICategoryHost(marker)).toBeUndefined()
})
test('isLocalhostLikeHost matches loopback variants', () => {
expect(isLocalhostLikeHost('localhost')).toBe(true)
expect(isLocalhostLikeHost('127.0.0.1')).toBe(true)
expect(isLocalhostLikeHost('127.0.0.5')).toBe(true)
expect(isLocalhostLikeHost('::1')).toBe(true)
expect(isLocalhostLikeHost('integrate.api.nvidia.com')).toBe(false)
expect(isLocalhostLikeHost(undefined)).toBe(false)
})

View File

@@ -21,7 +21,6 @@ export type OpenAICompatibilityFailure = {
hint?: string hint?: string
code?: string code?: string
status?: number status?: number
requestUrl?: string
} }
const OPENAI_CATEGORY_MARKER_PREFIX = '[openai_category=' const OPENAI_CATEGORY_MARKER_PREFIX = '[openai_category='
@@ -97,11 +96,6 @@ function isLocalhostLikeHostname(hostname: string | null): boolean {
return /^127\./.test(hostname) return /^127\./.test(hostname)
} }
export function isLocalhostLikeHost(host: string | null | undefined): boolean {
if (!host) return false
return isLocalhostLikeHostname(host.toLowerCase())
}
function isContextOverflowMessage(body: string): boolean { function isContextOverflowMessage(body: string): boolean {
const lower = body.toLowerCase() const lower = body.toLowerCase()
return ( return (
@@ -155,18 +149,14 @@ function isModelNotFoundMessage(body: string): boolean {
export function formatOpenAICategoryMarker( export function formatOpenAICategoryMarker(
category: OpenAICompatibilityFailureCategory, category: OpenAICompatibilityFailureCategory,
host?: string,
): string { ): string {
if (host && /^[A-Za-z0-9.\-:]+$/.test(host)) {
return `${OPENAI_CATEGORY_MARKER_PREFIX}${category},host=${host}]`
}
return `${OPENAI_CATEGORY_MARKER_PREFIX}${category}]` return `${OPENAI_CATEGORY_MARKER_PREFIX}${category}]`
} }
export function extractOpenAICategoryMarker( export function extractOpenAICategoryMarker(
message: string, message: string,
): OpenAICompatibilityFailureCategory | undefined { ): OpenAICompatibilityFailureCategory | undefined {
const match = message.match(/\[openai_category=([a-z_]+)(?:,host=[^\]]+)?]/) const match = message.match(/\[openai_category=([a-z_]+)]/)
const category = match?.[1] const category = match?.[1]
if (!category || !isOpenAICompatibilityFailureCategory(category)) { if (!category || !isOpenAICompatibilityFailureCategory(category)) {
@@ -176,17 +166,11 @@ export function extractOpenAICategoryMarker(
return category return category
} }
export function extractOpenAICategoryHost(message: string): string | undefined {
const match = message.match(/\[openai_category=[a-z_]+,host=([A-Za-z0-9.\-:]+)]/)
return match?.[1]
}
export function buildOpenAICompatibilityErrorMessage( export function buildOpenAICompatibilityErrorMessage(
baseMessage: string, baseMessage: string,
failure: Pick<OpenAICompatibilityFailure, 'category' | 'hint' | 'requestUrl'>, failure: Pick<OpenAICompatibilityFailure, 'category' | 'hint'>,
): string { ): string {
const host = failure.requestUrl ? getHostname(failure.requestUrl) ?? undefined : undefined const marker = formatOpenAICategoryMarker(failure.category)
const marker = formatOpenAICategoryMarker(failure.category, host)
const hint = failure.hint ? ` Hint: ${failure.hint}` : '' const hint = failure.hint ? ` Hint: ${failure.hint}` : ''
return `${baseMessage} ${marker}${hint}` return `${baseMessage} ${marker}${hint}`
} }
@@ -263,11 +247,8 @@ export function classifyOpenAINetworkFailure(
export function classifyOpenAIHttpFailure(options: { export function classifyOpenAIHttpFailure(options: {
status: number status: number
body: string body: string
url?: string
}): OpenAICompatibilityFailure { }): OpenAICompatibilityFailure {
const body = options.body ?? '' const body = options.body ?? ''
const hostname = options.url ? getHostname(options.url) : null
const isLocalHost = isLocalhostLikeHostname(hostname)
if (options.status === 401 || options.status === 403) { if (options.status === 401 || options.status === 403) {
return { return {
@@ -303,17 +284,13 @@ export function classifyOpenAIHttpFailure(options: {
} }
if (options.status === 404) { if (options.status === 404) {
const isRemote = hostname !== null && !isLocalHost
return { return {
source: 'http', source: 'http',
category: 'endpoint_not_found', category: 'endpoint_not_found',
retryable: false, retryable: false,
status: options.status, status: options.status,
message: body, message: body,
requestUrl: options.url, hint: 'Endpoint was not found. Confirm OPENAI_BASE_URL includes /v1 for OpenAI-compatible local providers.',
hint: isRemote
? `Endpoint at ${hostname} returned 404. Verify OPENAI_BASE_URL is correct and the requested model is supported by this provider.`
: 'Endpoint was not found. Confirm OPENAI_BASE_URL includes /v1 for OpenAI-compatible local providers.',
} }
} }

View File

@@ -1935,9 +1935,7 @@ class OpenAIShimMessages {
classifyOpenAIHttpFailure({ classifyOpenAIHttpFailure({
status, status,
body: errorBody, body: errorBody,
url: requestUrl,
}) })
const failureWithUrl = { ...failure, requestUrl: failure.requestUrl ?? requestUrl }
const redactedUrl = redactUrlForDiagnostics(requestUrl) const redactedUrl = redactUrlForDiagnostics(requestUrl)
logForDebugging( logForDebugging(
@@ -1950,7 +1948,7 @@ class OpenAIShimMessages {
parsedBody, parsedBody,
buildOpenAICompatibilityErrorMessage( buildOpenAICompatibilityErrorMessage(
`OpenAI API error ${status}: ${errorBody}${rateHint}`, `OpenAI API error ${status}: ${errorBody}${rateHint}`,
failureWithUrl, failure,
), ),
responseHeaders, responseHeaders,
) )

View File

@@ -5,16 +5,15 @@ import { resolveRipgrepConfig, wrapRipgrepUnavailableError } from './ripgrep.js'
const MOCK_BUILTIN_PATH = path.normalize( const MOCK_BUILTIN_PATH = path.normalize(
process.platform === 'win32' process.platform === 'win32'
? `vendor/ripgrep/${process.arch}-win32/rg.exe` ? `node_modules/@vscode/ripgrep/bin/rg.exe`
: `vendor/ripgrep/${process.arch}-${process.platform}/rg`, : `node_modules/@vscode/ripgrep/bin/rg`,
) )
test('ripgrepCommand falls back to system rg when builtin binary is missing', () => { test('falls back to system rg when @vscode/ripgrep cannot be resolved', () => {
const config = resolveRipgrepConfig({ const config = resolveRipgrepConfig({
userWantsSystemRipgrep: false, userWantsSystemRipgrep: false,
bundledMode: false, bundledMode: false,
builtinCommand: MOCK_BUILTIN_PATH, builtinCommand: null,
builtinExists: false,
systemExecutablePath: '/usr/bin/rg', systemExecutablePath: '/usr/bin/rg',
processExecPath: '/fake/bun', processExecPath: '/fake/bun',
}) })
@@ -26,12 +25,11 @@ test('ripgrepCommand falls back to system rg when builtin binary is missing', ()
}) })
}) })
test('ripgrepCommand keeps builtin mode when bundled binary exists', () => { test('uses builtin @vscode/ripgrep path when the package resolves', () => {
const config = resolveRipgrepConfig({ const config = resolveRipgrepConfig({
userWantsSystemRipgrep: false, userWantsSystemRipgrep: false,
bundledMode: false, bundledMode: false,
builtinCommand: MOCK_BUILTIN_PATH, builtinCommand: MOCK_BUILTIN_PATH,
builtinExists: true,
systemExecutablePath: '/usr/bin/rg', systemExecutablePath: '/usr/bin/rg',
processExecPath: '/fake/bun', processExecPath: '/fake/bun',
}) })
@@ -43,10 +41,59 @@ test('ripgrepCommand keeps builtin mode when bundled binary exists', () => {
}) })
}) })
test('honors USE_BUILTIN_RIPGREP=0 by selecting system rg even when builtin is available', () => {
const config = resolveRipgrepConfig({
userWantsSystemRipgrep: true,
bundledMode: false,
builtinCommand: MOCK_BUILTIN_PATH,
systemExecutablePath: '/usr/bin/rg',
processExecPath: '/fake/bun',
})
expect(config).toMatchObject({
mode: 'system',
command: 'rg',
args: [],
})
})
test('keeps embedded mode for Bun-compiled standalone executables', () => {
const config = resolveRipgrepConfig({
userWantsSystemRipgrep: false,
bundledMode: true,
builtinCommand: null,
systemExecutablePath: '/usr/bin/rg',
processExecPath: '/opt/openclaude/bin/openclaude',
})
expect(config).toMatchObject({
mode: 'embedded',
command: '/opt/openclaude/bin/openclaude',
args: ['--no-config'],
argv0: 'rg',
})
})
test('falls through to system rg as a last resort even when not on PATH', () => {
const config = resolveRipgrepConfig({
userWantsSystemRipgrep: false,
bundledMode: false,
builtinCommand: null,
systemExecutablePath: 'rg',
processExecPath: '/fake/bun',
})
expect(config).toMatchObject({
mode: 'system',
command: 'rg',
args: [],
})
})
test('wrapRipgrepUnavailableError explains missing packaged fallback', () => { test('wrapRipgrepUnavailableError explains missing packaged fallback', () => {
const error = wrapRipgrepUnavailableError( const error = wrapRipgrepUnavailableError(
{ code: 'ENOENT', message: 'spawn rg ENOENT' }, { code: 'ENOENT', message: 'spawn rg ENOENT' },
{ mode: 'builtin', command: 'C:\\fake\\vendor\\ripgrep\\rg.exe', args: [] }, { mode: 'builtin', command: 'C:\\fake\\node_modules\\@vscode\\ripgrep\\bin\\rg.exe', args: [] },
'win32', 'win32',
) )

View File

@@ -5,7 +5,6 @@ import memoize from 'lodash-es/memoize.js'
import { homedir } from 'os' import { homedir } from 'os'
import * as path from 'path' import * as path from 'path'
import { logEvent } from 'src/services/analytics/index.js' import { logEvent } from 'src/services/analytics/index.js'
import { fileURLToPath } from 'url'
import { isInBundledMode } from './bundledMode.js' import { isInBundledMode } from './bundledMode.js'
import { logForDebugging } from './debug.js' import { logForDebugging } from './debug.js'
import { isEnvDefinedFalsy } from './envUtils.js' import { isEnvDefinedFalsy } from './envUtils.js'
@@ -15,13 +14,6 @@ import { logError } from './log.js'
import { getPlatform } from './platform.js' import { getPlatform } from './platform.js'
import { countCharInString } from './stringUtils.js' import { countCharInString } from './stringUtils.js'
const __filename = fileURLToPath(import.meta.url)
// we use node:path.join instead of node:url.resolve because the former doesn't encode spaces
const __dirname = path.join(
__filename,
process.env.NODE_ENV === 'test' ? '../../../' : '../',
)
type RipgrepConfig = { type RipgrepConfig = {
mode: 'system' | 'builtin' | 'embedded' mode: 'system' | 'builtin' | 'embedded'
command: string command: string
@@ -35,11 +27,31 @@ function isErrnoException(error: unknown): error is NodeJS.ErrnoException {
return error instanceof Error return error instanceof Error
} }
/**
 * Locates the ripgrep binary shipped by the @vscode/ripgrep npm package.
 * The package downloads a platform/arch-specific binary at npm install
 * time (cached under its bin/ directory) and exposes it as `rgPath`.
 *
 * @returns The absolute path to the packaged rg binary, or null when the
 *   package (or the file it points at) cannot be resolved — for example
 *   when running as a Bun-compiled standalone executable that does not
 *   ship node_modules. The caller decides the fallback.
 */
function resolveBuiltinRgPath(): string | null {
  // Require lazily so a failed resolution degrades gracefully instead of
  // throwing at module import time. `rgPath` is the only export we need.
  try {
    const { rgPath } = require('@vscode/ripgrep') as { rgPath?: string }
    // Guard with existsSync: a resolvable package with a missing binary
    // (e.g. a pruned install) must also fall through to null.
    return rgPath && existsSync(rgPath) ? rgPath : null
  } catch {
    return null
  }
}
type ResolveRipgrepConfigArgs = { type ResolveRipgrepConfigArgs = {
userWantsSystemRipgrep: boolean userWantsSystemRipgrep: boolean
bundledMode: boolean bundledMode: boolean
builtinCommand: string builtinCommand: string | null
builtinExists: boolean
systemExecutablePath: string systemExecutablePath: string
processExecPath?: string processExecPath?: string
} }
@@ -48,7 +60,6 @@ export function resolveRipgrepConfig({
userWantsSystemRipgrep, userWantsSystemRipgrep,
bundledMode, bundledMode,
builtinCommand, builtinCommand,
builtinExists,
systemExecutablePath, systemExecutablePath,
processExecPath = process.execPath, processExecPath = process.execPath,
}: ResolveRipgrepConfigArgs): RipgrepConfig { }: ResolveRipgrepConfigArgs): RipgrepConfig {
@@ -66,7 +77,7 @@ export function resolveRipgrepConfig({
} }
} }
if (builtinExists) { if (builtinCommand) {
return { mode: 'builtin', command: builtinCommand, args: [] } return { mode: 'builtin', command: builtinCommand, args: [] }
} }
@@ -74,7 +85,9 @@ export function resolveRipgrepConfig({
return { mode: 'system', command: 'rg', args: [] } return { mode: 'system', command: 'rg', args: [] }
} }
return { mode: 'builtin', command: builtinCommand, args: [] } // Last resort — leaves error reporting to the executor when no binary
// can be located. wrapRipgrepUnavailableError() surfaces an install hint.
return { mode: 'system', command: 'rg', args: [] }
} }
const getRipgrepConfig = memoize((): RipgrepConfig => { const getRipgrepConfig = memoize((): RipgrepConfig => {
@@ -82,19 +95,13 @@ const getRipgrepConfig = memoize((): RipgrepConfig => {
process.env.USE_BUILTIN_RIPGREP, process.env.USE_BUILTIN_RIPGREP,
) )
const bundledMode = isInBundledMode() const bundledMode = isInBundledMode()
const rgRoot = path.resolve(__dirname, 'vendor', 'ripgrep') const builtinCommand = resolveBuiltinRgPath()
const builtinCommand =
process.platform === 'win32'
? path.resolve(rgRoot, `${process.arch}-win32`, 'rg.exe')
: path.resolve(rgRoot, `${process.arch}-${process.platform}`, 'rg')
const builtinExists = existsSync(builtinCommand)
const { cmd: systemExecutablePath } = findExecutable('rg', []) const { cmd: systemExecutablePath } = findExecutable('rg', [])
return resolveRipgrepConfig({ return resolveRipgrepConfig({
userWantsSystemRipgrep, userWantsSystemRipgrep,
bundledMode, bundledMode,
builtinCommand, builtinCommand,
builtinExists,
systemExecutablePath, systemExecutablePath,
}) })
}) })