Compare commits
1 Commits
v0.5.0
...
fix/pr543-
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
15e5d19f49 |
24
.env.example
24
.env.example
@@ -225,30 +225,6 @@ ANTHROPIC_API_KEY=sk-ant-your-key-here
|
|||||||
# GOOGLE_CLOUD_PROJECT=your-gcp-project-id
|
# GOOGLE_CLOUD_PROJECT=your-gcp-project-id
|
||||||
|
|
||||||
|
|
||||||
# -----------------------------------------------------------------------------
|
|
||||||
# Option 9: NVIDIA NIM
|
|
||||||
# -----------------------------------------------------------------------------
|
|
||||||
# NVIDIA NIM provides hosted inference endpoints for NVIDIA models.
|
|
||||||
# Get your API key from https://build.nvidia.com/
|
|
||||||
#
|
|
||||||
# CLAUDE_CODE_USE_OPENAI=1
|
|
||||||
# NVIDIA_API_KEY=nvapi-your-key-here
|
|
||||||
# OPENAI_BASE_URL=https://integrate.api.nvidia.com/v1
|
|
||||||
# OPENAI_MODEL=nvidia/llama-3.1-nemotron-70b-instruct
|
|
||||||
|
|
||||||
|
|
||||||
# -----------------------------------------------------------------------------
|
|
||||||
# Option 10: MiniMax
|
|
||||||
# -----------------------------------------------------------------------------
|
|
||||||
# MiniMax API provides text generation models.
|
|
||||||
# Get your API key from https://platform.minimax.io/
|
|
||||||
#
|
|
||||||
# CLAUDE_CODE_USE_OPENAI=1
|
|
||||||
# MINIMAX_API_KEY=your-minimax-key-here
|
|
||||||
# OPENAI_BASE_URL=https://api.minimax.io/v1
|
|
||||||
# OPENAI_MODEL=MiniMax-M2.5
|
|
||||||
|
|
||||||
|
|
||||||
# =============================================================================
|
# =============================================================================
|
||||||
# OPTIONAL TUNING
|
# OPTIONAL TUNING
|
||||||
# =============================================================================
|
# =============================================================================
|
||||||
|
|||||||
1
.github/workflows/release.yml
vendored
1
.github/workflows/release.yml
vendored
@@ -11,7 +11,6 @@ concurrency:
|
|||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
release-please:
|
release-please:
|
||||||
if: ${{ github.repository == 'Gitlawb/openclaude' }}
|
|
||||||
name: Release Please
|
name: Release Please
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
permissions:
|
permissions:
|
||||||
|
|||||||
@@ -1,3 +1,3 @@
|
|||||||
{
|
{
|
||||||
".": "0.5.0"
|
".": "0.3.0"
|
||||||
}
|
}
|
||||||
|
|||||||
38
CHANGELOG.md
38
CHANGELOG.md
@@ -1,43 +1,5 @@
|
|||||||
# Changelog
|
# Changelog
|
||||||
|
|
||||||
## [0.5.0](https://github.com/Gitlawb/openclaude/compare/v0.4.0...v0.5.0) (2026-04-20)
|
|
||||||
|
|
||||||
|
|
||||||
### Features
|
|
||||||
|
|
||||||
* add OPENCLAUDE_DISABLE_STRICT_TOOLS env var to opt out of strict MCP tool schema normalization ([#770](https://github.com/Gitlawb/openclaude/issues/770)) ([e6e8d9a](https://github.com/Gitlawb/openclaude/commit/e6e8d9a24897e4c9ef08b72df20fabbf8ef27f38))
|
|
||||||
* mask provider api key input ([#772](https://github.com/Gitlawb/openclaude/issues/772)) ([13e9f22](https://github.com/Gitlawb/openclaude/commit/13e9f22a83a2b0f85f557b1e12c9442ba61241e4))
|
|
||||||
|
|
||||||
|
|
||||||
### Bug Fixes
|
|
||||||
|
|
||||||
* allow provider recovery during startup ([#765](https://github.com/Gitlawb/openclaude/issues/765)) ([f828171](https://github.com/Gitlawb/openclaude/commit/f828171ef1ab94e2acf73a28a292799e4e26cc0d))
|
|
||||||
* **api:** drop orphan tool results to satisfy strict role sequence ([#745](https://github.com/Gitlawb/openclaude/issues/745)) ([b786b76](https://github.com/Gitlawb/openclaude/commit/b786b765f01f392652eaf28ed3579a96b7260a53))
|
|
||||||
* **help:** prevent /help tab crash from undefined descriptions ([#732](https://github.com/Gitlawb/openclaude/issues/732)) ([3d1979f](https://github.com/Gitlawb/openclaude/commit/3d1979ff066db32415e0c8321af916d81f5f2621))
|
|
||||||
* **mcp:** sync required array with properties in tool schemas ([#754](https://github.com/Gitlawb/openclaude/issues/754)) ([002a8f1](https://github.com/Gitlawb/openclaude/commit/002a8f1f6de2fcfc917165d828501d3047bad61f))
|
|
||||||
* remove cached mcpClient in diagnostic tracking to prevent stale references ([#727](https://github.com/Gitlawb/openclaude/issues/727)) ([2c98be7](https://github.com/Gitlawb/openclaude/commit/2c98be700274a4241963b5f43530bf3bd8f8963f))
|
|
||||||
* use raw context window for auto-compact percentage display ([#748](https://github.com/Gitlawb/openclaude/issues/748)) ([55c5f26](https://github.com/Gitlawb/openclaude/commit/55c5f262a9a5a8be0aa9ae8dc6c7dafc465eb2c6))
|
|
||||||
|
|
||||||
## [0.4.0](https://github.com/Gitlawb/openclaude/compare/v0.3.0...v0.4.0) (2026-04-17)
|
|
||||||
|
|
||||||
|
|
||||||
### Features
|
|
||||||
|
|
||||||
* add Alibaba Coding Plan (DashScope) provider support ([#509](https://github.com/Gitlawb/openclaude/issues/509)) ([43ac6db](https://github.com/Gitlawb/openclaude/commit/43ac6dba75537282da1e2ad8f855082bc4e25f1e))
|
|
||||||
* add NVIDIA NIM and MiniMax provider support ([#552](https://github.com/Gitlawb/openclaude/issues/552)) ([51191d6](https://github.com/Gitlawb/openclaude/commit/51191d61326e1f8319d70b3a3c0d9229e185a564))
|
|
||||||
* add ripgrep to Dockerfile for faster file searching ([#688](https://github.com/Gitlawb/openclaude/issues/688)) ([12dd375](https://github.com/Gitlawb/openclaude/commit/12dd3755c619cc27af3b151ae8fdb9d425a7b9a2))
|
|
||||||
* **api:** classify openai-compatible provider failures ([#708](https://github.com/Gitlawb/openclaude/issues/708)) ([80a00ac](https://github.com/Gitlawb/openclaude/commit/80a00acc2c6dc4657a78de7366f7a9ebc920bfbb))
|
|
||||||
* **vscode:** add full chat interface to OpenClaude extension ([#608](https://github.com/Gitlawb/openclaude/issues/608)) ([fbcd928](https://github.com/Gitlawb/openclaude/commit/fbcd928f7f8511da795aea3ad318bddf0ab9a1a7))
|
|
||||||
|
|
||||||
|
|
||||||
### Bug Fixes
|
|
||||||
|
|
||||||
* focus "Done" option after completing provider manager actions ([#718](https://github.com/Gitlawb/openclaude/issues/718)) ([d6f5130](https://github.com/Gitlawb/openclaude/commit/d6f5130c204d8ffe582212466768706cd7fd6774))
|
|
||||||
* **models:** prevent /models crash from non-string saved model values ([#691](https://github.com/Gitlawb/openclaude/issues/691)) ([6b2121d](https://github.com/Gitlawb/openclaude/commit/6b2121da12189fa7ce1f33394d18abd24cf8a01b))
|
|
||||||
* prevent crash in commands tab when description is undefined ([#730](https://github.com/Gitlawb/openclaude/issues/730)) ([eed77e6](https://github.com/Gitlawb/openclaude/commit/eed77e6579866a98384dcc948a0ad6406614ede3))
|
|
||||||
* strip comments before scanning for missing imports ([#676](https://github.com/Gitlawb/openclaude/issues/676)) ([a00b792](https://github.com/Gitlawb/openclaude/commit/a00b7928de9662ffb7ef6abd8cd040afe6f4f122))
|
|
||||||
* **ui:** show correct endpoint URL in intro screen for custom Anthropic endpoints ([#735](https://github.com/Gitlawb/openclaude/issues/735)) ([3424663](https://github.com/Gitlawb/openclaude/commit/34246635fb9a09499047a52e7f96ca9b36c8a85a))
|
|
||||||
|
|
||||||
## [0.3.0](https://github.com/Gitlawb/openclaude/compare/v0.2.3...v0.3.0) (2026-04-14)
|
## [0.3.0](https://github.com/Gitlawb/openclaude/compare/v0.2.3...v0.3.0) (2026-04-14)
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
@@ -36,11 +36,14 @@ COPY --from=build /app/node_modules/ node_modules/
|
|||||||
COPY --from=build /app/package.json package.json
|
COPY --from=build /app/package.json package.json
|
||||||
COPY README.md ./
|
COPY README.md ./
|
||||||
|
|
||||||
# Install git and ripgrep — many CLI tool operations depend on them
|
# Install git — many CLI tool operations depend on it
|
||||||
RUN apt-get update && apt-get install -y --no-install-recommends git ripgrep \
|
RUN apt-get update && apt-get install -y --no-install-recommends git \
|
||||||
&& rm -rf /var/lib/apt/lists/*
|
&& rm -rf /var/lib/apt/lists/*
|
||||||
|
|
||||||
# Run as non-root user
|
# Run as non-root user
|
||||||
USER node
|
RUN groupadd --gid 1000 appuser && useradd --uid 1000 --gid appuser --shell /bin/bash --create-home appuser
|
||||||
|
USER appuser
|
||||||
|
WORKDIR /home/appuser
|
||||||
|
ENV HOME=/home/appuser
|
||||||
|
|
||||||
ENTRYPOINT ["node", "/app/dist/cli.mjs"]
|
ENTRYPOINT ["node", "/app/dist/cli.mjs"]
|
||||||
|
|||||||
19
README.md
19
README.md
@@ -15,10 +15,6 @@ OpenClaude is also mirrored to GitLawb:
|
|||||||
|
|
||||||
[Quick Start](#quick-start) | [Setup Guides](#setup-guides) | [Providers](#supported-providers) | [Source Build](#source-build-and-local-development) | [VS Code Extension](#vs-code-extension) | [Community](#community)
|
[Quick Start](#quick-start) | [Setup Guides](#setup-guides) | [Providers](#supported-providers) | [Source Build](#source-build-and-local-development) | [VS Code Extension](#vs-code-extension) | [Community](#community)
|
||||||
|
|
||||||
## Star History
|
|
||||||
|
|
||||||
[](https://www.star-history.com/?repos=gitlawb%2Fopenclaude&type=date&legend=top-left)
|
|
||||||
|
|
||||||
## Why OpenClaude
|
## Why OpenClaude
|
||||||
|
|
||||||
- Use one CLI across cloud APIs and local model backends
|
- Use one CLI across cloud APIs and local model backends
|
||||||
@@ -92,16 +88,6 @@ $env:OPENAI_MODEL="qwen2.5-coder:7b"
|
|||||||
openclaude
|
openclaude
|
||||||
```
|
```
|
||||||
|
|
||||||
### Using Ollama's launch command
|
|
||||||
|
|
||||||
If you have [Ollama](https://ollama.com) installed, you can skip the env var setup entirely:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
ollama launch openclaude --model qwen2.5-coder:7b
|
|
||||||
```
|
|
||||||
|
|
||||||
This automatically sets `ANTHROPIC_BASE_URL`, model routing, and auth so all API traffic goes through your local Ollama instance. Works with any model you have pulled — local or cloud.
|
|
||||||
|
|
||||||
## Setup Guides
|
## Setup Guides
|
||||||
|
|
||||||
Beginner-friendly guides:
|
Beginner-friendly guides:
|
||||||
@@ -124,7 +110,7 @@ Advanced and source-build guides:
|
|||||||
| GitHub Models | `/onboard-github` | Interactive onboarding with saved credentials |
|
| GitHub Models | `/onboard-github` | Interactive onboarding with saved credentials |
|
||||||
| Codex OAuth | `/provider` | Opens ChatGPT sign-in in your browser and stores Codex credentials securely |
|
| Codex OAuth | `/provider` | Opens ChatGPT sign-in in your browser and stores Codex credentials securely |
|
||||||
| Codex | `/provider` | Uses existing Codex CLI auth, OpenClaude secure storage, or env credentials |
|
| Codex | `/provider` | Uses existing Codex CLI auth, OpenClaude secure storage, or env credentials |
|
||||||
| Ollama | `/provider`, env vars, or `ollama launch` | Local inference with no API key |
|
| Ollama | `/provider` or env vars | Local inference with no API key |
|
||||||
| Atomic Chat | advanced setup | Local Apple Silicon backend |
|
| Atomic Chat | advanced setup | Local Apple Silicon backend |
|
||||||
| Bedrock / Vertex / Foundry | env vars | Additional provider integrations for supported environments |
|
| Bedrock / Vertex / Foundry | env vars | Additional provider integrations for supported environments |
|
||||||
|
|
||||||
@@ -331,8 +317,7 @@ For larger changes, open an issue first so the scope is clear before implementat
|
|||||||
- `bun run build`
|
- `bun run build`
|
||||||
- `bun run test:coverage`
|
- `bun run test:coverage`
|
||||||
- `bun run smoke`
|
- `bun run smoke`
|
||||||
- focused `bun test ...` runs for files and flows you changed
|
- focused `bun test ...` runs for touched areas
|
||||||
|
|
||||||
|
|
||||||
## Disclaimer
|
## Disclaimer
|
||||||
|
|
||||||
|
|||||||
@@ -84,16 +84,6 @@ OpenRouter model availability changes over time. If a model stops working, try a
|
|||||||
|
|
||||||
### Ollama
|
### Ollama
|
||||||
|
|
||||||
Using `ollama launch` (recommended if you have Ollama installed):
|
|
||||||
|
|
||||||
```bash
|
|
||||||
ollama launch openclaude --model llama3.3:70b
|
|
||||||
```
|
|
||||||
|
|
||||||
This handles all environment setup automatically — no env vars needed. Works with any local or cloud model available in your Ollama instance.
|
|
||||||
|
|
||||||
Using environment variables manually:
|
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
ollama pull llama3.3:70b
|
ollama pull llama3.3:70b
|
||||||
|
|
||||||
|
|||||||
67
docs/repo-map.md
Normal file
67
docs/repo-map.md
Normal file
@@ -0,0 +1,67 @@
|
|||||||
|
# Codebase Intelligence — Repo Map
|
||||||
|
|
||||||
|
The repo map feature gives the AI model structural awareness of your codebase at the start of each session. Instead of the model needing to explore the repository with `Grep`, `Glob`, and `Read` calls, it starts with a ranked summary of the most important files and their key signatures.
|
||||||
|
|
||||||
|
## How it works
|
||||||
|
|
||||||
|
1. **File enumeration** — Lists all tracked files via `git ls-files` (falls back to a manual directory walk when not in a git repo)
|
||||||
|
2. **Symbol extraction** — Parses each supported source file with tree-sitter to extract function, class, type, and interface definitions, plus cross-file references
|
||||||
|
3. **Reference graph** — Builds a directed graph where an edge from file A to file B means A references a symbol defined in B. Edges are weighted by reference count multiplied by the IDF (inverse document frequency) of the symbol name — common names like `get`, `set`, `value` contribute less
|
||||||
|
4. **PageRank** — Ranks files by structural importance using PageRank. Files imported by many others rank highest
|
||||||
|
5. **Rendering** — Walks ranked files top-down, emitting file paths and definition signatures, stopping when the token budget is reached
|
||||||
|
|
||||||
|
Results are cached to disk (`~/.openclaude/repomap-cache/`) keyed by file path, mtime, and size. Only changed files are re-parsed on subsequent runs.
|
||||||
|
|
||||||
|
## Supported languages
|
||||||
|
|
||||||
|
- TypeScript (`.ts`, `.tsx`)
|
||||||
|
- JavaScript (`.js`, `.jsx`, `.mjs`, `.cjs`)
|
||||||
|
- Python (`.py`)
|
||||||
|
|
||||||
|
Additional language grammars will be added in future releases.
|
||||||
|
|
||||||
|
## Enabling auto-injection
|
||||||
|
|
||||||
|
The repo map is gated behind the `REPO_MAP` feature flag, **off by default**. To enable auto-injection into the session context:
|
||||||
|
|
||||||
|
Set the environment variable before launching:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
REPO_MAP=1 openclaude
|
||||||
|
```
|
||||||
|
|
||||||
|
Or add it to your shell profile for persistent use.
|
||||||
|
|
||||||
|
When enabled, the map is built once per session and prepended to the system context alongside git status and CLAUDE.md content. The default budget is 1024 tokens.
|
||||||
|
|
||||||
|
Auto-injection is skipped in:
|
||||||
|
- Bare mode (`--bare`)
|
||||||
|
- Remote sessions (`CLAUDE_CODE_REMOTE`)
|
||||||
|
|
||||||
|
## The /repomap slash command
|
||||||
|
|
||||||
|
The `/repomap` command is always available regardless of the feature flag. It lets you inspect and tune the map interactively.
|
||||||
|
|
||||||
|
```
|
||||||
|
/repomap # Show the map with default settings (1024 tokens)
|
||||||
|
/repomap --tokens 4096 # Increase the token budget for a larger map
|
||||||
|
/repomap --focus src/tools/ # Boost specific paths in the ranking
|
||||||
|
/repomap --focus src/context.ts # Can use multiple --focus flags
|
||||||
|
/repomap --stats # Show cache statistics
|
||||||
|
/repomap --invalidate # Clear cache and rebuild from scratch
|
||||||
|
```
|
||||||
|
|
||||||
|
## The RepoMap tool
|
||||||
|
|
||||||
|
The model can also call the `RepoMap` tool on demand during a session. This is useful when:
|
||||||
|
- The model needs structural context mid-conversation
|
||||||
|
- The user asks about specific areas (the model can pass `focus_files` or `focus_symbols`)
|
||||||
|
- A larger token budget is needed than the auto-injected default
|
||||||
|
|
||||||
|
## Known limitations
|
||||||
|
|
||||||
|
- **Signatures only** — The map shows function/class/type declarations, not implementations. The model still needs `Read` to see function bodies.
|
||||||
|
- **Cold build time** — First build on large repos (2000+ files) can take 20-30 seconds due to WASM-based parsing. Subsequent builds use the disk cache and complete in under 100ms.
|
||||||
|
- **Language coverage** — Only TypeScript, JavaScript, and Python are supported. Files in other languages are skipped.
|
||||||
|
- **TypeScript references** — The TypeScript tree-sitter query captures type annotations and `new` expressions as references, but not plain function calls. This means the ranking slightly favors type-heavy hub files.
|
||||||
|
- **Git dependency** — File enumeration uses `git ls-files` by default. Non-git repos fall back to a directory walk with hardcoded exclusions.
|
||||||
@@ -1,6 +1,6 @@
|
|||||||
{
|
{
|
||||||
"name": "@gitlawb/openclaude",
|
"name": "@gitlawb/openclaude",
|
||||||
"version": "0.5.0",
|
"version": "0.3.0",
|
||||||
"description": "Claude Code opened to any LLM — OpenAI, Gemini, DeepSeek, Ollama, and 200+ models",
|
"description": "Claude Code opened to any LLM — OpenAI, Gemini, DeepSeek, Ollama, and 200+ models",
|
||||||
"type": "module",
|
"type": "module",
|
||||||
"bin": {
|
"bin": {
|
||||||
@@ -95,8 +95,12 @@
|
|||||||
"fuse.js": "7.1.0",
|
"fuse.js": "7.1.0",
|
||||||
"get-east-asian-width": "1.5.0",
|
"get-east-asian-width": "1.5.0",
|
||||||
"google-auth-library": "9.15.1",
|
"google-auth-library": "9.15.1",
|
||||||
|
"graphology": "^0.26.0",
|
||||||
|
"graphology-operators": "^1.6.0",
|
||||||
|
"graphology-pagerank": "^1.1.0",
|
||||||
"https-proxy-agent": "7.0.6",
|
"https-proxy-agent": "7.0.6",
|
||||||
"ignore": "7.0.5",
|
"ignore": "7.0.5",
|
||||||
|
"js-tiktoken": "^1.0.16",
|
||||||
"indent-string": "5.0.0",
|
"indent-string": "5.0.0",
|
||||||
"jsonc-parser": "3.3.1",
|
"jsonc-parser": "3.3.1",
|
||||||
"lodash-es": "4.18.1",
|
"lodash-es": "4.18.1",
|
||||||
@@ -117,11 +121,13 @@
|
|||||||
"strip-ansi": "7.2.0",
|
"strip-ansi": "7.2.0",
|
||||||
"supports-hyperlinks": "3.2.0",
|
"supports-hyperlinks": "3.2.0",
|
||||||
"tree-kill": "1.2.2",
|
"tree-kill": "1.2.2",
|
||||||
|
"tree-sitter-wasms": "^0.1.12",
|
||||||
"turndown": "7.2.2",
|
"turndown": "7.2.2",
|
||||||
"type-fest": "4.41.0",
|
"type-fest": "4.41.0",
|
||||||
"undici": "7.24.6",
|
"undici": "7.24.6",
|
||||||
"usehooks-ts": "3.1.1",
|
"usehooks-ts": "3.1.1",
|
||||||
"vscode-languageserver-protocol": "3.17.5",
|
"vscode-languageserver-protocol": "3.17.5",
|
||||||
|
"web-tree-sitter": "^0.25.0",
|
||||||
"wrap-ansi": "9.0.2",
|
"wrap-ansi": "9.0.2",
|
||||||
"ws": "8.20.0",
|
"ws": "8.20.0",
|
||||||
"xss": "1.0.15",
|
"xss": "1.0.15",
|
||||||
|
|||||||
@@ -367,17 +367,9 @@ export const SeverityNumber = {};
|
|||||||
const full = pathMod.join(dir, ent.name)
|
const full = pathMod.join(dir, ent.name)
|
||||||
if (ent.isDirectory()) { walk(full); continue }
|
if (ent.isDirectory()) { walk(full); continue }
|
||||||
if (!/\.(ts|tsx)$/.test(ent.name)) continue
|
if (!/\.(ts|tsx)$/.test(ent.name)) continue
|
||||||
const rawCode: string = fs.readFileSync(full, 'utf-8')
|
const code: string = fs.readFileSync(full, 'utf-8')
|
||||||
const fileDir = pathMod.dirname(full)
|
const fileDir = pathMod.dirname(full)
|
||||||
|
|
||||||
// Strip comments before scanning for imports/requires.
|
|
||||||
// The regex scanner matches require()/import() patterns
|
|
||||||
// inside JSDoc comments, causing false-positive missing
|
|
||||||
// module detection that breaks the build with noop stubs.
|
|
||||||
const code = rawCode
|
|
||||||
.replace(/\/\*[\s\S]*?\*\//g, '') // block comments
|
|
||||||
.replace(/\/\/.*$/gm, '') // line comments
|
|
||||||
|
|
||||||
// Collect static imports: import { X } from '...'
|
// Collect static imports: import { X } from '...'
|
||||||
for (const m of code.matchAll(/import\s+(?:\{([^}]*)\}|(\w+))?\s*(?:,\s*\{([^}]*)\})?\s*from\s+['"](.*?)['"]/g)) {
|
for (const m of code.matchAll(/import\s+(?:\{([^}]*)\}|(\w+))?\s*(?:,\s*\{([^}]*)\})?\s*from\s+['"](.*?)['"]/g)) {
|
||||||
checkAndRegister(m[4], fileDir, m[1] || m[3] || '')
|
checkAndRegister(m[4], fileDir, m[1] || m[3] || '')
|
||||||
|
|||||||
@@ -11,12 +11,7 @@ import { MCPServerDesktopImportDialog } from '../../components/MCPServerDesktopI
|
|||||||
import { render } from '../../ink.js';
|
import { render } from '../../ink.js';
|
||||||
import { KeybindingSetup } from '../../keybindings/KeybindingProviderSetup.js';
|
import { KeybindingSetup } from '../../keybindings/KeybindingProviderSetup.js';
|
||||||
import { type AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS, logEvent } from '../../services/analytics/index.js';
|
import { type AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS, logEvent } from '../../services/analytics/index.js';
|
||||||
import {
|
import { clearMcpClientConfig, clearServerTokensFromLocalStorage, readClientSecret, saveMcpClientSecret } from '../../services/mcp/auth.js';
|
||||||
clearMcpClientConfig,
|
|
||||||
clearServerTokensFromSecureStorage,
|
|
||||||
readClientSecret,
|
|
||||||
saveMcpClientSecret,
|
|
||||||
} from '../../services/mcp/auth.js'
|
|
||||||
import { doctorAllServers, doctorServer, type McpDoctorReport, type McpDoctorScopeFilter } from '../../services/mcp/doctor.js';
|
import { doctorAllServers, doctorServer, type McpDoctorReport, type McpDoctorScopeFilter } from '../../services/mcp/doctor.js';
|
||||||
import { connectToServer, getMcpServerConnectionBatchSize } from '../../services/mcp/client.js';
|
import { connectToServer, getMcpServerConnectionBatchSize } from '../../services/mcp/client.js';
|
||||||
import { addMcpConfig, getAllMcpConfigs, getMcpConfigByName, getMcpConfigsByScope, removeMcpConfig } from '../../services/mcp/config.js';
|
import { addMcpConfig, getAllMcpConfigs, getMcpConfigByName, getMcpConfigsByScope, removeMcpConfig } from '../../services/mcp/config.js';
|
||||||
|
|||||||
@@ -1,30 +0,0 @@
|
|||||||
import { formatDescriptionWithSource } from './commands.js'
|
|
||||||
|
|
||||||
describe('formatDescriptionWithSource', () => {
|
|
||||||
test('returns empty text for prompt commands missing a description', () => {
|
|
||||||
const command = {
|
|
||||||
name: 'example',
|
|
||||||
type: 'prompt',
|
|
||||||
source: 'builtin',
|
|
||||||
description: undefined,
|
|
||||||
} as any
|
|
||||||
|
|
||||||
expect(formatDescriptionWithSource(command)).toBe('')
|
|
||||||
})
|
|
||||||
|
|
||||||
test('formats plugin commands with missing description safely', () => {
|
|
||||||
const command = {
|
|
||||||
name: 'example',
|
|
||||||
type: 'prompt',
|
|
||||||
source: 'plugin',
|
|
||||||
description: undefined,
|
|
||||||
pluginInfo: {
|
|
||||||
pluginManifest: {
|
|
||||||
name: 'MyPlugin',
|
|
||||||
},
|
|
||||||
},
|
|
||||||
} as any
|
|
||||||
|
|
||||||
expect(formatDescriptionWithSource(command)).toBe('(MyPlugin) ')
|
|
||||||
})
|
|
||||||
})
|
|
||||||
@@ -22,6 +22,7 @@ import ctx_viz from './commands/ctx_viz/index.js'
|
|||||||
import doctor from './commands/doctor/index.js'
|
import doctor from './commands/doctor/index.js'
|
||||||
import onboardGithub from './commands/onboard-github/index.js'
|
import onboardGithub from './commands/onboard-github/index.js'
|
||||||
import memory from './commands/memory/index.js'
|
import memory from './commands/memory/index.js'
|
||||||
|
import repomap from './commands/repomap/index.js'
|
||||||
import help from './commands/help/index.js'
|
import help from './commands/help/index.js'
|
||||||
import ide from './commands/ide/index.js'
|
import ide from './commands/ide/index.js'
|
||||||
import init from './commands/init.js'
|
import init from './commands/init.js'
|
||||||
@@ -307,6 +308,7 @@ const COMMANDS = memoize((): Command[] => [
|
|||||||
releaseNotes,
|
releaseNotes,
|
||||||
reloadPlugins,
|
reloadPlugins,
|
||||||
rename,
|
rename,
|
||||||
|
repomap,
|
||||||
resume,
|
resume,
|
||||||
session,
|
session,
|
||||||
skills,
|
skills,
|
||||||
@@ -740,23 +742,23 @@ export function getCommand(commandName: string, commands: Command[]): Command {
|
|||||||
*/
|
*/
|
||||||
export function formatDescriptionWithSource(cmd: Command): string {
|
export function formatDescriptionWithSource(cmd: Command): string {
|
||||||
if (cmd.type !== 'prompt') {
|
if (cmd.type !== 'prompt') {
|
||||||
return cmd.description ?? ''
|
return cmd.description
|
||||||
}
|
}
|
||||||
|
|
||||||
if (cmd.kind === 'workflow') {
|
if (cmd.kind === 'workflow') {
|
||||||
return `${cmd.description ?? ''} (workflow)`
|
return `${cmd.description} (workflow)`
|
||||||
}
|
}
|
||||||
|
|
||||||
if (cmd.source === 'plugin') {
|
if (cmd.source === 'plugin') {
|
||||||
const pluginName = cmd.pluginInfo?.pluginManifest.name
|
const pluginName = cmd.pluginInfo?.pluginManifest.name
|
||||||
if (pluginName) {
|
if (pluginName) {
|
||||||
return `(${pluginName}) ${cmd.description ?? ''}`
|
return `(${pluginName}) ${cmd.description}`
|
||||||
}
|
}
|
||||||
return `${cmd.description ?? ''} (plugin)`
|
return `${cmd.description} (plugin)`
|
||||||
}
|
}
|
||||||
|
|
||||||
if (cmd.source === 'builtin' || cmd.source === 'mcp') {
|
if (cmd.source === 'builtin' || cmd.source === 'mcp') {
|
||||||
return cmd.description ?? ''
|
return cmd.description
|
||||||
}
|
}
|
||||||
|
|
||||||
if (cmd.source === 'bundled') {
|
if (cmd.source === 'bundled') {
|
||||||
|
|||||||
@@ -401,7 +401,7 @@ test('buildCodexProfileEnv derives oauth source from secure storage when no expl
|
|||||||
})
|
})
|
||||||
})
|
})
|
||||||
|
|
||||||
test('explicitly declared env takes precedence over applySavedProfileToCurrentSession', async () => {
|
test('applySavedProfileToCurrentSession switches the current env to the saved Codex profile', async () => {
|
||||||
// @ts-expect-error cache-busting query string for Bun module mocks
|
// @ts-expect-error cache-busting query string for Bun module mocks
|
||||||
const { applySavedProfileToCurrentSession } = await import(
|
const { applySavedProfileToCurrentSession } = await import(
|
||||||
'../../utils/providerProfile.js?apply-saved-profile-codex'
|
'../../utils/providerProfile.js?apply-saved-profile-codex'
|
||||||
@@ -430,18 +430,18 @@ test('explicitly declared env takes precedence over applySavedProfileToCurrentSe
|
|||||||
|
|
||||||
expect(warning).toBeNull()
|
expect(warning).toBeNull()
|
||||||
expect(processEnv.CLAUDE_CODE_USE_OPENAI).toBe('1')
|
expect(processEnv.CLAUDE_CODE_USE_OPENAI).toBe('1')
|
||||||
expect(processEnv.OPENAI_MODEL).toBe('gpt-4o')
|
expect(processEnv.OPENAI_MODEL).toBe('codexplan')
|
||||||
expect(processEnv.OPENAI_BASE_URL).toBe(
|
expect(processEnv.OPENAI_BASE_URL).toBe(
|
||||||
"https://api.openai.com/v1",
|
'https://chatgpt.com/backend-api/codex',
|
||||||
)
|
)
|
||||||
expect(processEnv.CODEX_API_KEY).toBeUndefined()
|
expect(processEnv.CODEX_API_KEY).toBe('codex-live')
|
||||||
expect(processEnv.CHATGPT_ACCOUNT_ID).toBeUndefined()
|
expect(processEnv.CHATGPT_ACCOUNT_ID).toBe('acct_codex')
|
||||||
expect(processEnv.OPENAI_API_KEY).toBe("sk-openai")
|
expect(processEnv.OPENAI_API_KEY).toBeUndefined()
|
||||||
expect(processEnv.CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED).toBeUndefined()
|
expect(processEnv.CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED).toBeUndefined()
|
||||||
expect(processEnv.CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED_ID).toBeUndefined()
|
expect(processEnv.CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED_ID).toBeUndefined()
|
||||||
})
|
})
|
||||||
|
|
||||||
test('explicitly declared env takes precedence over applySavedProfileToCurrentSession', async () => {
|
test('applySavedProfileToCurrentSession ignores stale Codex env overrides for OAuth-backed profiles', async () => {
|
||||||
// @ts-expect-error cache-busting query string for Bun module mocks
|
// @ts-expect-error cache-busting query string for Bun module mocks
|
||||||
const { applySavedProfileToCurrentSession } = await import(
|
const { applySavedProfileToCurrentSession } = await import(
|
||||||
'../../utils/providerProfile.js?apply-saved-profile-codex-oauth'
|
'../../utils/providerProfile.js?apply-saved-profile-codex-oauth'
|
||||||
@@ -465,13 +465,13 @@ test('explicitly declared env takes precedence over applySavedProfileToCurrentSe
|
|||||||
processEnv,
|
processEnv,
|
||||||
})
|
})
|
||||||
|
|
||||||
expect(warning).not.toBeUndefined()
|
expect(warning).toBeNull()
|
||||||
expect(processEnv.OPENAI_MODEL).toBe('gpt-4o')
|
expect(processEnv.OPENAI_MODEL).toBe('codexplan')
|
||||||
expect(processEnv.OPENAI_BASE_URL).toBe(
|
expect(processEnv.OPENAI_BASE_URL).toBe(
|
||||||
"https://api.openai.com/v1",
|
'https://chatgpt.com/backend-api/codex',
|
||||||
)
|
)
|
||||||
expect(processEnv.CODEX_API_KEY).toBe("stale-codex-key")
|
expect(processEnv.CODEX_API_KEY).toBeUndefined()
|
||||||
expect(processEnv.CHATGPT_ACCOUNT_ID).toBe('acct_stale')
|
expect(processEnv.CHATGPT_ACCOUNT_ID).not.toBe('acct_stale')
|
||||||
expect(processEnv.CHATGPT_ACCOUNT_ID).toBeTruthy()
|
expect(processEnv.CHATGPT_ACCOUNT_ID).toBeTruthy()
|
||||||
})
|
})
|
||||||
|
|
||||||
@@ -487,8 +487,8 @@ test('buildCurrentProviderSummary redacts poisoned model and endpoint values', (
|
|||||||
})
|
})
|
||||||
|
|
||||||
expect(summary.providerLabel).toBe('OpenAI-compatible')
|
expect(summary.providerLabel).toBe('OpenAI-compatible')
|
||||||
expect(summary.modelLabel).toBe('sk-...678')
|
expect(summary.modelLabel).toBe('sk-...5678')
|
||||||
expect(summary.endpointLabel).toBe('sk-...678')
|
expect(summary.endpointLabel).toBe('sk-...5678')
|
||||||
})
|
})
|
||||||
|
|
||||||
test('buildCurrentProviderSummary labels generic local openai-compatible providers', () => {
|
test('buildCurrentProviderSummary labels generic local openai-compatible providers', () => {
|
||||||
|
|||||||
17
src/commands/repomap/index.ts
Normal file
17
src/commands/repomap/index.ts
Normal file
@@ -0,0 +1,17 @@
|
|||||||
|
/**
|
||||||
|
* /repomap command - minimal metadata only.
|
||||||
|
* Implementation is lazy-loaded from repomap.ts to reduce startup time.
|
||||||
|
*/
|
||||||
|
import type { Command } from '../../commands.js'
|
||||||
|
|
||||||
|
const repomap = {
|
||||||
|
type: 'local',
|
||||||
|
name: 'repomap',
|
||||||
|
description:
|
||||||
|
'Show or configure the repository structural map (codebase intelligence)',
|
||||||
|
isHidden: false,
|
||||||
|
supportsNonInteractive: true,
|
||||||
|
load: () => import('./repomap.js'),
|
||||||
|
} satisfies Command
|
||||||
|
|
||||||
|
export default repomap
|
||||||
56
src/commands/repomap/repomap.test.ts
Normal file
56
src/commands/repomap/repomap.test.ts
Normal file
@@ -0,0 +1,56 @@
|
|||||||
|
import { describe, expect, test } from 'bun:test'
|
||||||
|
import { parseArgs } from './repomap.js'
|
||||||
|
|
||||||
|
describe('/repomap argument parsing', () => {
|
||||||
|
test('defaults to 1024 tokens with no flags', () => {
|
||||||
|
const result = parseArgs('')
|
||||||
|
expect(result.tokens).toBe(2048)
|
||||||
|
expect(result.focus).toEqual([])
|
||||||
|
expect(result.invalidate).toBe(false)
|
||||||
|
expect(result.stats).toBe(false)
|
||||||
|
})
|
||||||
|
|
||||||
|
test('parses --tokens flag', () => {
|
||||||
|
const result = parseArgs('--tokens 4096')
|
||||||
|
expect(result.tokens).toBe(4096)
|
||||||
|
})
|
||||||
|
|
||||||
|
test('rejects --tokens below 256', () => {
|
||||||
|
const result = parseArgs('--tokens 100')
|
||||||
|
expect(result.tokens).toBe(2048) // falls back to default
|
||||||
|
})
|
||||||
|
|
||||||
|
test('rejects --tokens above 16384', () => {
|
||||||
|
const result = parseArgs('--tokens 20000')
|
||||||
|
expect(result.tokens).toBe(2048) // falls back to default
|
||||||
|
})
|
||||||
|
|
||||||
|
test('parses --focus flag', () => {
|
||||||
|
const result = parseArgs('--focus src/tools/')
|
||||||
|
expect(result.focus).toEqual(['src/tools/'])
|
||||||
|
})
|
||||||
|
|
||||||
|
test('parses multiple --focus flags', () => {
|
||||||
|
const result = parseArgs('--focus src/tools/ --focus src/context.ts')
|
||||||
|
expect(result.focus).toEqual(['src/tools/', 'src/context.ts'])
|
||||||
|
})
|
||||||
|
|
||||||
|
test('parses --invalidate flag', () => {
|
||||||
|
const result = parseArgs('--invalidate')
|
||||||
|
expect(result.invalidate).toBe(true)
|
||||||
|
expect(result.stats).toBe(false)
|
||||||
|
})
|
||||||
|
|
||||||
|
test('parses --stats flag', () => {
|
||||||
|
const result = parseArgs('--stats')
|
||||||
|
expect(result.stats).toBe(true)
|
||||||
|
expect(result.invalidate).toBe(false)
|
||||||
|
})
|
||||||
|
|
||||||
|
test('parses combined flags', () => {
|
||||||
|
const result = parseArgs('--tokens 2048 --focus src/tools/ --invalidate')
|
||||||
|
expect(result.tokens).toBe(2048)
|
||||||
|
expect(result.focus).toEqual(['src/tools/'])
|
||||||
|
expect(result.invalidate).toBe(true)
|
||||||
|
})
|
||||||
|
})
|
||||||
93
src/commands/repomap/repomap.ts
Normal file
93
src/commands/repomap/repomap.ts
Normal file
@@ -0,0 +1,93 @@
|
|||||||
|
import type { LocalCommandCall } from '../../types/command.js'
|
||||||
|
import { getCwd } from '../../utils/cwd.js'
|
||||||
|
|
||||||
|
/** Parse CLI-style arguments from the command string. */
|
||||||
|
export function parseArgs(args: string): {
|
||||||
|
tokens: number
|
||||||
|
focus: string[]
|
||||||
|
invalidate: boolean
|
||||||
|
stats: boolean
|
||||||
|
} {
|
||||||
|
const parts = args.trim().split(/\s+/).filter(Boolean)
|
||||||
|
let tokens = 2048
|
||||||
|
const focus: string[] = []
|
||||||
|
let invalidate = false
|
||||||
|
let stats = false
|
||||||
|
|
||||||
|
for (let i = 0; i < parts.length; i++) {
|
||||||
|
const part = parts[i]!
|
||||||
|
if (part === '--tokens' && i + 1 < parts.length) {
|
||||||
|
const n = parseInt(parts[i + 1]!, 10)
|
||||||
|
if (!isNaN(n) && n >= 256 && n <= 16384) {
|
||||||
|
tokens = n
|
||||||
|
}
|
||||||
|
i++
|
||||||
|
} else if (part === '--focus' && i + 1 < parts.length) {
|
||||||
|
focus.push(parts[i + 1]!)
|
||||||
|
i++
|
||||||
|
} else if (part === '--invalidate') {
|
||||||
|
invalidate = true
|
||||||
|
} else if (part === '--stats') {
|
||||||
|
stats = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return { tokens, focus, invalidate, stats }
|
||||||
|
}
|
||||||
|
|
||||||
|
export const call: LocalCommandCall = async (args) => {
|
||||||
|
const root = getCwd()
|
||||||
|
const { tokens, focus, invalidate, stats } = parseArgs(args ?? '')
|
||||||
|
|
||||||
|
// Lazy import to avoid loading tree-sitter at startup
|
||||||
|
const {
|
||||||
|
buildRepoMap,
|
||||||
|
invalidateCache,
|
||||||
|
getCacheStats,
|
||||||
|
} = await import('../../context/repoMap/index.js')
|
||||||
|
|
||||||
|
if (stats) {
|
||||||
|
const cacheStats = getCacheStats(root)
|
||||||
|
const lines = [
|
||||||
|
`Repository map cache stats:`,
|
||||||
|
` Cache directory: ${cacheStats.cacheDir}`,
|
||||||
|
` Cache file: ${cacheStats.cacheFile ?? '(none)'}`,
|
||||||
|
` Cached entries: ${cacheStats.entryCount}`,
|
||||||
|
` Cache exists: ${cacheStats.exists}`,
|
||||||
|
]
|
||||||
|
return { type: 'text', value: lines.join('\n') }
|
||||||
|
}
|
||||||
|
|
||||||
|
if (invalidate) {
|
||||||
|
invalidateCache(root)
|
||||||
|
const result = await buildRepoMap({
|
||||||
|
root,
|
||||||
|
maxTokens: tokens,
|
||||||
|
focusFiles: focus.length > 0 ? focus : undefined,
|
||||||
|
})
|
||||||
|
return {
|
||||||
|
type: 'text',
|
||||||
|
value: [
|
||||||
|
`Cache invalidated and rebuilt.`,
|
||||||
|
`Files: ${result.fileCount} ranked (${result.totalFileCount} total) | Tokens: ${result.tokenCount} | Time: ${result.buildTimeMs}ms | Cache hit: ${result.cacheHit}`,
|
||||||
|
'',
|
||||||
|
result.map,
|
||||||
|
].join('\n'),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
const result = await buildRepoMap({
|
||||||
|
root,
|
||||||
|
maxTokens: tokens,
|
||||||
|
focusFiles: focus.length > 0 ? focus : undefined,
|
||||||
|
})
|
||||||
|
|
||||||
|
return {
|
||||||
|
type: 'text',
|
||||||
|
value: [
|
||||||
|
`Repository map: ${result.fileCount} files ranked (${result.totalFileCount} total) | Tokens: ${result.tokenCount} | Time: ${result.buildTimeMs}ms | Cache hit: ${result.cacheHit}`,
|
||||||
|
'',
|
||||||
|
result.map,
|
||||||
|
].join('\n'),
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -3,14 +3,12 @@ import * as React from 'react'
|
|||||||
import { DEFAULT_CODEX_BASE_URL } from '../services/api/providerConfig.js'
|
import { DEFAULT_CODEX_BASE_URL } from '../services/api/providerConfig.js'
|
||||||
import { Box, Text } from '../ink.js'
|
import { Box, Text } from '../ink.js'
|
||||||
import { useKeybinding } from '../keybindings/useKeybinding.js'
|
import { useKeybinding } from '../keybindings/useKeybinding.js'
|
||||||
import { useSetAppState } from '../state/AppState.js'
|
|
||||||
import type { ProviderProfile } from '../utils/config.js'
|
import type { ProviderProfile } from '../utils/config.js'
|
||||||
import {
|
import {
|
||||||
clearCodexCredentials,
|
clearCodexCredentials,
|
||||||
readCodexCredentialsAsync,
|
readCodexCredentialsAsync,
|
||||||
} from '../utils/codexCredentials.js'
|
} from '../utils/codexCredentials.js'
|
||||||
import { isBareMode, isEnvTruthy } from '../utils/envUtils.js'
|
import { isBareMode, isEnvTruthy } from '../utils/envUtils.js'
|
||||||
import { getPrimaryModel, hasMultipleModels, parseModelList } from '../utils/providerModels.js'
|
|
||||||
import {
|
import {
|
||||||
applySavedProfileToCurrentSession,
|
applySavedProfileToCurrentSession,
|
||||||
buildCodexOAuthProfileEnv,
|
buildCodexOAuthProfileEnv,
|
||||||
@@ -52,7 +50,6 @@ import {
|
|||||||
import { Pane } from './design-system/Pane.js'
|
import { Pane } from './design-system/Pane.js'
|
||||||
import TextInput from './TextInput.js'
|
import TextInput from './TextInput.js'
|
||||||
import { useCodexOAuthFlow } from './useCodexOAuthFlow.js'
|
import { useCodexOAuthFlow } from './useCodexOAuthFlow.js'
|
||||||
import { useSetAppState } from '../state/AppState.js'
|
|
||||||
|
|
||||||
export type ProviderManagerResult = {
|
export type ProviderManagerResult = {
|
||||||
action: 'saved' | 'cancelled'
|
action: 'saved' | 'cancelled'
|
||||||
@@ -111,8 +108,8 @@ const FORM_STEPS: Array<{
|
|||||||
{
|
{
|
||||||
key: 'model',
|
key: 'model',
|
||||||
label: 'Default model',
|
label: 'Default model',
|
||||||
placeholder: 'e.g. llama3.1:8b or glm-4.7, glm-4.7-flash',
|
placeholder: 'e.g. llama3.1:8b',
|
||||||
helpText: 'Model name(s) to use. Separate multiple with commas; first is default.',
|
helpText: 'Model name to use when this provider is active.',
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
key: 'apiKey',
|
key: 'apiKey',
|
||||||
@@ -156,12 +153,7 @@ function profileSummary(profile: ProviderProfile, isActive: boolean): string {
|
|||||||
const keyInfo = profile.apiKey ? 'key set' : 'no key'
|
const keyInfo = profile.apiKey ? 'key set' : 'no key'
|
||||||
const providerKind =
|
const providerKind =
|
||||||
profile.provider === 'anthropic' ? 'anthropic' : 'openai-compatible'
|
profile.provider === 'anthropic' ? 'anthropic' : 'openai-compatible'
|
||||||
const models = parseModelList(profile.model)
|
return `${providerKind} · ${profile.baseUrl} · ${profile.model} · ${keyInfo}${activeSuffix}`
|
||||||
const modelDisplay =
|
|
||||||
models.length <= 3
|
|
||||||
? models.join(', ')
|
|
||||||
: `${models[0]}, ${models[1]} + ${models.length - 2} more`
|
|
||||||
return `${providerKind} · ${profile.baseUrl} · ${modelDisplay} · ${keyInfo}${activeSuffix}`
|
|
||||||
}
|
}
|
||||||
|
|
||||||
function getGithubCredentialSourceFromEnv(
|
function getGithubCredentialSourceFromEnv(
|
||||||
@@ -328,7 +320,6 @@ function CodexOAuthSetup({
|
|||||||
}
|
}
|
||||||
|
|
||||||
export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
|
export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
|
||||||
const setAppState = useSetAppState()
|
|
||||||
const initialGithubCredentialSource = getGithubCredentialSourceFromEnv()
|
const initialGithubCredentialSource = getGithubCredentialSourceFromEnv()
|
||||||
const initialIsGithubActive = isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB)
|
const initialIsGithubActive = isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB)
|
||||||
const initialHasGithubCredential = initialGithubCredentialSource !== 'none'
|
const initialHasGithubCredential = initialGithubCredentialSource !== 'none'
|
||||||
@@ -362,7 +353,6 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
|
|||||||
const [cursorOffset, setCursorOffset] = React.useState(0)
|
const [cursorOffset, setCursorOffset] = React.useState(0)
|
||||||
const [statusMessage, setStatusMessage] = React.useState<string | undefined>()
|
const [statusMessage, setStatusMessage] = React.useState<string | undefined>()
|
||||||
const [errorMessage, setErrorMessage] = React.useState<string | undefined>()
|
const [errorMessage, setErrorMessage] = React.useState<string | undefined>()
|
||||||
const [menuFocusValue, setMenuFocusValue] = React.useState<string | undefined>()
|
|
||||||
const [hasStoredCodexOAuthCredentials, setHasStoredCodexOAuthCredentials] =
|
const [hasStoredCodexOAuthCredentials, setHasStoredCodexOAuthCredentials] =
|
||||||
React.useState(false)
|
React.useState(false)
|
||||||
const [storedCodexOAuthProfileId, setStoredCodexOAuthProfileId] =
|
const [storedCodexOAuthProfileId, setStoredCodexOAuthProfileId] =
|
||||||
@@ -578,48 +568,24 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
|
|||||||
const githubError = activateGithubProvider()
|
const githubError = activateGithubProvider()
|
||||||
if (githubError) {
|
if (githubError) {
|
||||||
setErrorMessage(`Could not activate GitHub provider: ${githubError}`)
|
setErrorMessage(`Could not activate GitHub provider: ${githubError}`)
|
||||||
returnToMenu()
|
setScreen('menu')
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
setAppState(prev => ({
|
|
||||||
...prev,
|
|
||||||
mainLoopModel: GITHUB_PROVIDER_DEFAULT_MODEL,
|
|
||||||
mainLoopModelForSession: null,
|
|
||||||
}))
|
|
||||||
refreshProfiles()
|
refreshProfiles()
|
||||||
setAppState(prev => ({
|
|
||||||
...prev,
|
|
||||||
mainLoopModel: GITHUB_PROVIDER_DEFAULT_MODEL,
|
|
||||||
}))
|
|
||||||
setStatusMessage(`Active provider: ${GITHUB_PROVIDER_LABEL}`)
|
setStatusMessage(`Active provider: ${GITHUB_PROVIDER_LABEL}`)
|
||||||
returnToMenu()
|
setScreen('menu')
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
const active = setActiveProviderProfile(profileId)
|
const active = setActiveProviderProfile(profileId)
|
||||||
if (!active) {
|
if (!active) {
|
||||||
setErrorMessage('Could not change active provider.')
|
setErrorMessage('Could not change active provider.')
|
||||||
returnToMenu()
|
setScreen('menu')
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// Update the session model to the new provider's first model.
|
|
||||||
// persistActiveProviderProfileModel (called by onChangeAppState) will
|
|
||||||
// not overwrite the multi-model list because it checks if the model
|
|
||||||
// is already in the profile's comma-separated model list.
|
|
||||||
const newModel = getPrimaryModel(active.model)
|
|
||||||
setAppState(prev => ({
|
|
||||||
...prev,
|
|
||||||
mainLoopModel: newModel,
|
|
||||||
}))
|
|
||||||
|
|
||||||
providerLabel = active.name
|
providerLabel = active.name
|
||||||
setAppState(prev => ({
|
|
||||||
...prev,
|
|
||||||
mainLoopModel: active.model,
|
|
||||||
mainLoopModelForSession: null,
|
|
||||||
}))
|
|
||||||
const settingsOverrideError =
|
const settingsOverrideError =
|
||||||
clearStartupProviderOverrideFromUserSettings()
|
clearStartupProviderOverrideFromUserSettings()
|
||||||
const isActiveCodexOAuth = isCodexOAuthProfile(
|
const isActiveCodexOAuth = isCodexOAuthProfile(
|
||||||
@@ -647,21 +613,16 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
|
|||||||
? `Active provider: ${active.name}. Warning: could not clear startup provider override (${settingsOverrideError}).`
|
? `Active provider: ${active.name}. Warning: could not clear startup provider override (${settingsOverrideError}).`
|
||||||
: `Active provider: ${active.name}`,
|
: `Active provider: ${active.name}`,
|
||||||
)
|
)
|
||||||
returnToMenu()
|
setScreen('menu')
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
refreshProfiles()
|
refreshProfiles()
|
||||||
setStatusMessage(undefined)
|
setStatusMessage(undefined)
|
||||||
const detail = error instanceof Error ? error.message : String(error)
|
const detail = error instanceof Error ? error.message : String(error)
|
||||||
setErrorMessage(`Could not finish activating ${providerLabel}: ${detail}`)
|
setErrorMessage(`Could not finish activating ${providerLabel}: ${detail}`)
|
||||||
returnToMenu()
|
setScreen('menu')
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
function returnToMenu(): void {
|
|
||||||
setMenuFocusValue('done')
|
|
||||||
setScreen('menu')
|
|
||||||
}
|
|
||||||
|
|
||||||
function closeWithCancelled(message: string): void {
|
function closeWithCancelled(message: string): void {
|
||||||
onDone({ action: 'cancelled', message })
|
onDone({ action: 'cancelled', message })
|
||||||
}
|
}
|
||||||
@@ -812,13 +773,6 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
|
|||||||
}
|
}
|
||||||
|
|
||||||
const isActiveSavedProfile = getActiveProviderProfile()?.id === saved.id
|
const isActiveSavedProfile = getActiveProviderProfile()?.id === saved.id
|
||||||
if (isActiveSavedProfile) {
|
|
||||||
setAppState(prev => ({
|
|
||||||
...prev,
|
|
||||||
mainLoopModel: saved.model,
|
|
||||||
mainLoopModelForSession: null,
|
|
||||||
}))
|
|
||||||
}
|
|
||||||
const settingsOverrideError = isActiveSavedProfile
|
const settingsOverrideError = isActiveSavedProfile
|
||||||
? clearStartupProviderOverrideFromUserSettings()
|
? clearStartupProviderOverrideFromUserSettings()
|
||||||
: null
|
: null
|
||||||
@@ -846,7 +800,7 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
|
|||||||
setEditingProfileId(null)
|
setEditingProfileId(null)
|
||||||
setFormStepIndex(0)
|
setFormStepIndex(0)
|
||||||
setErrorMessage(undefined)
|
setErrorMessage(undefined)
|
||||||
returnToMenu()
|
setScreen('menu')
|
||||||
}
|
}
|
||||||
|
|
||||||
function renderOllamaSelection(): React.ReactNode {
|
function renderOllamaSelection(): React.ReactNode {
|
||||||
@@ -969,7 +923,7 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
returnToMenu()
|
setScreen('menu')
|
||||||
}
|
}
|
||||||
|
|
||||||
useKeybinding('confirm:no', handleBackFromForm, {
|
useKeybinding('confirm:no', handleBackFromForm, {
|
||||||
@@ -1050,31 +1004,11 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
|
|||||||
label: 'LM Studio',
|
label: 'LM Studio',
|
||||||
description: 'Local LM Studio endpoint',
|
description: 'Local LM Studio endpoint',
|
||||||
},
|
},
|
||||||
{
|
|
||||||
value: 'dashscope-cn',
|
|
||||||
label: 'Alibaba Coding Plan (China)',
|
|
||||||
description: 'Alibaba DashScope China endpoint',
|
|
||||||
},
|
|
||||||
{
|
|
||||||
value: 'dashscope-intl',
|
|
||||||
label: 'Alibaba Coding Plan',
|
|
||||||
description: 'Alibaba DashScope International endpoint',
|
|
||||||
},
|
|
||||||
{
|
{
|
||||||
value: 'custom',
|
value: 'custom',
|
||||||
label: 'Custom',
|
label: 'Custom',
|
||||||
description: 'Any OpenAI-compatible provider',
|
description: 'Any OpenAI-compatible provider',
|
||||||
},
|
},
|
||||||
{
|
|
||||||
value: 'nvidia-nim',
|
|
||||||
label: 'NVIDIA NIM',
|
|
||||||
description: 'NVIDIA NIM endpoint',
|
|
||||||
},
|
|
||||||
{
|
|
||||||
value: 'minimax',
|
|
||||||
label: 'MiniMax',
|
|
||||||
description: 'MiniMax API endpoint',
|
|
||||||
},
|
|
||||||
...(mode === 'first-run'
|
...(mode === 'first-run'
|
||||||
? [
|
? [
|
||||||
{
|
{
|
||||||
@@ -1112,7 +1046,7 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
|
|||||||
closeWithCancelled('Provider setup skipped')
|
closeWithCancelled('Provider setup skipped')
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
returnToMenu()
|
setScreen('menu')
|
||||||
}}
|
}}
|
||||||
visibleOptionCount={Math.min(13, options.length)}
|
visibleOptionCount={Math.min(13, options.length)}
|
||||||
/>
|
/>
|
||||||
@@ -1150,7 +1084,6 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
|
|||||||
focus={true}
|
focus={true}
|
||||||
showCursor={true}
|
showCursor={true}
|
||||||
placeholder={`${currentStep.placeholder}${figures.ellipsis}`}
|
placeholder={`${currentStep.placeholder}${figures.ellipsis}`}
|
||||||
mask={currentStepKey === 'apiKey' ? '*' : undefined}
|
|
||||||
columns={80}
|
columns={80}
|
||||||
cursorOffset={cursorOffset}
|
cursorOffset={cursorOffset}
|
||||||
onChangeCursorOffset={setCursorOffset}
|
onChangeCursorOffset={setCursorOffset}
|
||||||
@@ -1313,7 +1246,6 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
|
|||||||
}
|
}
|
||||||
}}
|
}}
|
||||||
onCancel={() => closeWithCancelled('Provider manager closed')}
|
onCancel={() => closeWithCancelled('Provider manager closed')}
|
||||||
defaultFocusValue={menuFocusValue}
|
|
||||||
visibleOptionCount={options.length}
|
visibleOptionCount={options.length}
|
||||||
/>
|
/>
|
||||||
</Box>
|
</Box>
|
||||||
@@ -1361,8 +1293,8 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
|
|||||||
description: 'Return to provider manager',
|
description: 'Return to provider manager',
|
||||||
},
|
},
|
||||||
]}
|
]}
|
||||||
onChange={() => returnToMenu()}
|
onChange={() => setScreen('menu')}
|
||||||
onCancel={() => returnToMenu()}
|
onCancel={() => setScreen('menu')}
|
||||||
visibleOptionCount={1}
|
visibleOptionCount={1}
|
||||||
/>
|
/>
|
||||||
</Box>
|
</Box>
|
||||||
@@ -1377,7 +1309,7 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
|
|||||||
<Select
|
<Select
|
||||||
options={selectOptions}
|
options={selectOptions}
|
||||||
onChange={onSelect}
|
onChange={onSelect}
|
||||||
onCancel={() => returnToMenu()}
|
onCancel={() => setScreen('menu')}
|
||||||
visibleOptionCount={Math.min(10, Math.max(2, selectOptions.length))}
|
visibleOptionCount={Math.min(10, Math.max(2, selectOptions.length))}
|
||||||
/>
|
/>
|
||||||
</Box>
|
</Box>
|
||||||
@@ -1418,7 +1350,7 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
|
|||||||
setErrorMessage(
|
setErrorMessage(
|
||||||
'Codex OAuth login finished, but the provider profile could not be saved.',
|
'Codex OAuth login finished, but the provider profile could not be saved.',
|
||||||
)
|
)
|
||||||
returnToMenu()
|
setScreen('menu')
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1430,7 +1362,7 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
|
|||||||
setErrorMessage(
|
setErrorMessage(
|
||||||
'Codex OAuth login finished, but the provider could not be set as the startup provider.',
|
'Codex OAuth login finished, but the provider could not be set as the startup provider.',
|
||||||
)
|
)
|
||||||
returnToMenu()
|
setScreen('menu')
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1464,7 +1396,7 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
|
|||||||
|
|
||||||
setStatusMessage(message)
|
setStatusMessage(message)
|
||||||
setErrorMessage(undefined)
|
setErrorMessage(undefined)
|
||||||
returnToMenu()
|
setScreen('menu')
|
||||||
}}
|
}}
|
||||||
/>
|
/>
|
||||||
)
|
)
|
||||||
@@ -1504,7 +1436,7 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
|
|||||||
refreshProfiles()
|
refreshProfiles()
|
||||||
setStatusMessage('GitHub provider deleted')
|
setStatusMessage('GitHub provider deleted')
|
||||||
}
|
}
|
||||||
returnToMenu()
|
setScreen('menu')
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1539,7 +1471,7 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
|
|||||||
: 'Provider deleted',
|
: 'Provider deleted',
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
returnToMenu()
|
setScreen('menu')
|
||||||
},
|
},
|
||||||
{ includeGithub: true },
|
{ includeGithub: true },
|
||||||
)
|
)
|
||||||
|
|||||||
@@ -117,28 +117,17 @@ function detectProvider(): { name: string; model: string; baseUrl: string; isLoc
|
|||||||
const baseUrl = resolvedRequest.baseUrl
|
const baseUrl = resolvedRequest.baseUrl
|
||||||
const isLocal = isLocalProviderUrl(baseUrl)
|
const isLocal = isLocalProviderUrl(baseUrl)
|
||||||
let name = 'OpenAI'
|
let name = 'OpenAI'
|
||||||
if (/nvidia/i.test(baseUrl) || /nvidia/i.test(rawModel) || process.env.NVIDIA_NIM)
|
// Override to Codex when resolved endpoint is Codex
|
||||||
name = 'NVIDIA NIM'
|
if (resolvedRequest.transport === 'codex_responses' || baseUrl.includes('chatgpt.com/backend-api/codex')) {
|
||||||
else if (/minimax/i.test(baseUrl) || /minimax/i.test(rawModel) || process.env.MINIMAX_API_KEY)
|
|
||||||
name = 'MiniMax'
|
|
||||||
else if (resolvedRequest.transport === 'codex_responses' || baseUrl.includes('chatgpt.com/backend-api/codex'))
|
|
||||||
name = 'Codex'
|
name = 'Codex'
|
||||||
else if (/deepseek/i.test(baseUrl) || /deepseek/i.test(rawModel))
|
} else if (/deepseek/i.test(baseUrl) || /deepseek/i.test(rawModel)) name = 'DeepSeek'
|
||||||
name = 'DeepSeek'
|
else if (/openrouter/i.test(baseUrl)) name = 'OpenRouter'
|
||||||
else if (/openrouter/i.test(baseUrl))
|
else if (/together/i.test(baseUrl)) name = 'Together AI'
|
||||||
name = 'OpenRouter'
|
else if (/groq/i.test(baseUrl)) name = 'Groq'
|
||||||
else if (/together/i.test(baseUrl))
|
else if (/mistral/i.test(baseUrl) || /mistral/i.test(rawModel)) name = 'Mistral'
|
||||||
name = 'Together AI'
|
else if (/azure/i.test(baseUrl)) name = 'Azure OpenAI'
|
||||||
else if (/groq/i.test(baseUrl))
|
else if (/llama/i.test(rawModel)) name = 'Meta Llama'
|
||||||
name = 'Groq'
|
else if (isLocal) name = getLocalOpenAICompatibleProviderLabel(baseUrl)
|
||||||
else if (/mistral/i.test(baseUrl) || /mistral/i.test(rawModel))
|
|
||||||
name = 'Mistral'
|
|
||||||
else if (/azure/i.test(baseUrl))
|
|
||||||
name = 'Azure OpenAI'
|
|
||||||
else if (/llama/i.test(rawModel))
|
|
||||||
name = 'Meta Llama'
|
|
||||||
else if (isLocal)
|
|
||||||
name = getLocalOpenAICompatibleProviderLabel(baseUrl)
|
|
||||||
|
|
||||||
// Resolve model alias to actual model name + reasoning effort
|
// Resolve model alias to actual model name + reasoning effort
|
||||||
let displayModel = resolvedRequest.resolvedModel
|
let displayModel = resolvedRequest.resolvedModel
|
||||||
@@ -153,9 +142,7 @@ function detectProvider(): { name: string; model: string; baseUrl: string; isLoc
|
|||||||
const settings = getSettings_DEPRECATED() || {}
|
const settings = getSettings_DEPRECATED() || {}
|
||||||
const modelSetting = settings.model || process.env.ANTHROPIC_MODEL || process.env.CLAUDE_MODEL || 'claude-sonnet-4-6'
|
const modelSetting = settings.model || process.env.ANTHROPIC_MODEL || process.env.CLAUDE_MODEL || 'claude-sonnet-4-6'
|
||||||
const resolvedModel = parseUserSpecifiedModel(modelSetting)
|
const resolvedModel = parseUserSpecifiedModel(modelSetting)
|
||||||
const baseUrl = process.env.ANTHROPIC_BASE_URL ?? 'https://api.anthropic.com'
|
return { name: 'Anthropic', model: resolvedModel, baseUrl: 'https://api.anthropic.com', isLocal: false }
|
||||||
const isLocal = isLocalProviderUrl(baseUrl)
|
|
||||||
return { name: 'Anthropic', model: resolvedModel, baseUrl, isLocal }
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// ─── Box drawing ──────────────────────────────────────────────────────────────
|
// ─── Box drawing ──────────────────────────────────────────────────────────────
|
||||||
|
|||||||
@@ -6,7 +6,6 @@ import stripAnsi from 'strip-ansi'
|
|||||||
|
|
||||||
import { createRoot } from '../ink.js'
|
import { createRoot } from '../ink.js'
|
||||||
import { AppStateProvider } from '../state/AppState.js'
|
import { AppStateProvider } from '../state/AppState.js'
|
||||||
import { maskTextWithVisibleEdges } from '../utils/Cursor.js'
|
|
||||||
import TextInput from './TextInput.js'
|
import TextInput from './TextInput.js'
|
||||||
import VimTextInput from './VimTextInput.js'
|
import VimTextInput from './VimTextInput.js'
|
||||||
|
|
||||||
@@ -200,13 +199,6 @@ test('TextInput renders typed characters before delayed parent value commits', a
|
|||||||
expect(output).not.toContain('Type here...')
|
expect(output).not.toContain('Type here...')
|
||||||
})
|
})
|
||||||
|
|
||||||
test('maskTextWithVisibleEdges preserves only the first and last three chars', () => {
|
|
||||||
expect(maskTextWithVisibleEdges('sk-secret-12345678', '*')).toBe(
|
|
||||||
'sk-************678',
|
|
||||||
)
|
|
||||||
expect(maskTextWithVisibleEdges('abcdef', '*')).toBe('******')
|
|
||||||
})
|
|
||||||
|
|
||||||
test('VimTextInput preserves rapid typed characters before delayed parent value commits', async () => {
|
test('VimTextInput preserves rapid typed characters before delayed parent value commits', async () => {
|
||||||
const { stdout, stdin, getOutput } = createTestStreams()
|
const { stdout, stdin, getOutput } = createTestStreams()
|
||||||
const root = await createRoot({
|
const root = await createRoot({
|
||||||
|
|||||||
@@ -1,16 +1,5 @@
|
|||||||
import { afterEach, expect, test } from 'bun:test'
|
import { afterEach, expect, test } from 'bun:test'
|
||||||
|
|
||||||
// MACRO is replaced at build time by Bun.define but not in test mode.
|
|
||||||
// Define it globally so tests that import modules using MACRO don't crash.
|
|
||||||
;(globalThis as Record<string, unknown>).MACRO = {
|
|
||||||
VERSION: '99.0.0',
|
|
||||||
DISPLAY_VERSION: '0.0.0-test',
|
|
||||||
BUILD_TIME: new Date().toISOString(),
|
|
||||||
ISSUES_EXPLAINER: 'report the issue at https://github.com/anthropics/claude-code/issues',
|
|
||||||
PACKAGE_URL: '@gitlawb/openclaude',
|
|
||||||
NATIVE_PACKAGE_URL: undefined,
|
|
||||||
}
|
|
||||||
|
|
||||||
import { getSystemPrompt, DEFAULT_AGENT_PROMPT } from './prompts.js'
|
import { getSystemPrompt, DEFAULT_AGENT_PROMPT } from './prompts.js'
|
||||||
import { CLI_SYSPROMPT_PREFIXES, getCLISyspromptPrefix } from './system.js'
|
import { CLI_SYSPROMPT_PREFIXES, getCLISyspromptPrefix } from './system.js'
|
||||||
import { CLAUDE_CODE_GUIDE_AGENT } from '../tools/AgentTool/built-in/claudeCodeGuideAgent.js'
|
import { CLAUDE_CODE_GUIDE_AGENT } from '../tools/AgentTool/built-in/claudeCodeGuideAgent.js'
|
||||||
|
|||||||
64
src/context.repoMap.test.ts
Normal file
64
src/context.repoMap.test.ts
Normal file
@@ -0,0 +1,64 @@
|
|||||||
|
import { afterEach, describe, expect, test } from 'bun:test'
|
||||||
|
|
||||||
|
afterEach(() => {
|
||||||
|
delete process.env.REPO_MAP
|
||||||
|
})
|
||||||
|
|
||||||
|
describe('getRepoMapContext', () => {
|
||||||
|
test('returns null when REPO_MAP env flag is off (default)', async () => {
|
||||||
|
const { getRepoMapContext } = await import('./context.js')
|
||||||
|
const result = await getRepoMapContext()
|
||||||
|
expect(result).toBeNull()
|
||||||
|
})
|
||||||
|
|
||||||
|
test('buildRepoMap produces valid output for context injection', async () => {
|
||||||
|
process.env.REPO_MAP = '1'
|
||||||
|
const { mkdtempSync, writeFileSync, rmSync } = await import('fs')
|
||||||
|
const { tmpdir } = await import('os')
|
||||||
|
const { join } = await import('path')
|
||||||
|
const { buildRepoMap } = await import('./context/repoMap/index.js')
|
||||||
|
|
||||||
|
const tempDir = mkdtempSync(join(tmpdir(), 'repomap-ctx-'))
|
||||||
|
try {
|
||||||
|
writeFileSync(
|
||||||
|
join(tempDir, 'main.ts'),
|
||||||
|
'export function main(): void { console.log("hello") }\n',
|
||||||
|
)
|
||||||
|
writeFileSync(
|
||||||
|
join(tempDir, 'utils.ts'),
|
||||||
|
'import { main } from "./main"\nexport function helper(): void { main() }\n',
|
||||||
|
)
|
||||||
|
|
||||||
|
const result = await buildRepoMap({
|
||||||
|
root: tempDir,
|
||||||
|
maxTokens: 1024,
|
||||||
|
})
|
||||||
|
|
||||||
|
// Valid map that could be injected
|
||||||
|
expect(result.map.length).toBeGreaterThan(0)
|
||||||
|
expect(result.tokenCount).toBeGreaterThan(0)
|
||||||
|
expect(result.tokenCount).toBeLessThanOrEqual(1024)
|
||||||
|
expect(typeof result.cacheHit).toBe('boolean')
|
||||||
|
} finally {
|
||||||
|
rmSync(tempDir, { recursive: true, force: true })
|
||||||
|
const { invalidateCache } = await import('./context/repoMap/index.js')
|
||||||
|
invalidateCache(tempDir)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
test('getSystemContext does not include repoMap key when flag is off', async () => {
|
||||||
|
const { getSystemContext } = await import('./context.js')
|
||||||
|
const result = await getSystemContext()
|
||||||
|
expect('repoMap' in result).toBe(false)
|
||||||
|
})
|
||||||
|
|
||||||
|
test('getSystemContext includes repoMap key when REPO_MAP env flag is on', async () => {
|
||||||
|
process.env.REPO_MAP = '1'
|
||||||
|
const { getSystemContext, getRepoMapContext } = await import('./context.js')
|
||||||
|
getRepoMapContext.cache.clear?.()
|
||||||
|
getSystemContext.cache.clear?.()
|
||||||
|
const result = await getSystemContext()
|
||||||
|
expect(typeof result.repoMap).toBe('string')
|
||||||
|
expect(result.repoMap!.length).toBeGreaterThan(0)
|
||||||
|
})
|
||||||
|
})
|
||||||
@@ -31,6 +31,7 @@ export function setSystemPromptInjection(value: string | null): void {
|
|||||||
// Clear context caches immediately when injection changes
|
// Clear context caches immediately when injection changes
|
||||||
getUserContext.cache.clear?.()
|
getUserContext.cache.clear?.()
|
||||||
getSystemContext.cache.clear?.()
|
getSystemContext.cache.clear?.()
|
||||||
|
getRepoMapContext.cache.clear?.()
|
||||||
}
|
}
|
||||||
|
|
||||||
export const getGitStatus = memoize(async (): Promise<string | null> => {
|
export const getGitStatus = memoize(async (): Promise<string | null> => {
|
||||||
@@ -110,6 +111,35 @@ export const getGitStatus = memoize(async (): Promise<string | null> => {
|
|||||||
}
|
}
|
||||||
})
|
})
|
||||||
|
|
||||||
|
export const getRepoMapContext = memoize(
|
||||||
|
async (): Promise<string | null> => {
|
||||||
|
const runtimeEnabled = isEnvTruthy(process.env.REPO_MAP)
|
||||||
|
if (!runtimeEnabled) return null
|
||||||
|
if (isBareMode()) return null
|
||||||
|
if (isEnvTruthy(process.env.CLAUDE_CODE_REMOTE)) return null
|
||||||
|
|
||||||
|
try {
|
||||||
|
const startTime = Date.now()
|
||||||
|
logForDiagnosticsNoPII('info', 'repo_map_started')
|
||||||
|
const { buildRepoMap } = await import('./context/repoMap/index.js')
|
||||||
|
const result = await buildRepoMap({ maxTokens: 1024 })
|
||||||
|
logForDiagnosticsNoPII('info', 'repo_map_completed', {
|
||||||
|
duration_ms: Date.now() - startTime,
|
||||||
|
token_count: result.tokenCount,
|
||||||
|
file_count: result.fileCount,
|
||||||
|
cache_hit: result.cacheHit,
|
||||||
|
})
|
||||||
|
if (!result.map || result.map.length === 0) return null
|
||||||
|
return `This is a structural map of the repository, ranked by importance. Use it to understand the codebase architecture.\n\n${result.map}`
|
||||||
|
} catch (err) {
|
||||||
|
logForDiagnosticsNoPII('warn', 'repo_map_failed', {
|
||||||
|
error: String(err),
|
||||||
|
})
|
||||||
|
return null
|
||||||
|
}
|
||||||
|
},
|
||||||
|
)
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* This context is prepended to each conversation, and cached for the duration of the conversation.
|
* This context is prepended to each conversation, and cached for the duration of the conversation.
|
||||||
*/
|
*/
|
||||||
@@ -127,6 +157,8 @@ export const getSystemContext = memoize(
|
|||||||
? null
|
? null
|
||||||
: await getGitStatus()
|
: await getGitStatus()
|
||||||
|
|
||||||
|
const repoMap = await getRepoMapContext()
|
||||||
|
|
||||||
// Include system prompt injection if set (for cache breaking, internal-only)
|
// Include system prompt injection if set (for cache breaking, internal-only)
|
||||||
const injection = feature('BREAK_CACHE_COMMAND')
|
const injection = feature('BREAK_CACHE_COMMAND')
|
||||||
? getSystemPromptInjection()
|
? getSystemPromptInjection()
|
||||||
@@ -135,11 +167,13 @@ export const getSystemContext = memoize(
|
|||||||
logForDiagnosticsNoPII('info', 'system_context_completed', {
|
logForDiagnosticsNoPII('info', 'system_context_completed', {
|
||||||
duration_ms: Date.now() - startTime,
|
duration_ms: Date.now() - startTime,
|
||||||
has_git_status: gitStatus !== null,
|
has_git_status: gitStatus !== null,
|
||||||
|
has_repo_map: repoMap !== null,
|
||||||
has_injection: injection !== null,
|
has_injection: injection !== null,
|
||||||
})
|
})
|
||||||
|
|
||||||
return {
|
return {
|
||||||
...(gitStatus && { gitStatus }),
|
...(gitStatus && { gitStatus }),
|
||||||
|
...(repoMap && { repoMap }),
|
||||||
...(feature('BREAK_CACHE_COMMAND') && injection
|
...(feature('BREAK_CACHE_COMMAND') && injection
|
||||||
? {
|
? {
|
||||||
cacheBreaker: `[CACHE_BREAKER: ${injection}]`,
|
cacheBreaker: `[CACHE_BREAKER: ${injection}]`,
|
||||||
|
|||||||
29
src/context/repoMap/__fixtures__/mini-repo/fileA.ts
Normal file
29
src/context/repoMap/__fixtures__/mini-repo/fileA.ts
Normal file
@@ -0,0 +1,29 @@
|
|||||||
|
// fileA — imports from fileB and fileC
|
||||||
|
|
||||||
|
import { CacheLayer, buildCache } from './fileB'
|
||||||
|
import { createStore, type StoreConfig } from './fileC'
|
||||||
|
|
||||||
|
export class AppController {
|
||||||
|
private cache: CacheLayer
|
||||||
|
private config: StoreConfig
|
||||||
|
|
||||||
|
constructor(config: StoreConfig) {
|
||||||
|
this.cache = buildCache()
|
||||||
|
this.config = config
|
||||||
|
}
|
||||||
|
|
||||||
|
initialize(): void {
|
||||||
|
const store = createStore()
|
||||||
|
this.cache.cacheSet('primary', store)
|
||||||
|
}
|
||||||
|
|
||||||
|
getFromCache(key: string): unknown {
|
||||||
|
return this.cache.cacheGet(key)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
export function startApp(config: StoreConfig): AppController {
|
||||||
|
const app = new AppController(config)
|
||||||
|
app.initialize()
|
||||||
|
return app
|
||||||
|
}
|
||||||
23
src/context/repoMap/__fixtures__/mini-repo/fileB.ts
Normal file
23
src/context/repoMap/__fixtures__/mini-repo/fileB.ts
Normal file
@@ -0,0 +1,23 @@
|
|||||||
|
// fileB — imports from fileC
|
||||||
|
|
||||||
|
import { DataStore, createStore } from './fileC'
|
||||||
|
|
||||||
|
export class CacheLayer {
|
||||||
|
private store: DataStore
|
||||||
|
|
||||||
|
constructor() {
|
||||||
|
this.store = createStore()
|
||||||
|
}
|
||||||
|
|
||||||
|
cacheGet(key: string): unknown | undefined {
|
||||||
|
return this.store.lookup(key)
|
||||||
|
}
|
||||||
|
|
||||||
|
cacheSet(key: string, value: unknown): void {
|
||||||
|
this.store.add(key, value)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
export function buildCache(): CacheLayer {
|
||||||
|
return new CacheLayer()
|
||||||
|
}
|
||||||
22
src/context/repoMap/__fixtures__/mini-repo/fileC.ts
Normal file
22
src/context/repoMap/__fixtures__/mini-repo/fileC.ts
Normal file
@@ -0,0 +1,22 @@
|
|||||||
|
// fileC — the most imported module (imported by fileA and fileB)
|
||||||
|
|
||||||
|
export class DataStore {
|
||||||
|
private items: Map<string, unknown> = new Map()
|
||||||
|
|
||||||
|
add(key: string, value: unknown): void {
|
||||||
|
this.items.set(key, value)
|
||||||
|
}
|
||||||
|
|
||||||
|
lookup(key: string): unknown | undefined {
|
||||||
|
return this.items.get(key)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
export function createStore(): DataStore {
|
||||||
|
return new DataStore()
|
||||||
|
}
|
||||||
|
|
||||||
|
export interface StoreConfig {
|
||||||
|
maxSize: number
|
||||||
|
ttl: number
|
||||||
|
}
|
||||||
9
src/context/repoMap/__fixtures__/mini-repo/fileD.ts
Normal file
9
src/context/repoMap/__fixtures__/mini-repo/fileD.ts
Normal file
@@ -0,0 +1,9 @@
|
|||||||
|
// fileD — imports from fileA
|
||||||
|
|
||||||
|
import { AppController, startApp } from './fileA'
|
||||||
|
|
||||||
|
export function runApp(): void {
|
||||||
|
const controller: AppController = startApp({ maxSize: 100, ttl: 3600 })
|
||||||
|
const result = controller.getFromCache('test')
|
||||||
|
console.log(result)
|
||||||
|
}
|
||||||
25
src/context/repoMap/__fixtures__/mini-repo/fileE.ts
Normal file
25
src/context/repoMap/__fixtures__/mini-repo/fileE.ts
Normal file
@@ -0,0 +1,25 @@
|
|||||||
|
// fileE — isolated, no imports from other fixture files
|
||||||
|
|
||||||
|
export interface Logger {
|
||||||
|
log(message: string): void
|
||||||
|
warn(message: string): void
|
||||||
|
error(message: string): void
|
||||||
|
}
|
||||||
|
|
||||||
|
export class ConsoleLogger implements Logger {
|
||||||
|
log(message: string): void {
|
||||||
|
console.log(`[LOG] ${message}`)
|
||||||
|
}
|
||||||
|
|
||||||
|
warn(message: string): void {
|
||||||
|
console.warn(`[WARN] ${message}`)
|
||||||
|
}
|
||||||
|
|
||||||
|
error(message: string): void {
|
||||||
|
console.error(`[ERROR] ${message}`)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
export function createLogger(): Logger {
|
||||||
|
return new ConsoleLogger()
|
||||||
|
}
|
||||||
139
src/context/repoMap/cache.ts
Normal file
139
src/context/repoMap/cache.ts
Normal file
@@ -0,0 +1,139 @@
|
|||||||
|
import { createHash } from 'crypto'
|
||||||
|
import {
|
||||||
|
existsSync,
|
||||||
|
mkdirSync,
|
||||||
|
readFileSync,
|
||||||
|
statSync,
|
||||||
|
writeFileSync,
|
||||||
|
} from 'fs'
|
||||||
|
import { homedir } from 'os'
|
||||||
|
import { join } from 'path'
|
||||||
|
import type { CacheData, CacheEntry, CacheStats, Tag } from './types.js'
|
||||||
|
|
||||||
|
const CACHE_VERSION = 1
|
||||||
|
const CACHE_DIR = join(homedir(), '.openclaude', 'repomap-cache')
|
||||||
|
|
||||||
|
function getCacheFilePath(root: string): string {
|
||||||
|
const hash = createHash('sha1').update(root).digest('hex')
|
||||||
|
return join(CACHE_DIR, `${hash}.json`)
|
||||||
|
}
|
||||||
|
|
||||||
|
function ensureCacheDir(): void {
|
||||||
|
if (!existsSync(CACHE_DIR)) {
|
||||||
|
mkdirSync(CACHE_DIR, { recursive: true })
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Load cache from disk. Returns empty cache if not found or invalid. */
|
||||||
|
export function loadCache(root: string): CacheData {
|
||||||
|
const path = getCacheFilePath(root)
|
||||||
|
try {
|
||||||
|
const raw = readFileSync(path, 'utf-8')
|
||||||
|
const data = JSON.parse(raw) as CacheData
|
||||||
|
if (data.version !== CACHE_VERSION) {
|
||||||
|
return { version: CACHE_VERSION, entries: {} }
|
||||||
|
}
|
||||||
|
return data
|
||||||
|
} catch {
|
||||||
|
return { version: CACHE_VERSION, entries: {} }
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Save cache to disk. */
|
||||||
|
export function saveCache(root: string, cache: CacheData): void {
|
||||||
|
ensureCacheDir()
|
||||||
|
const path = getCacheFilePath(root)
|
||||||
|
writeFileSync(path, JSON.stringify(cache), 'utf-8')
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Check if a file's cached entry is still valid based on mtime and size.
|
||||||
|
* Returns the cached tags if valid, null otherwise.
|
||||||
|
*/
|
||||||
|
export function getCachedTags(
|
||||||
|
cache: CacheData,
|
||||||
|
filePath: string,
|
||||||
|
root: string,
|
||||||
|
): Tag[] | null {
|
||||||
|
const entry = cache.entries[filePath]
|
||||||
|
if (!entry) return null
|
||||||
|
|
||||||
|
try {
|
||||||
|
const absolutePath = join(root, filePath)
|
||||||
|
const stat = statSync(absolutePath)
|
||||||
|
if (stat.mtimeMs === entry.mtimeMs && stat.size === entry.size) {
|
||||||
|
return entry.tags
|
||||||
|
}
|
||||||
|
} catch {
|
||||||
|
// File may have been deleted
|
||||||
|
}
|
||||||
|
return null
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Update the cache entry for a file. */
|
||||||
|
export function setCachedTags(
|
||||||
|
cache: CacheData,
|
||||||
|
filePath: string,
|
||||||
|
root: string,
|
||||||
|
tags: Tag[],
|
||||||
|
): void {
|
||||||
|
try {
|
||||||
|
const absolutePath = join(root, filePath)
|
||||||
|
const stat = statSync(absolutePath)
|
||||||
|
cache.entries[filePath] = {
|
||||||
|
tags,
|
||||||
|
mtimeMs: stat.mtimeMs,
|
||||||
|
size: stat.size,
|
||||||
|
}
|
||||||
|
} catch {
|
||||||
|
// If we can't stat, don't cache
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Compute a hash of the inputs that affect the rendered map.
|
||||||
|
* Used to cache the final rendered output.
|
||||||
|
*/
|
||||||
|
export function computeMapHash(
|
||||||
|
files: string[],
|
||||||
|
maxTokens: number,
|
||||||
|
focusFiles: string[],
|
||||||
|
): string {
|
||||||
|
const sorted = [...files].sort()
|
||||||
|
const input = JSON.stringify({ files: sorted, maxTokens, focusFiles: [...focusFiles].sort() })
|
||||||
|
return createHash('sha1').update(input).digest('hex')
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Get cache statistics. */
|
||||||
|
export function getCacheStats(root: string): CacheStats {
|
||||||
|
const cacheFile = getCacheFilePath(root)
|
||||||
|
const exists = existsSync(cacheFile)
|
||||||
|
let entryCount = 0
|
||||||
|
|
||||||
|
if (exists) {
|
||||||
|
try {
|
||||||
|
const data = JSON.parse(readFileSync(cacheFile, 'utf-8')) as CacheData
|
||||||
|
entryCount = Object.keys(data.entries).length
|
||||||
|
} catch {
|
||||||
|
// corrupted cache
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return {
|
||||||
|
cacheDir: CACHE_DIR,
|
||||||
|
cacheFile: exists ? cacheFile : null,
|
||||||
|
entryCount,
|
||||||
|
exists,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Delete the cache for a repo root. */
|
||||||
|
export function invalidateCache(root: string): void {
|
||||||
|
const path = getCacheFilePath(root)
|
||||||
|
try {
|
||||||
|
const { unlinkSync } = require('fs')
|
||||||
|
unlinkSync(path)
|
||||||
|
} catch {
|
||||||
|
// File may not exist
|
||||||
|
}
|
||||||
|
}
|
||||||
109
src/context/repoMap/gitFiles.ts
Normal file
109
src/context/repoMap/gitFiles.ts
Normal file
@@ -0,0 +1,109 @@
|
|||||||
|
import { execFile } from 'child_process'
|
||||||
|
import { readdirSync } from 'fs'
|
||||||
|
import { join, relative } from 'path'
|
||||||
|
import type { SupportedLanguage } from './types.js'
|
||||||
|
|
||||||
|
const SUPPORTED_EXTENSIONS: Record<string, SupportedLanguage> = {
|
||||||
|
'.ts': 'typescript',
|
||||||
|
'.tsx': 'typescript',
|
||||||
|
'.js': 'javascript',
|
||||||
|
'.jsx': 'javascript',
|
||||||
|
'.mjs': 'javascript',
|
||||||
|
'.cjs': 'javascript',
|
||||||
|
'.py': 'python',
|
||||||
|
}
|
||||||
|
|
||||||
|
const EXCLUDED_DIRS = new Set([
|
||||||
|
'node_modules',
|
||||||
|
'dist',
|
||||||
|
'.git',
|
||||||
|
'.hg',
|
||||||
|
'.svn',
|
||||||
|
'build',
|
||||||
|
'out',
|
||||||
|
'coverage',
|
||||||
|
'__pycache__',
|
||||||
|
'.next',
|
||||||
|
'.nuxt',
|
||||||
|
'vendor',
|
||||||
|
'.worktrees',
|
||||||
|
])
|
||||||
|
|
||||||
|
const EXCLUDED_FILES = new Set([
|
||||||
|
'bun.lock',
|
||||||
|
'bun.lockb',
|
||||||
|
'package-lock.json',
|
||||||
|
'yarn.lock',
|
||||||
|
'pnpm-lock.yaml',
|
||||||
|
])
|
||||||
|
|
||||||
|
export function getLanguageForFile(filePath: string): SupportedLanguage | null {
|
||||||
|
const ext = filePath.substring(filePath.lastIndexOf('.'))
|
||||||
|
return SUPPORTED_EXTENSIONS[ext] ?? null
|
||||||
|
}
|
||||||
|
|
||||||
|
export function isSupportedFile(filePath: string): boolean {
|
||||||
|
return getLanguageForFile(filePath) !== null
|
||||||
|
}
|
||||||
|
|
||||||
|
/** List files using git ls-files. Returns relative paths. */
|
||||||
|
function gitLsFiles(root: string): Promise<string[]> {
|
||||||
|
return new Promise((resolve, reject) => {
|
||||||
|
execFile(
|
||||||
|
'git',
|
||||||
|
['ls-files', '--cached', '--others', '--exclude-standard'],
|
||||||
|
{ cwd: root, maxBuffer: 10 * 1024 * 1024 },
|
||||||
|
(error, stdout) => {
|
||||||
|
if (error) {
|
||||||
|
reject(error)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
const files = stdout
|
||||||
|
.split('\n')
|
||||||
|
.map(f => f.trim())
|
||||||
|
.filter(f => f.length > 0)
|
||||||
|
resolve(files)
|
||||||
|
},
|
||||||
|
)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Walk directory tree manually as fallback when git is unavailable. */
|
||||||
|
function walkDirectory(root: string, currentDir: string = root): string[] {
|
||||||
|
const results: string[] = []
|
||||||
|
let entries: ReturnType<typeof readdirSync>
|
||||||
|
try {
|
||||||
|
entries = readdirSync(currentDir, { withFileTypes: true })
|
||||||
|
} catch {
|
||||||
|
return results
|
||||||
|
}
|
||||||
|
|
||||||
|
for (const entry of entries) {
|
||||||
|
const name = entry.name
|
||||||
|
if (entry.isDirectory()) {
|
||||||
|
if (!EXCLUDED_DIRS.has(name) && !name.startsWith('.')) {
|
||||||
|
results.push(...walkDirectory(root, join(currentDir, name)))
|
||||||
|
}
|
||||||
|
} else if (entry.isFile()) {
|
||||||
|
if (!EXCLUDED_FILES.has(name)) {
|
||||||
|
results.push(relative(root, join(currentDir, name)))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return results
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Enumerate all supported source files in the repo.
|
||||||
|
* Tries git ls-files first, falls back to manual walk.
|
||||||
|
*/
|
||||||
|
export async function getRepoFiles(root: string): Promise<string[]> {
|
||||||
|
let files: string[]
|
||||||
|
try {
|
||||||
|
files = await gitLsFiles(root)
|
||||||
|
} catch {
|
||||||
|
files = walkDirectory(root)
|
||||||
|
}
|
||||||
|
|
||||||
|
return files.filter(isSupportedFile)
|
||||||
|
}
|
||||||
88
src/context/repoMap/graph.ts
Normal file
88
src/context/repoMap/graph.ts
Normal file
@@ -0,0 +1,88 @@
|
|||||||
|
import Graph from 'graphology'
|
||||||
|
import type { FileTags } from './types.js'
|
||||||
|
|
||||||
|
// Common identifiers that should contribute less weight (high IDF penalty).
|
||||||
|
const COMMON_NAMES = new Set([
|
||||||
|
'map', 'get', 'set', 'value', 'key', 'data', 'result', 'error',
|
||||||
|
'name', 'type', 'id', 'index', 'item', 'items', 'list', 'options',
|
||||||
|
'config', 'args', 'params', 'props', 'state', 'event', 'callback',
|
||||||
|
'handler', 'fn', 'func', 'self', 'this', 'ctx', 'context', 'req',
|
||||||
|
'res', 'next', 'err', 'msg', 'obj', 'arr', 'str', 'num', 'val',
|
||||||
|
'init', 'start', 'stop', 'run', 'main', 'test', 'setup', 'teardown',
|
||||||
|
'constructor', 'toString', 'valueOf', 'length', 'size', 'count',
|
||||||
|
'push', 'pop', 'shift', 'filter', 'reduce', 'forEach', 'find',
|
||||||
|
'log', 'warn', 'info', 'debug', 'trace',
|
||||||
|
])
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Build a directed graph from file tags.
|
||||||
|
*
|
||||||
|
* Nodes are file paths. An edge from A to B means file A references
|
||||||
|
* a symbol defined in file B. Edge weight = refCount * idf(symbolName).
|
||||||
|
*/
|
||||||
|
export function buildGraph(allFileTags: FileTags[]): Graph {
|
||||||
|
const graph = new Graph({ multi: false, type: 'directed' })
|
||||||
|
|
||||||
|
// Build a map from symbol name → files that define it
|
||||||
|
const defIndex = new Map<string, Set<string>>()
|
||||||
|
for (const ft of allFileTags) {
|
||||||
|
for (const tag of ft.tags) {
|
||||||
|
if (tag.kind === 'def') {
|
||||||
|
let files = defIndex.get(tag.name)
|
||||||
|
if (!files) {
|
||||||
|
files = new Set()
|
||||||
|
defIndex.set(tag.name, files)
|
||||||
|
}
|
||||||
|
files.add(ft.path)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Compute IDF: log(totalFiles / filesDefiningSymbol)
|
||||||
|
// Common names get an extra penalty
|
||||||
|
const totalFiles = allFileTags.length
|
||||||
|
function idf(symbolName: string): number {
|
||||||
|
const defFiles = defIndex.get(symbolName)
|
||||||
|
const docFreq = defFiles ? defFiles.size : 1
|
||||||
|
const rawIdf = Math.log(totalFiles / docFreq)
|
||||||
|
return COMMON_NAMES.has(symbolName) ? rawIdf * 0.1 : rawIdf
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add all files as nodes
|
||||||
|
for (const ft of allFileTags) {
|
||||||
|
if (!graph.hasNode(ft.path)) {
|
||||||
|
graph.addNode(ft.path)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Build edges: for each ref in a file, find where it's defined
|
||||||
|
for (const ft of allFileTags) {
|
||||||
|
// Count refs per target file
|
||||||
|
const edgeWeights = new Map<string, number>()
|
||||||
|
|
||||||
|
for (const tag of ft.tags) {
|
||||||
|
if (tag.kind !== 'ref') continue
|
||||||
|
|
||||||
|
const defFiles = defIndex.get(tag.name)
|
||||||
|
if (!defFiles) continue
|
||||||
|
|
||||||
|
const weight = idf(tag.name)
|
||||||
|
for (const defFile of defFiles) {
|
||||||
|
if (defFile === ft.path) continue // skip self-references
|
||||||
|
const current = edgeWeights.get(defFile) ?? 0
|
||||||
|
edgeWeights.set(defFile, current + weight)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for (const [target, weight] of edgeWeights) {
|
||||||
|
if (graph.hasEdge(ft.path, target)) {
|
||||||
|
graph.setEdgeAttribute(ft.path, target, 'weight',
|
||||||
|
graph.getEdgeAttribute(ft.path, target, 'weight') + weight)
|
||||||
|
} else {
|
||||||
|
graph.addEdge(ft.path, target, { weight })
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return graph
|
||||||
|
}
|
||||||
144
src/context/repoMap/index.ts
Normal file
144
src/context/repoMap/index.ts
Normal file
@@ -0,0 +1,144 @@
|
|||||||
|
import {
|
||||||
|
computeMapHash,
|
||||||
|
getCachedTags,
|
||||||
|
getCacheStats as getCacheStatsImpl,
|
||||||
|
invalidateCache as invalidateCacheImpl,
|
||||||
|
loadCache,
|
||||||
|
saveCache,
|
||||||
|
setCachedTags,
|
||||||
|
} from './cache.js'
|
||||||
|
import { getRepoFiles } from './gitFiles.js'
|
||||||
|
import { buildGraph } from './graph.js'
|
||||||
|
import { rankFiles } from './pagerank.js'
|
||||||
|
import { initParser } from './parser.js'
|
||||||
|
import { renderMap } from './renderer.js'
|
||||||
|
import { extractTags } from './symbolExtractor.js'
|
||||||
|
import type { FileTags, RepoMapOptions, RepoMapResult, CacheStats } from './types.js'
|
||||||
|
|
||||||
|
const DEFAULT_MAX_TOKENS = 2048
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Build a structural summary of a code repository.
|
||||||
|
*
|
||||||
|
* Walks the repo, extracts symbols via tree-sitter, builds an IDF-weighted
|
||||||
|
* reference graph, ranks files with PageRank, and renders a token-budgeted
|
||||||
|
* structural summary.
|
||||||
|
*/
|
||||||
|
export async function buildRepoMap(options: RepoMapOptions = {}): Promise<RepoMapResult> {
|
||||||
|
const startTime = Date.now()
|
||||||
|
const root = options.root ?? process.cwd()
|
||||||
|
const maxTokens = options.maxTokens ?? DEFAULT_MAX_TOKENS
|
||||||
|
const focusFiles = options.focusFiles ?? []
|
||||||
|
|
||||||
|
// Initialize tree-sitter
|
||||||
|
await initParser()
|
||||||
|
|
||||||
|
// Get files
|
||||||
|
const files = options.files ?? await getRepoFiles(root)
|
||||||
|
const totalFileCount = files.length
|
||||||
|
|
||||||
|
// Check if we have a cached rendered map
|
||||||
|
const mapHash = computeMapHash(files, maxTokens, focusFiles)
|
||||||
|
const cache = loadCache(root)
|
||||||
|
|
||||||
|
// Check if rendered map is cached (stored as a special entry)
|
||||||
|
const renderedCacheKey = `__rendered__${mapHash}`
|
||||||
|
const renderedEntry = cache.entries[renderedCacheKey]
|
||||||
|
if (renderedEntry && renderedEntry.tags.length === 1) {
|
||||||
|
const cachedResult = renderedEntry.tags[0]!
|
||||||
|
// The cached "tag" stores the rendered map in the signature field
|
||||||
|
// and metadata in name/line fields
|
||||||
|
try {
|
||||||
|
const meta = JSON.parse(cachedResult.name)
|
||||||
|
return {
|
||||||
|
map: cachedResult.signature,
|
||||||
|
cacheHit: true,
|
||||||
|
buildTimeMs: Date.now() - startTime,
|
||||||
|
fileCount: meta.fileCount ?? 0,
|
||||||
|
totalFileCount,
|
||||||
|
tokenCount: meta.tokenCount ?? 0,
|
||||||
|
}
|
||||||
|
} catch {
|
||||||
|
// Invalid cached data, continue with full build
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Extract tags for all files (using per-file cache).
|
||||||
|
// Separate cached hits from files needing extraction.
|
||||||
|
const allFileTags: FileTags[] = []
|
||||||
|
const uncachedFiles: string[] = []
|
||||||
|
|
||||||
|
for (const file of files) {
|
||||||
|
const cachedTags = getCachedTags(cache, file, root)
|
||||||
|
if (cachedTags) {
|
||||||
|
allFileTags.push({ path: file, tags: cachedTags })
|
||||||
|
} else {
|
||||||
|
uncachedFiles.push(file)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Process uncached files in parallel batches
|
||||||
|
const BATCH_SIZE = 50
|
||||||
|
for (let i = 0; i < uncachedFiles.length; i += BATCH_SIZE) {
|
||||||
|
const batch = uncachedFiles.slice(i, i + BATCH_SIZE)
|
||||||
|
const results = await Promise.all(
|
||||||
|
batch.map(file => extractTags(file, root).catch(() => null))
|
||||||
|
)
|
||||||
|
for (let j = 0; j < results.length; j++) {
|
||||||
|
const fileTags = results[j]
|
||||||
|
if (fileTags) {
|
||||||
|
allFileTags.push(fileTags)
|
||||||
|
setCachedTags(cache, fileTags.path, root, fileTags.tags)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Build graph and rank
|
||||||
|
const graph = buildGraph(allFileTags)
|
||||||
|
const ranked = rankFiles(graph, focusFiles)
|
||||||
|
|
||||||
|
// Build a lookup map
|
||||||
|
const fileTagsMap = new Map<string, FileTags>()
|
||||||
|
for (const ft of allFileTags) {
|
||||||
|
fileTagsMap.set(ft.path, ft)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Render
|
||||||
|
const { map, tokenCount, fileCount } = renderMap(ranked, fileTagsMap, maxTokens)
|
||||||
|
|
||||||
|
// Cache the rendered result
|
||||||
|
cache.entries[renderedCacheKey] = {
|
||||||
|
tags: [{
|
||||||
|
kind: 'def',
|
||||||
|
name: JSON.stringify({ fileCount, tokenCount }),
|
||||||
|
line: 0,
|
||||||
|
signature: map,
|
||||||
|
}],
|
||||||
|
mtimeMs: Date.now(),
|
||||||
|
size: 0,
|
||||||
|
}
|
||||||
|
|
||||||
|
saveCache(root, cache)
|
||||||
|
|
||||||
|
return {
|
||||||
|
map,
|
||||||
|
cacheHit: false,
|
||||||
|
buildTimeMs: Date.now() - startTime,
|
||||||
|
fileCount,
|
||||||
|
totalFileCount,
|
||||||
|
tokenCount,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Invalidate the disk cache for a given repo root. */
|
||||||
|
export function invalidateCache(root?: string): void {
|
||||||
|
invalidateCacheImpl(root ?? process.cwd())
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Get cache statistics for a given repo root. */
|
||||||
|
export function getCacheStats(root?: string): CacheStats {
|
||||||
|
return getCacheStatsImpl(root ?? process.cwd())
|
||||||
|
}
|
||||||
|
|
||||||
|
// Re-export types for convenience
|
||||||
|
export type { RepoMapOptions, RepoMapResult, CacheStats } from './types.js'
|
||||||
57
src/context/repoMap/pagerank.ts
Normal file
57
src/context/repoMap/pagerank.ts
Normal file
@@ -0,0 +1,57 @@
|
|||||||
|
import type Graph from 'graphology'
|
||||||
|
import pagerank from 'graphology-pagerank'
|
||||||
|
|
||||||
|
export interface RankedFile {
|
||||||
|
path: string
|
||||||
|
score: number
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Run PageRank on the file reference graph.
|
||||||
|
*
|
||||||
|
* focusFiles get a 100x boost in the personalization vector so they
|
||||||
|
* and their neighbors rank higher.
|
||||||
|
*
|
||||||
|
* Returns files sorted by score descending.
|
||||||
|
*/
|
||||||
|
export function rankFiles(
|
||||||
|
graph: Graph,
|
||||||
|
focusFiles: string[] = [],
|
||||||
|
): RankedFile[] {
|
||||||
|
if (graph.order === 0) return []
|
||||||
|
|
||||||
|
const hasPersonalization = focusFiles.length > 0
|
||||||
|
|
||||||
|
// graphology-pagerank accepts getEdgeWeight option
|
||||||
|
const scores: Record<string, number> = pagerank(graph, {
|
||||||
|
alpha: 0.85,
|
||||||
|
maxIterations: 100,
|
||||||
|
tolerance: 1e-6,
|
||||||
|
getEdgeWeight: 'weight',
|
||||||
|
})
|
||||||
|
|
||||||
|
// Apply focus boost post-hoc if focus files are specified
|
||||||
|
if (hasPersonalization) {
|
||||||
|
for (const file of focusFiles) {
|
||||||
|
if (scores[file] !== undefined) {
|
||||||
|
scores[file] *= 100
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Also boost direct neighbors of focus files
|
||||||
|
for (const file of focusFiles) {
|
||||||
|
if (!graph.hasNode(file)) continue
|
||||||
|
graph.forEachNeighbor(file, (neighbor) => {
|
||||||
|
if (scores[neighbor] !== undefined) {
|
||||||
|
scores[neighbor] *= 10
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
const ranked: RankedFile[] = Object.entries(scores)
|
||||||
|
.map(([path, score]) => ({ path, score }))
|
||||||
|
.sort((a, b) => b.score - a.score)
|
||||||
|
|
||||||
|
return ranked
|
||||||
|
}
|
||||||
166
src/context/repoMap/parser.ts
Normal file
166
src/context/repoMap/parser.ts
Normal file
@@ -0,0 +1,166 @@
|
|||||||
|
import { existsSync, readFileSync } from 'fs'
|
||||||
|
import { join, resolve } from 'path'
|
||||||
|
import { fileURLToPath } from 'url'
|
||||||
|
import type { SupportedLanguage } from './types.js'
|
||||||
|
|
||||||
|
// Resolve project root in both source and bundled modes.
|
||||||
|
// In source (bun test/dev): import.meta.url is src/context/repoMap/parser.ts → go up 4 levels
|
||||||
|
// In bundle (node dist/cli.mjs): import.meta.url is dist/cli.mjs → go up 2 levels
|
||||||
|
const __filename = fileURLToPath(import.meta.url)
|
||||||
|
const __projectRoot = join(
|
||||||
|
__filename,
|
||||||
|
process.env.NODE_ENV === 'test' ? '../../../../' : '../../',
|
||||||
|
)
|
||||||
|
|
||||||
|
// web-tree-sitter types
|
||||||
|
type TreeSitterParser = {
|
||||||
|
parse(input: string): { rootNode: unknown }
|
||||||
|
setLanguage(lang: unknown): void
|
||||||
|
delete(): void
|
||||||
|
}
|
||||||
|
|
||||||
|
type TreeSitterLanguage = {
|
||||||
|
query(source: string): unknown
|
||||||
|
}
|
||||||
|
|
||||||
|
// The actual module exports { Parser, Language } as named exports
|
||||||
|
let ParserClass: (new () => TreeSitterParser) & {
|
||||||
|
init(opts?: { locateFile?: (file: string) => string }): Promise<void>
|
||||||
|
} | null = null
|
||||||
|
let LanguageLoader: {
|
||||||
|
load(path: string | Uint8Array): Promise<TreeSitterLanguage>
|
||||||
|
} | null = null
|
||||||
|
|
||||||
|
let initialized = false
|
||||||
|
const languageCache = new Map<SupportedLanguage, TreeSitterLanguage>()
|
||||||
|
const queryCache = new Map<SupportedLanguage, string>()
|
||||||
|
|
||||||
|
/** Resolve the path to the tree-sitter WASM file. */
|
||||||
|
function getTreeSitterWasmPath(): string {
|
||||||
|
// Try require.resolve first (works in source mode with node_modules)
|
||||||
|
try {
|
||||||
|
const webTsDir = resolve(
|
||||||
|
require.resolve('web-tree-sitter/package.json'),
|
||||||
|
'..',
|
||||||
|
)
|
||||||
|
return join(webTsDir, 'tree-sitter.wasm')
|
||||||
|
} catch {
|
||||||
|
// Fallback: relative to project root
|
||||||
|
return join(__projectRoot, 'node_modules', 'web-tree-sitter', 'tree-sitter.wasm')
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Resolve the path to a language WASM grammar file. */
|
||||||
|
function getLanguageWasmPath(language: SupportedLanguage): string {
|
||||||
|
const wasmName = language === 'typescript' ? 'tree-sitter-typescript' :
|
||||||
|
language === 'javascript' ? 'tree-sitter-javascript' :
|
||||||
|
`tree-sitter-${language}`
|
||||||
|
|
||||||
|
try {
|
||||||
|
const wasmDir = resolve(
|
||||||
|
require.resolve('tree-sitter-wasms/package.json'),
|
||||||
|
'..',
|
||||||
|
'out',
|
||||||
|
)
|
||||||
|
return join(wasmDir, `${wasmName}.wasm`)
|
||||||
|
} catch {
|
||||||
|
return join(__projectRoot, 'node_modules', 'tree-sitter-wasms', 'out', `${wasmName}.wasm`)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Resolve the path to a tag query .scm file for the given language. */
|
||||||
|
function getQueryPath(language: SupportedLanguage): string {
|
||||||
|
// Try source location first (works in both source and when queries are alongside the bundle)
|
||||||
|
const sourcePath = join(__projectRoot, 'src', 'context', 'repoMap', 'queries', `${language}-tags.scm`)
|
||||||
|
if (existsSync(sourcePath)) {
|
||||||
|
return sourcePath
|
||||||
|
}
|
||||||
|
// Fallback: relative to this file (source mode)
|
||||||
|
return join(fileURLToPath(import.meta.url), '..', 'queries', `${language}-tags.scm`)
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Initialize the tree-sitter WASM module. */
|
||||||
|
export async function initParser(): Promise<void> {
|
||||||
|
if (initialized) return
|
||||||
|
|
||||||
|
try {
|
||||||
|
const mod = await import('web-tree-sitter')
|
||||||
|
ParserClass = mod.Parser as typeof ParserClass
|
||||||
|
LanguageLoader = mod.Language as typeof LanguageLoader
|
||||||
|
|
||||||
|
const wasmPath = getTreeSitterWasmPath()
|
||||||
|
await ParserClass!.init({
|
||||||
|
locateFile: () => wasmPath,
|
||||||
|
})
|
||||||
|
initialized = true
|
||||||
|
} catch (err) {
|
||||||
|
// eslint-disable-next-line no-console
|
||||||
|
console.error('[repoMap] Failed to initialize tree-sitter:', err)
|
||||||
|
throw err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Load a language grammar. Cached after first load. */
|
||||||
|
export async function loadLanguage(language: SupportedLanguage): Promise<TreeSitterLanguage | null> {
|
||||||
|
if (languageCache.has(language)) {
|
||||||
|
return languageCache.get(language)!
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!initialized) {
|
||||||
|
await initParser()
|
||||||
|
}
|
||||||
|
|
||||||
|
try {
|
||||||
|
const wasmPath = getLanguageWasmPath(language)
|
||||||
|
const lang = await LanguageLoader!.load(wasmPath)
|
||||||
|
languageCache.set(language, lang)
|
||||||
|
return lang
|
||||||
|
} catch (err) {
|
||||||
|
// eslint-disable-next-line no-console
|
||||||
|
console.error(`[repoMap] Failed to load ${language} grammar:`, err)
|
||||||
|
return null
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Load the tag query for a language. Cached after first load. */
|
||||||
|
export function loadQuery(language: SupportedLanguage): string | null {
|
||||||
|
if (queryCache.has(language)) {
|
||||||
|
return queryCache.get(language)!
|
||||||
|
}
|
||||||
|
|
||||||
|
try {
|
||||||
|
const queryPath = getQueryPath(language)
|
||||||
|
const content = readFileSync(queryPath, 'utf-8')
|
||||||
|
queryCache.set(language, content)
|
||||||
|
return content
|
||||||
|
} catch {
|
||||||
|
return null
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Create a new parser instance with the given language set. */
|
||||||
|
export async function createParser(language: SupportedLanguage): Promise<TreeSitterParser | null> {
|
||||||
|
if (!initialized) {
|
||||||
|
await initParser()
|
||||||
|
}
|
||||||
|
|
||||||
|
const lang = await loadLanguage(language)
|
||||||
|
if (!lang) return null
|
||||||
|
|
||||||
|
try {
|
||||||
|
const parser = new ParserClass!()
|
||||||
|
parser.setLanguage(lang)
|
||||||
|
return parser
|
||||||
|
} catch {
|
||||||
|
return null
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Clear all caches (useful for testing). */
|
||||||
|
export function clearParserCaches(): void {
|
||||||
|
languageCache.clear()
|
||||||
|
queryCache.clear()
|
||||||
|
initialized = false
|
||||||
|
ParserClass = null
|
||||||
|
LanguageLoader = null
|
||||||
|
}
|
||||||
92
src/context/repoMap/queries/javascript-tags.scm
Normal file
92
src/context/repoMap/queries/javascript-tags.scm
Normal file
@@ -0,0 +1,92 @@
|
|||||||
|
; Source: https://github.com/Aider-AI/aider/blob/main/aider/queries/tree-sitter-languages/javascript-tags.scm
|
||||||
|
; License: MIT (Apache-2.0 dual) — see https://github.com/Aider-AI/aider/blob/main/LICENSE
|
||||||
|
; Copied for use in openclaude's repo-map feature.
|
||||||
|
|
||||||
|
(
|
||||||
|
(comment)* @doc
|
||||||
|
.
|
||||||
|
(method_definition
|
||||||
|
name: (property_identifier) @name.definition.method) @definition.method
|
||||||
|
(#not-eq? @name.definition.method "constructor")
|
||||||
|
(#strip! @doc "^[\\s\\*/]+|^[\\s\\*/]$")
|
||||||
|
(#select-adjacent! @doc @definition.method)
|
||||||
|
)
|
||||||
|
|
||||||
|
(
|
||||||
|
(comment)* @doc
|
||||||
|
.
|
||||||
|
[
|
||||||
|
(class
|
||||||
|
name: (_) @name.definition.class)
|
||||||
|
(class_declaration
|
||||||
|
name: (_) @name.definition.class)
|
||||||
|
] @definition.class
|
||||||
|
(#strip! @doc "^[\\s\\*/]+|^[\\s\\*/]$")
|
||||||
|
(#select-adjacent! @doc @definition.class)
|
||||||
|
)
|
||||||
|
|
||||||
|
(
|
||||||
|
(comment)* @doc
|
||||||
|
.
|
||||||
|
[
|
||||||
|
(function
|
||||||
|
name: (identifier) @name.definition.function)
|
||||||
|
(function_declaration
|
||||||
|
name: (identifier) @name.definition.function)
|
||||||
|
(generator_function
|
||||||
|
name: (identifier) @name.definition.function)
|
||||||
|
(generator_function_declaration
|
||||||
|
name: (identifier) @name.definition.function)
|
||||||
|
] @definition.function
|
||||||
|
(#strip! @doc "^[\\s\\*/]+|^[\\s\\*/]$")
|
||||||
|
(#select-adjacent! @doc @definition.function)
|
||||||
|
)
|
||||||
|
|
||||||
|
(
|
||||||
|
(comment)* @doc
|
||||||
|
.
|
||||||
|
(lexical_declaration
|
||||||
|
(variable_declarator
|
||||||
|
name: (identifier) @name.definition.function
|
||||||
|
value: [(arrow_function) (function)]) @definition.function)
|
||||||
|
(#strip! @doc "^[\\s\\*/]+|^[\\s\\*/]$")
|
||||||
|
(#select-adjacent! @doc @definition.function)
|
||||||
|
)
|
||||||
|
|
||||||
|
(
|
||||||
|
(comment)* @doc
|
||||||
|
.
|
||||||
|
(variable_declaration
|
||||||
|
(variable_declarator
|
||||||
|
name: (identifier) @name.definition.function
|
||||||
|
value: [(arrow_function) (function)]) @definition.function)
|
||||||
|
(#strip! @doc "^[\\s\\*/]+|^[\\s\\*/]$")
|
||||||
|
(#select-adjacent! @doc @definition.function)
|
||||||
|
)
|
||||||
|
|
||||||
|
(assignment_expression
|
||||||
|
left: [
|
||||||
|
(identifier) @name.definition.function
|
||||||
|
(member_expression
|
||||||
|
property: (property_identifier) @name.definition.function)
|
||||||
|
]
|
||||||
|
right: [(arrow_function) (function)]
|
||||||
|
) @definition.function
|
||||||
|
|
||||||
|
(pair
|
||||||
|
key: (property_identifier) @name.definition.function
|
||||||
|
value: [(arrow_function) (function)]) @definition.function
|
||||||
|
|
||||||
|
(
|
||||||
|
(call_expression
|
||||||
|
function: (identifier) @name.reference.call) @reference.call
|
||||||
|
(#not-match? @name.reference.call "^(require)$")
|
||||||
|
)
|
||||||
|
|
||||||
|
(call_expression
|
||||||
|
function: (member_expression
|
||||||
|
property: (property_identifier) @name.reference.call)
|
||||||
|
arguments: (_) @reference.call)
|
||||||
|
|
||||||
|
(new_expression
|
||||||
|
constructor: (_) @name.reference.class) @reference.class
|
||||||
16
src/context/repoMap/queries/python-tags.scm
Normal file
16
src/context/repoMap/queries/python-tags.scm
Normal file
@@ -0,0 +1,16 @@
|
|||||||
|
; Source: https://github.com/Aider-AI/aider/blob/main/aider/queries/tree-sitter-languages/python-tags.scm
|
||||||
|
; License: MIT (Apache-2.0 dual) — see https://github.com/Aider-AI/aider/blob/main/LICENSE
|
||||||
|
; Copied for use in openclaude's repo-map feature.
|
||||||
|
|
||||||
|
(class_definition
|
||||||
|
name: (identifier) @name.definition.class) @definition.class
|
||||||
|
|
||||||
|
(function_definition
|
||||||
|
name: (identifier) @name.definition.function) @definition.function
|
||||||
|
|
||||||
|
(call
|
||||||
|
function: [
|
||||||
|
(identifier) @name.reference.call
|
||||||
|
(attribute
|
||||||
|
attribute: (identifier) @name.reference.call)
|
||||||
|
]) @reference.call
|
||||||
45
src/context/repoMap/queries/typescript-tags.scm
Normal file
45
src/context/repoMap/queries/typescript-tags.scm
Normal file
@@ -0,0 +1,45 @@
|
|||||||
|
; Source: https://github.com/Aider-AI/aider/blob/main/aider/queries/tree-sitter-languages/typescript-tags.scm
|
||||||
|
; License: MIT (Apache-2.0 dual) — see https://github.com/Aider-AI/aider/blob/main/LICENSE
|
||||||
|
; Copied for use in openclaude's repo-map feature.
|
||||||
|
|
||||||
|
(function_signature
|
||||||
|
name: (identifier) @name.definition.function) @definition.function
|
||||||
|
|
||||||
|
(method_signature
|
||||||
|
name: (property_identifier) @name.definition.method) @definition.method
|
||||||
|
|
||||||
|
(abstract_method_signature
|
||||||
|
name: (property_identifier) @name.definition.method) @definition.method
|
||||||
|
|
||||||
|
(abstract_class_declaration
|
||||||
|
name: (type_identifier) @name.definition.class) @definition.class
|
||||||
|
|
||||||
|
(module
|
||||||
|
name: (identifier) @name.definition.module) @definition.module
|
||||||
|
|
||||||
|
(interface_declaration
|
||||||
|
name: (type_identifier) @name.definition.interface) @definition.interface
|
||||||
|
|
||||||
|
(type_annotation
|
||||||
|
(type_identifier) @name.reference.type) @reference.type
|
||||||
|
|
||||||
|
(new_expression
|
||||||
|
constructor: (identifier) @name.reference.class) @reference.class
|
||||||
|
|
||||||
|
(function_declaration
|
||||||
|
name: (identifier) @name.definition.function) @definition.function
|
||||||
|
|
||||||
|
(method_definition
|
||||||
|
name: (property_identifier) @name.definition.method) @definition.method
|
||||||
|
|
||||||
|
(class_declaration
|
||||||
|
name: (type_identifier) @name.definition.class) @definition.class
|
||||||
|
|
||||||
|
(interface_declaration
|
||||||
|
name: (type_identifier) @name.definition.class) @definition.class
|
||||||
|
|
||||||
|
(type_alias_declaration
|
||||||
|
name: (type_identifier) @name.definition.type) @definition.type
|
||||||
|
|
||||||
|
(enum_declaration
|
||||||
|
name: (identifier) @name.definition.enum) @definition.enum
|
||||||
72
src/context/repoMap/renderer.ts
Normal file
72
src/context/repoMap/renderer.ts
Normal file
@@ -0,0 +1,72 @@
|
|||||||
|
import type { FileTags, Tag } from './types.js'
|
||||||
|
import type { RankedFile } from './pagerank.js'
|
||||||
|
import { countTokens } from './tokenize.js'
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Render a token-budgeted repo map from ranked files and their tags.
|
||||||
|
*
|
||||||
|
* Format per file:
|
||||||
|
* path/to/file.ts:
|
||||||
|
* ⋮
|
||||||
|
* signature line for def 1
|
||||||
|
* ⋮
|
||||||
|
* signature line for def 2
|
||||||
|
* ⋮
|
||||||
|
*
|
||||||
|
* Files that don't fit within the budget are dropped entirely.
|
||||||
|
*/
|
||||||
|
export function renderMap(
|
||||||
|
rankedFiles: RankedFile[],
|
||||||
|
fileTagsMap: Map<string, FileTags>,
|
||||||
|
maxTokens: number,
|
||||||
|
): { map: string; tokenCount: number; fileCount: number } {
|
||||||
|
const sections: string[] = []
|
||||||
|
let currentTokens = 0
|
||||||
|
let fileCount = 0
|
||||||
|
|
||||||
|
for (const { path } of rankedFiles) {
|
||||||
|
const ft = fileTagsMap.get(path)
|
||||||
|
if (!ft) continue
|
||||||
|
|
||||||
|
// Only include definitions in the rendered output
|
||||||
|
const defs = ft.tags
|
||||||
|
.filter(t => t.kind === 'def')
|
||||||
|
.sort((a, b) => a.line - b.line)
|
||||||
|
|
||||||
|
if (defs.length === 0) continue
|
||||||
|
|
||||||
|
const section = renderFileSection(path, defs)
|
||||||
|
const sectionTokens = countTokens(section)
|
||||||
|
|
||||||
|
// Would this section bust the budget?
|
||||||
|
if (currentTokens + sectionTokens > maxTokens) {
|
||||||
|
// Don't include partial files — drop entirely
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
sections.push(section)
|
||||||
|
currentTokens += sectionTokens
|
||||||
|
fileCount++
|
||||||
|
}
|
||||||
|
|
||||||
|
const map = sections.join('\n')
|
||||||
|
return { map, tokenCount: currentTokens, fileCount }
|
||||||
|
}
|
||||||
|
|
||||||
|
function renderFileSection(path: string, defs: Tag[]): string {
|
||||||
|
const lines: string[] = [`${path}:`]
|
||||||
|
let lastLine = 0
|
||||||
|
|
||||||
|
for (const def of defs) {
|
||||||
|
// Add elision marker if there's a gap
|
||||||
|
if (def.line > lastLine + 1) {
|
||||||
|
lines.push('⋮')
|
||||||
|
}
|
||||||
|
lines.push(` ${def.signature}`)
|
||||||
|
lastLine = def.line
|
||||||
|
}
|
||||||
|
|
||||||
|
// Trailing elision marker
|
||||||
|
lines.push('⋮')
|
||||||
|
return lines.join('\n')
|
||||||
|
}
|
||||||
275
src/context/repoMap/repoMap.test.ts
Normal file
275
src/context/repoMap/repoMap.test.ts
Normal file
@@ -0,0 +1,275 @@
|
|||||||
|
import { afterEach, beforeAll, describe, expect, test } from 'bun:test'
|
||||||
|
import { cpSync, mkdtempSync, rmSync, utimesSync, writeFileSync } from 'fs'
|
||||||
|
import { tmpdir } from 'os'
|
||||||
|
import { join } from 'path'
|
||||||
|
import { invalidateCache, buildRepoMap } from './index.js'
|
||||||
|
import { extractTags } from './symbolExtractor.js'
|
||||||
|
import { buildGraph } from './graph.js'
|
||||||
|
import { initParser } from './parser.js'
|
||||||
|
import { countTokens } from './tokenize.js'
|
||||||
|
|
||||||
|
// Small fixture repo used by all tests below; fileA–fileD form an import
// chain, fileE is intentionally isolated.
const FIXTURE_ROOT = join(import.meta.dir, '__fixtures__', 'mini-repo')
const FIXTURE_FILES = ['fileA.ts', 'fileB.ts', 'fileC.ts', 'fileD.ts', 'fileE.ts']

// Initialize the tree-sitter runtime once for the whole suite.
beforeAll(async () => {
  await initParser()
})

// Clean up cache between tests to avoid cross-test interference
afterEach(() => {
  invalidateCache(FIXTURE_ROOT)
})
|
||||||
|
|
||||||
|
// Unit-level checks for extractTags against the fixture files.
describe('symbol extraction', () => {
  test('extracts function and class defs from a TypeScript file', async () => {
    const result = await extractTags('fileC.ts', FIXTURE_ROOT)
    expect(result).not.toBeNull()

    const defs = result!.tags.filter(t => t.kind === 'def')
    const defNames = defs.map(t => t.name)

    // fileC declares a class, a factory function, and an interface.
    expect(defNames).toContain('DataStore')
    expect(defNames).toContain('createStore')
    expect(defNames).toContain('StoreConfig')

    // All defs should have kind='def'
    for (const d of defs) {
      expect(d.kind).toBe('def')
    }
  })

  test('extracts references to imported symbols', async () => {
    const result = await extractTags('fileA.ts', FIXTURE_ROOT)
    expect(result).not.toBeNull()

    const refs = result!.tags.filter(t => t.kind === 'ref')
    const refNames = refs.map(t => t.name)

    // fileA imports CacheLayer from fileB and StoreConfig from fileC
    expect(refNames).toContain('CacheLayer')
    expect(refNames).toContain('StoreConfig')
  })
})
|
||||||
|
|
||||||
|
// Verifies that buildGraph turns cross-file symbol references into edges.
describe('graph', () => {
  test('builds edges between files that reference each other\'s symbols', async () => {
    // Extract tags for every fixture file before building the graph.
    const allTags = []
    for (const f of FIXTURE_FILES) {
      const tags = await extractTags(f, FIXTURE_ROOT)
      if (tags) allTags.push(tags)
    }

    const graph = buildGraph(allTags)

    // fileA imports from fileB (references CacheLayer defined in fileB)
    expect(graph.hasEdge('fileA.ts', 'fileB.ts')).toBe(true)

    // fileA imports from fileC (references StoreConfig, DataStore defined in fileC)
    expect(graph.hasEdge('fileA.ts', 'fileC.ts')).toBe(true)

    // fileB imports from fileC (references DataStore defined in fileC)
    expect(graph.hasEdge('fileB.ts', 'fileC.ts')).toBe(true)

    // fileD imports from fileA
    expect(graph.hasEdge('fileD.ts', 'fileA.ts')).toBe(true)

    // fileE is isolated — no edges to/from it
    expect(graph.degree('fileE.ts')).toBe(0)
  })
})
|
||||||
|
|
||||||
|
// End-to-end ranking check: the most-referenced fixture file (fileC) must
// appear first in the rendered map.
describe('pagerank', () => {
  test('ranks the most-imported file highest', async () => {
    const result = await buildRepoMap({
      root: FIXTURE_ROOT,
      maxTokens: 2048,
      files: FIXTURE_FILES,
    })

    // The map starts with the highest-ranked file
    const firstFile = result.map.split('\n')[0]
    expect(firstFile).toBe('fileC.ts:')

    // fileE should be ranked lowest (or near last)
    const lines = result.map.split('\n')
    // Map each fixture file to the index of its header line in the output,
    // dropping files that were not rendered at all.
    const filePositions = FIXTURE_FILES.map(f => {
      const idx = lines.findIndex(l => l === `${f}:`)
      return { file: f, position: idx }
    }).filter(x => x.position >= 0)
      .sort((a, b) => a.position - b.position)

    // fileC should be first
    expect(filePositions[0]!.file).toBe('fileC.ts')

    // fileE should be last (or among the last)
    const lastFile = filePositions[filePositions.length - 1]!.file
    expect(['fileD.ts', 'fileE.ts']).toContain(lastFile)
  })
})
|
||||||
|
|
||||||
|
// Budget behavior of the renderer: stays within the token limit and drops
// whole files rather than emitting partial sections.
describe('renderer', () => {
  test('respects the token budget within 5%', async () => {
    const maxTokens = 500
    const result = await buildRepoMap({
      root: FIXTURE_ROOT,
      maxTokens,
      files: FIXTURE_FILES,
    })

    // Re-count independently and also trust the reported tokenCount.
    const actualTokens = countTokens(result.map)
    expect(actualTokens).toBeLessThanOrEqual(maxTokens * 1.05)
    expect(result.tokenCount).toBeLessThanOrEqual(maxTokens * 1.05)
  })

  test('drops files that don\'t fit rather than listing their names', async () => {
    // Very tight budget — should only fit 1-2 files
    const result = await buildRepoMap({
      root: FIXTURE_ROOT,
      maxTokens: 100,
      files: FIXTURE_FILES,
    })

    // Count how many files appear as headers in the output
    const fileHeaders = result.map.split('\n').filter(l => l.endsWith(':') && !l.startsWith(' '))

    // Every file header in the output should have its signatures listed
    for (const header of fileHeaders) {
      // The file must have at least one signature line after it
      const headerIdx = result.map.indexOf(header)
      const afterHeader = result.map.slice(headerIdx + header.length)
      // Should have content (signatures), not just the filename
      expect(afterHeader.trim().length).toBeGreaterThan(0)
    }

    // Should have fewer files than total
    expect(fileHeaders.length).toBeLessThan(FIXTURE_FILES.length)
  })
})
|
||||||
|
|
||||||
|
// Caching behavior: warm rebuilds hit the cache; touching a file's mtime
// invalidates only that file's per-file entry.
describe('cache', () => {
  test('second build of unchanged fixture uses the cache', async () => {
    // First build (cold)
    const result1 = await buildRepoMap({
      root: FIXTURE_ROOT,
      maxTokens: 2048,
      files: FIXTURE_FILES,
    })
    expect(result1.cacheHit).toBe(false)

    // Second build (warm)
    const result2 = await buildRepoMap({
      root: FIXTURE_ROOT,
      maxTokens: 2048,
      files: FIXTURE_FILES,
    })
    expect(result2.cacheHit).toBe(true)
    expect(result2.buildTimeMs).toBeLessThan(result1.buildTimeMs)

    // Output should be identical
    expect(result2.map).toBe(result1.map)
  })

  test('modifying a file invalidates only that file', async () => {
    // Create a temp copy of the fixture
    const tempDir = mkdtempSync(join(tmpdir(), 'repomap-test-'))
    try {
      for (const f of FIXTURE_FILES) {
        cpSync(join(FIXTURE_ROOT, f), join(tempDir, f))
      }

      // First build
      const result1 = await buildRepoMap({
        root: tempDir,
        maxTokens: 2048,
        files: FIXTURE_FILES,
      })
      expect(result1.cacheHit).toBe(false)

      // Touch one file to change its mtime
      const targetFile = join(tempDir, 'fileE.ts')
      const now = new Date()
      utimesSync(targetFile, now, now)

      // Second build — rendered cache should be invalidated because file list hash
      // includes the files and the rendered map hash changes with different mtimes
      // for the per-file cache check
      invalidateCache(tempDir)
      const result2 = await buildRepoMap({
        root: tempDir,
        maxTokens: 2048,
        files: FIXTURE_FILES,
      })
      // The per-file cache for fileE should miss (mtime changed),
      // but other files should still hit the per-file cache
      expect(result2.cacheHit).toBe(false)

      // Output should still be valid
      expect(result2.map.length).toBeGreaterThan(0)
      expect(result2.fileCount).toBe(result1.fileCount)
    } finally {
      // Always remove the temp copy and its cache entries.
      rmSync(tempDir, { recursive: true, force: true })
      invalidateCache(tempDir)
    }
  })
})
|
||||||
|
|
||||||
|
// File discovery must work even without git metadata present.
describe('gitFiles', () => {
  test('falls back gracefully when not in a git repo', async () => {
    // Create a temp directory with source files but NO .git
    const tempDir = mkdtempSync(join(tmpdir(), 'repomap-nogit-'))
    try {
      writeFileSync(
        join(tempDir, 'hello.ts'),
        'export function hello(): string { return "world" }\n',
      )
      writeFileSync(
        join(tempDir, 'utils.ts'),
        'export function add(a: number, b: number): number { return a + b }\n',
      )

      // No `files` override: discovery must find the files itself.
      const result = await buildRepoMap({
        root: tempDir,
        maxTokens: 1024,
      })

      // Should succeed without throwing
      expect(result.map.length).toBeGreaterThan(0)
      expect(result.totalFileCount).toBeGreaterThan(0)
    } finally {
      rmSync(tempDir, { recursive: true, force: true })
      invalidateCache(tempDir)
    }
  })
})
|
||||||
|
|
||||||
|
// A file that fails to parse must not abort the whole build.
describe('error handling', () => {
  test('no crash on malformed source file', async () => {
    const tempDir = mkdtempSync(join(tmpdir(), 'repomap-malformed-'))
    try {
      // Valid file
      writeFileSync(
        join(tempDir, 'good.ts'),
        'export function good(): number { return 1 }\n',
      )
      // Malformed file — severe syntax errors
      writeFileSync(
        join(tempDir, 'bad.ts'),
        '}{}{}{export classclass [[[ function ,,, @@@ ###\n',
      )

      const result = await buildRepoMap({
        root: tempDir,
        maxTokens: 1024,
        files: ['good.ts', 'bad.ts'],
      })

      // Should complete successfully
      expect(result.map.length).toBeGreaterThan(0)
      // The good file should be in the output
      expect(result.map).toContain('good.ts')
    } finally {
      rmSync(tempDir, { recursive: true, force: true })
      invalidateCache(tempDir)
    }
  })
})
|
||||||
108
src/context/repoMap/symbolExtractor.ts
Normal file
108
src/context/repoMap/symbolExtractor.ts
Normal file
@@ -0,0 +1,108 @@
|
|||||||
|
import { readFileSync } from 'fs'
|
||||||
|
import { join } from 'path'
|
||||||
|
import { getLanguageForFile } from './gitFiles.js'
|
||||||
|
import { createParser, loadLanguage, loadQuery } from './parser.js'
|
||||||
|
import type { FileTags, Tag } from './types.js'
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Extract definition and reference tags from a single source file.
|
||||||
|
* Returns null if the file can't be parsed (unsupported language, parse error, etc).
|
||||||
|
*/
|
||||||
|
export async function extractTags(
|
||||||
|
filePath: string,
|
||||||
|
root: string,
|
||||||
|
): Promise<FileTags | null> {
|
||||||
|
const language = getLanguageForFile(filePath)
|
||||||
|
if (!language) return null
|
||||||
|
|
||||||
|
const absolutePath = join(root, filePath)
|
||||||
|
let source: string
|
||||||
|
try {
|
||||||
|
source = readFileSync(absolutePath, 'utf-8')
|
||||||
|
} catch {
|
||||||
|
return null
|
||||||
|
}
|
||||||
|
|
||||||
|
const lines = source.split('\n')
|
||||||
|
|
||||||
|
const parser = await createParser(language)
|
||||||
|
if (!parser) return null
|
||||||
|
|
||||||
|
const querySource = loadQuery(language)
|
||||||
|
if (!querySource) {
|
||||||
|
parser.delete()
|
||||||
|
return null
|
||||||
|
}
|
||||||
|
|
||||||
|
try {
|
||||||
|
const tree = parser.parse(source) as {
|
||||||
|
rootNode: unknown
|
||||||
|
}
|
||||||
|
|
||||||
|
const lang = await loadLanguage(language)
|
||||||
|
if (!lang) {
|
||||||
|
parser.delete()
|
||||||
|
return null
|
||||||
|
}
|
||||||
|
|
||||||
|
// Use the non-deprecated Query constructor
|
||||||
|
const { Query } = await import('web-tree-sitter')
|
||||||
|
const query = new Query(lang, querySource) as {
|
||||||
|
matches(rootNode: unknown): Array<{
|
||||||
|
pattern: number
|
||||||
|
captures: Array<{
|
||||||
|
name: string
|
||||||
|
node: {
|
||||||
|
text: string
|
||||||
|
startPosition: { row: number; column: number }
|
||||||
|
endPosition: { row: number; column: number }
|
||||||
|
}
|
||||||
|
}>
|
||||||
|
}>
|
||||||
|
}
|
||||||
|
|
||||||
|
const matches = query.matches(tree.rootNode)
|
||||||
|
const tags: Tag[] = []
|
||||||
|
const seen = new Set<string>() // dedup by kind+name+line
|
||||||
|
|
||||||
|
for (const match of matches) {
|
||||||
|
let name: string | null = null
|
||||||
|
let kind: 'def' | 'ref' | null = null
|
||||||
|
let subKind: string | undefined
|
||||||
|
let lineRow = 0
|
||||||
|
|
||||||
|
for (const capture of match.captures) {
|
||||||
|
const captureName = capture.name
|
||||||
|
|
||||||
|
// Name captures: name.definition.X or name.reference.X
|
||||||
|
if (captureName.startsWith('name.definition.')) {
|
||||||
|
name = capture.node.text
|
||||||
|
kind = 'def'
|
||||||
|
subKind = captureName.slice('name.definition.'.length)
|
||||||
|
lineRow = capture.node.startPosition.row
|
||||||
|
} else if (captureName.startsWith('name.reference.')) {
|
||||||
|
name = capture.node.text
|
||||||
|
kind = 'ref'
|
||||||
|
subKind = captureName.slice('name.reference.'.length)
|
||||||
|
lineRow = capture.node.startPosition.row
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (name && kind) {
|
||||||
|
const key = `${kind}:${name}:${lineRow}`
|
||||||
|
if (!seen.has(key)) {
|
||||||
|
seen.add(key)
|
||||||
|
const line = lineRow + 1 // convert 0-based to 1-based
|
||||||
|
const signature = lines[lineRow]?.trimEnd() ?? ''
|
||||||
|
tags.push({ kind, name, line, signature, subKind })
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
parser.delete()
|
||||||
|
return { path: filePath, tags }
|
||||||
|
} catch {
|
||||||
|
parser.delete()
|
||||||
|
return null
|
||||||
|
}
|
||||||
|
}
|
||||||
15
src/context/repoMap/tokenize.ts
Normal file
15
src/context/repoMap/tokenize.ts
Normal file
@@ -0,0 +1,15 @@
|
|||||||
|
import { getEncoding, type Tiktoken } from 'js-tiktoken'
|
||||||
|
|
||||||
|
let encoder: Tiktoken | null = null
|
||||||
|
|
||||||
|
function getEncoder() {
|
||||||
|
if (!encoder) {
|
||||||
|
encoder = getEncoding('cl100k_base')
|
||||||
|
}
|
||||||
|
return encoder
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Count the number of tokens in a string using cl100k_base encoding. */
|
||||||
|
export function countTokens(text: string): number {
|
||||||
|
return getEncoder().encode(text).length
|
||||||
|
}
|
||||||
65
src/context/repoMap/types.ts
Normal file
65
src/context/repoMap/types.ts
Normal file
@@ -0,0 +1,65 @@
|
|||||||
|
/** One symbol occurrence (a definition or a reference) found in a source file. */
export interface Tag {
  /** 'def' for definitions, 'ref' for references */
  kind: 'def' | 'ref'
  /** Symbol name (e.g. function name, class name) */
  name: string
  /** 1-based line number in the source file */
  line: number
  /** The full line of source code at this position (used as signature for defs) */
  signature: string
  /** Sub-kind from the query (e.g. 'function', 'class', 'method', 'type') */
  subKind?: string
}

/** All tags extracted from a single source file. */
export interface FileTags {
  /** Relative path from the repo root */
  path: string
  /** All tags extracted from this file */
  tags: Tag[]
}

/** Options accepted when building a repo map. */
export interface RepoMapOptions {
  /** Root directory of the repo (defaults to cwd) */
  root?: string
  /** Maximum token budget for the rendered map */
  maxTokens?: number
  /** Files to boost in PageRank (relative paths) */
  focusFiles?: string[]
  /** Override the list of files to process (relative paths) */
  files?: string[]
}

/** Result of a repo-map build. */
export interface RepoMapResult {
  /** The rendered repo map string */
  map: string
  /** Whether the result came from cache */
  cacheHit: boolean
  /** Time in milliseconds to build the map */
  buildTimeMs: number
  /** Number of files included in the rendered map */
  fileCount: number
  /** Total number of files processed */
  totalFileCount: number
  /** Actual token count of the rendered map */
  tokenCount: number
}

/** Cached extraction result for one file; mtimeMs/size are used to detect staleness. */
export interface CacheEntry {
  // Tags extracted from the file at cache time.
  tags: Tag[]
  // File mtime (in ms) when the entry was written.
  mtimeMs: number
  // File size when the entry was written.
  size: number
}

/** On-disk cache payload: a versioned map of entries (presumably keyed by file path — verify against the cache writer). */
export interface CacheData {
  // Cache format version; bump to invalidate old caches.
  version: number
  entries: Record<string, CacheEntry>
}

/** Introspection info about the cache's on-disk state. */
export interface CacheStats {
  cacheDir: string
  cacheFile: string | null
  entryCount: number
  exists: boolean
}

/** Languages the repo-map pipeline has grammars and tag queries for. */
export type SupportedLanguage = 'typescript' | 'javascript' | 'python'
|
||||||
@@ -5,7 +5,7 @@ import {
|
|||||||
} from '../utils/providerProfile.js'
|
} from '../utils/providerProfile.js'
|
||||||
import {
|
import {
|
||||||
getProviderValidationError,
|
getProviderValidationError,
|
||||||
validateProviderEnvForStartupOrExit,
|
validateProviderEnvOrExit,
|
||||||
} from '../utils/providerValidation.js'
|
} from '../utils/providerValidation.js'
|
||||||
|
|
||||||
// OpenClaude: polyfill globalThis.File for Node < 20.
|
// OpenClaude: polyfill globalThis.File for Node < 20.
|
||||||
@@ -132,7 +132,7 @@ async function main(): Promise<void> {
|
|||||||
hydrateGithubModelsTokenFromSecureStorage()
|
hydrateGithubModelsTokenFromSecureStorage()
|
||||||
}
|
}
|
||||||
|
|
||||||
await validateProviderEnvForStartupOrExit()
|
await validateProviderEnvOrExit()
|
||||||
|
|
||||||
// Print the gradient startup screen before the Ink UI loads
|
// Print the gradient startup screen before the Ink UI loads
|
||||||
const { printStartupScreen } = await import('../components/StartupScreen.js')
|
const { printStartupScreen } = await import('../components/StartupScreen.js')
|
||||||
|
|||||||
@@ -1,75 +0,0 @@
|
|||||||
import { describe, it, expect, mock } from 'bun:test'
|
|
||||||
import { getCombinedTools, loadReexposedMcpTools } from './mcp.js'
|
|
||||||
import type { Tool as InternalTool } from '../Tool.js'
|
|
||||||
import type { MCPServerConnection } from '../services/mcp/types.js'
|
|
||||||
import type { Tool } from '@modelcontextprotocol/sdk/types.js'
|
|
||||||
|
|
||||||
// Mock the MCP client service to control the tools and connections returned
|
|
||||||
const mockGetMcpToolsCommandsAndResources = mock(async (onConnectionAttempt: any) => {})
|
|
||||||
mock.module('../services/mcp/client.js', () => ({
|
|
||||||
getMcpToolsCommandsAndResources: mockGetMcpToolsCommandsAndResources
|
|
||||||
}))
|
|
||||||
|
|
||||||
describe('getCombinedTools', () => {
|
|
||||||
it('deduplicates builtins when mcpTools have the same name, prioritizing mcpTools', () => {
|
|
||||||
const builtinBash = { name: 'Bash', isMcp: false } as unknown as InternalTool
|
|
||||||
const builtinRead = { name: 'Read', isMcp: false } as unknown as InternalTool
|
|
||||||
const mcpBash = { name: 'Bash', isMcp: true } as unknown as InternalTool
|
|
||||||
|
|
||||||
const builtins = [builtinBash, builtinRead]
|
|
||||||
const mcpTools = [mcpBash]
|
|
||||||
|
|
||||||
const result = getCombinedTools(builtins, mcpTools)
|
|
||||||
|
|
||||||
expect(result).toHaveLength(2)
|
|
||||||
expect(result[0]).toBe(mcpBash)
|
|
||||||
expect(result[1]).toBe(builtinRead)
|
|
||||||
})
|
|
||||||
})
|
|
||||||
|
|
||||||
describe('loadReexposedMcpTools', () => {
|
|
||||||
it('loads tools and clients regardless of connection state (including needs-auth)', async () => {
|
|
||||||
// Setup the mock to simulate yielding a needs-auth server and a connected server
|
|
||||||
mockGetMcpToolsCommandsAndResources.mockImplementation(async (onConnectionAttempt) => {
|
|
||||||
const needsAuthClient = {
|
|
||||||
name: 'auth-server',
|
|
||||||
type: 'needs-auth',
|
|
||||||
config: {}
|
|
||||||
} as MCPServerConnection
|
|
||||||
|
|
||||||
const authTool = {
|
|
||||||
name: 'mcp__auth-server__authenticate',
|
|
||||||
isMcp: true
|
|
||||||
} as unknown as InternalTool
|
|
||||||
|
|
||||||
const connectedClient = {
|
|
||||||
name: 'connected-server',
|
|
||||||
type: 'connected',
|
|
||||||
config: {},
|
|
||||||
client: {}
|
|
||||||
} as MCPServerConnection
|
|
||||||
|
|
||||||
const connectedTool = {
|
|
||||||
name: 'mcp__connected-server__do_thing',
|
|
||||||
isMcp: true
|
|
||||||
} as unknown as InternalTool
|
|
||||||
|
|
||||||
// Simulate the callback behavior
|
|
||||||
onConnectionAttempt({ client: needsAuthClient, tools: [authTool], commands: [] })
|
|
||||||
onConnectionAttempt({ client: connectedClient, tools: [connectedTool], commands: [] })
|
|
||||||
})
|
|
||||||
|
|
||||||
const { mcpClients, mcpTools } = await loadReexposedMcpTools()
|
|
||||||
|
|
||||||
expect(mcpClients).toHaveLength(2)
|
|
||||||
expect(mcpClients[0].type).toBe('needs-auth')
|
|
||||||
expect(mcpClients[1].type).toBe('connected')
|
|
||||||
|
|
||||||
expect(mcpTools).toHaveLength(2)
|
|
||||||
expect(mcpTools[0].name).toBe('mcp__auth-server__authenticate')
|
|
||||||
expect(mcpTools[1].name).toBe('mcp__connected-server__do_thing')
|
|
||||||
|
|
||||||
// Reset mock for other tests
|
|
||||||
mockGetMcpToolsCommandsAndResources.mockReset()
|
|
||||||
})
|
|
||||||
})
|
|
||||||
@@ -7,7 +7,6 @@ process.env.CLAUDE_CODE_DISABLE_EXPERIMENTAL_BETAS ??= 'true'
|
|||||||
|
|
||||||
import { Server } from '@modelcontextprotocol/sdk/server/index.js'
|
import { Server } from '@modelcontextprotocol/sdk/server/index.js'
|
||||||
import { StdioServerTransport } from '@modelcontextprotocol/sdk/server/stdio.js'
|
import { StdioServerTransport } from '@modelcontextprotocol/sdk/server/stdio.js'
|
||||||
import { ZodError } from 'zod'
|
|
||||||
import {
|
import {
|
||||||
CallToolRequestSchema,
|
CallToolRequestSchema,
|
||||||
type CallToolResult,
|
type CallToolResult,
|
||||||
@@ -18,12 +17,9 @@ import {
|
|||||||
import { getDefaultAppState } from 'src/state/AppStateStore.js'
|
import { getDefaultAppState } from 'src/state/AppStateStore.js'
|
||||||
import review from '../commands/review.js'
|
import review from '../commands/review.js'
|
||||||
import type { Command } from '../commands.js'
|
import type { Command } from '../commands.js'
|
||||||
import { getMcpToolsCommandsAndResources } from '../services/mcp/client.js'
|
|
||||||
import type { MCPServerConnection } from '../services/mcp/types.js'
|
|
||||||
import {
|
import {
|
||||||
findToolByName,
|
findToolByName,
|
||||||
getEmptyToolPermissionContext,
|
getEmptyToolPermissionContext,
|
||||||
type Tool as InternalTool,
|
|
||||||
type ToolUseContext,
|
type ToolUseContext,
|
||||||
} from '../Tool.js'
|
} from '../Tool.js'
|
||||||
import { getTools } from '../tools.js'
|
import { getTools } from '../tools.js'
|
||||||
@@ -43,32 +39,6 @@ type ToolOutput = Tool['outputSchema']
|
|||||||
|
|
||||||
const MCP_COMMANDS: Command[] = [review]
|
const MCP_COMMANDS: Command[] = [review]
|
||||||
|
|
||||||
export function getCombinedTools(
|
|
||||||
builtins: InternalTool[],
|
|
||||||
mcpTools: InternalTool[],
|
|
||||||
): InternalTool[] {
|
|
||||||
const mcpToolNames = new Set(mcpTools.map(t => t.name))
|
|
||||||
const deduplicatedBuiltins = builtins.filter(t => !mcpToolNames.has(t.name))
|
|
||||||
|
|
||||||
return [...mcpTools, ...deduplicatedBuiltins]
|
|
||||||
}
|
|
||||||
|
|
||||||
export async function loadReexposedMcpTools(): Promise<{
|
|
||||||
mcpClients: MCPServerConnection[]
|
|
||||||
mcpTools: InternalTool[]
|
|
||||||
}> {
|
|
||||||
const mcpClients: MCPServerConnection[] = []
|
|
||||||
const mcpTools: InternalTool[] = []
|
|
||||||
|
|
||||||
// Load configured MCP clients and their tools
|
|
||||||
await getMcpToolsCommandsAndResources(({ client, tools: clientTools }) => {
|
|
||||||
mcpClients.push(client)
|
|
||||||
mcpTools.push(...clientTools)
|
|
||||||
})
|
|
||||||
|
|
||||||
return { mcpClients, mcpTools }
|
|
||||||
}
|
|
||||||
|
|
||||||
export async function startMCPServer(
|
export async function startMCPServer(
|
||||||
cwd: string,
|
cwd: string,
|
||||||
debug: boolean,
|
debug: boolean,
|
||||||
@@ -93,13 +63,12 @@ export async function startMCPServer(
|
|||||||
},
|
},
|
||||||
)
|
)
|
||||||
|
|
||||||
const { mcpClients, mcpTools } = await loadReexposedMcpTools()
|
|
||||||
|
|
||||||
server.setRequestHandler(
|
server.setRequestHandler(
|
||||||
ListToolsRequestSchema,
|
ListToolsRequestSchema,
|
||||||
async (): Promise<ListToolsResult> => {
|
async (): Promise<ListToolsResult> => {
|
||||||
|
// TODO: Also re-expose any MCP tools
|
||||||
const toolPermissionContext = getEmptyToolPermissionContext()
|
const toolPermissionContext = getEmptyToolPermissionContext()
|
||||||
const tools = getCombinedTools(getTools(toolPermissionContext), mcpTools)
|
const tools = getTools(toolPermissionContext)
|
||||||
return {
|
return {
|
||||||
tools: await Promise.all(
|
tools: await Promise.all(
|
||||||
tools.map(async tool => {
|
tools.map(async tool => {
|
||||||
@@ -125,7 +94,7 @@ export async function startMCPServer(
|
|||||||
tools,
|
tools,
|
||||||
agents: [],
|
agents: [],
|
||||||
}),
|
}),
|
||||||
inputSchema: (tool.inputJSONSchema ?? zodToJsonSchema(tool.inputSchema)) as ToolInput,
|
inputSchema: zodToJsonSchema(tool.inputSchema) as ToolInput,
|
||||||
outputSchema,
|
outputSchema,
|
||||||
}
|
}
|
||||||
}),
|
}),
|
||||||
@@ -138,7 +107,8 @@ export async function startMCPServer(
|
|||||||
CallToolRequestSchema,
|
CallToolRequestSchema,
|
||||||
async ({ params: { name, arguments: args } }): Promise<CallToolResult> => {
|
async ({ params: { name, arguments: args } }): Promise<CallToolResult> => {
|
||||||
const toolPermissionContext = getEmptyToolPermissionContext()
|
const toolPermissionContext = getEmptyToolPermissionContext()
|
||||||
const tools = getCombinedTools(getTools(toolPermissionContext), mcpTools)
|
// TODO: Also re-expose any MCP tools
|
||||||
|
const tools = getTools(toolPermissionContext)
|
||||||
const tool = findToolByName(tools, name)
|
const tool = findToolByName(tools, name)
|
||||||
if (!tool) {
|
if (!tool) {
|
||||||
throw new Error(`Tool ${name} not found`)
|
throw new Error(`Tool ${name} not found`)
|
||||||
@@ -153,7 +123,7 @@ export async function startMCPServer(
|
|||||||
tools,
|
tools,
|
||||||
mainLoopModel: getMainLoopModel(),
|
mainLoopModel: getMainLoopModel(),
|
||||||
thinkingConfig: { type: 'disabled' },
|
thinkingConfig: { type: 'disabled' },
|
||||||
mcpClients,
|
mcpClients: [],
|
||||||
mcpResources: {},
|
mcpResources: {},
|
||||||
isNonInteractiveSession: true,
|
isNonInteractiveSession: true,
|
||||||
debug,
|
debug,
|
||||||
@@ -170,16 +140,13 @@ export async function startMCPServer(
|
|||||||
updateAttributionState: () => {},
|
updateAttributionState: () => {},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// TODO: validate input types with zod
|
||||||
try {
|
try {
|
||||||
if (!tool.isEnabled()) {
|
if (!tool.isEnabled()) {
|
||||||
throw new Error(`Tool ${name} is not enabled`)
|
throw new Error(`Tool ${name} is not enabled`)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Validate input types with zod
|
|
||||||
const parsedArgs = tool.inputSchema.parse(args ?? {})
|
|
||||||
|
|
||||||
const validationResult = await tool.validateInput?.(
|
const validationResult = await tool.validateInput?.(
|
||||||
(parsedArgs as never) ?? {},
|
(args as never) ?? {},
|
||||||
toolUseContext,
|
toolUseContext,
|
||||||
)
|
)
|
||||||
if (validationResult && !validationResult.result) {
|
if (validationResult && !validationResult.result) {
|
||||||
@@ -188,7 +155,7 @@ export async function startMCPServer(
|
|||||||
)
|
)
|
||||||
}
|
}
|
||||||
const finalResult = await tool.call(
|
const finalResult = await tool.call(
|
||||||
(parsedArgs ?? {}) as never,
|
(args ?? {}) as never,
|
||||||
toolUseContext,
|
toolUseContext,
|
||||||
hasPermissionsToUseTool,
|
hasPermissionsToUseTool,
|
||||||
createAssistantMessage({
|
createAssistantMessage({
|
||||||
@@ -196,50 +163,20 @@ export async function startMCPServer(
|
|||||||
}),
|
}),
|
||||||
)
|
)
|
||||||
|
|
||||||
let content: CallToolResult['content']
|
|
||||||
const data = finalResult.data as string | { type: string; text?: string; source?: { type: string; media_type: string; data: string } }[] | unknown
|
|
||||||
|
|
||||||
if (typeof data === 'string') {
|
|
||||||
content = [{ type: 'text', text: data }]
|
|
||||||
} else if (Array.isArray(data)) {
|
|
||||||
content = data.map((block: any) => {
|
|
||||||
if (block.type === 'text') {
|
|
||||||
return { type: 'text', text: block.text || '' }
|
|
||||||
} else if (block.type === 'image' && block.source) {
|
|
||||||
return {
|
|
||||||
type: 'image',
|
|
||||||
data: block.source.data,
|
|
||||||
mimeType: block.source.media_type,
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
// eslint-disable-next-line custom-rules/no-top-level-side-effects, no-console
|
|
||||||
console.warn(`Unmapped content block type from tool ${name}: ${block.type || 'unknown'}`)
|
|
||||||
return { type: 'text', text: jsonStringify(block) }
|
|
||||||
}
|
|
||||||
}) as CallToolResult['content']
|
|
||||||
} else {
|
|
||||||
content = [{ type: 'text', text: jsonStringify(data) }]
|
|
||||||
}
|
|
||||||
|
|
||||||
return {
|
return {
|
||||||
content,
|
content: [
|
||||||
isError: !!(finalResult as any).isError,
|
{
|
||||||
|
type: 'text' as const,
|
||||||
|
text:
|
||||||
|
typeof finalResult === 'string'
|
||||||
|
? finalResult
|
||||||
|
: jsonStringify(finalResult.data),
|
||||||
|
},
|
||||||
|
],
|
||||||
}
|
}
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
logError(error)
|
logError(error)
|
||||||
|
|
||||||
if (error instanceof ZodError) {
|
|
||||||
return {
|
|
||||||
isError: true,
|
|
||||||
content: [
|
|
||||||
{
|
|
||||||
type: 'text',
|
|
||||||
text: `Tool ${name} input is invalid:\n${error.errors.map(e => `- ${e.path.join('.')}: ${e.message}`).join('\n')}`,
|
|
||||||
},
|
|
||||||
],
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
const parts =
|
const parts =
|
||||||
error instanceof Error ? getErrorParts(error) : [String(error)]
|
error instanceof Error ? getErrorParts(error) : [String(error)]
|
||||||
const errorText = parts.filter(Boolean).join('\n').trim() || 'Error'
|
const errorText = parts.filter(Boolean).join('\n').trim() || 'Error'
|
||||||
@@ -264,4 +201,3 @@ export async function startMCPServer(
|
|||||||
|
|
||||||
return await runServer()
|
return await runServer()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -1,5 +1,4 @@
|
|||||||
import { APIError } from '@anthropic-ai/sdk'
|
import { APIError } from '@anthropic-ai/sdk'
|
||||||
import { fetchWithProxyRetry } from './fetchWithProxyRetry.js'
|
|
||||||
import type {
|
import type {
|
||||||
ResolvedCodexCredentials,
|
ResolvedCodexCredentials,
|
||||||
ResolvedProviderRequest,
|
ResolvedProviderRequest,
|
||||||
@@ -560,15 +559,12 @@ export async function performCodexRequest(options: {
|
|||||||
}
|
}
|
||||||
headers.originator ??= 'openclaude'
|
headers.originator ??= 'openclaude'
|
||||||
|
|
||||||
const response = await fetchWithProxyRetry(
|
const response = await fetch(`${options.request.baseUrl}/responses`, {
|
||||||
`${options.request.baseUrl}/responses`,
|
method: 'POST',
|
||||||
{
|
headers,
|
||||||
method: 'POST',
|
body: JSON.stringify(body),
|
||||||
headers,
|
signal: options.signal,
|
||||||
body: JSON.stringify(body),
|
})
|
||||||
signal: options.signal,
|
|
||||||
},
|
|
||||||
)
|
|
||||||
|
|
||||||
if (!response.ok) {
|
if (!response.ok) {
|
||||||
const errorBody = await response.text().catch(() => 'unknown error')
|
const errorBody = await response.text().catch(() => 'unknown error')
|
||||||
|
|||||||
@@ -1,44 +0,0 @@
|
|||||||
import { APIError } from '@anthropic-ai/sdk'
|
|
||||||
import { expect, test } from 'bun:test'
|
|
||||||
|
|
||||||
import { getAssistantMessageFromError } from './errors.js'
|
|
||||||
|
|
||||||
function getFirstText(message: ReturnType<typeof getAssistantMessageFromError>): string {
|
|
||||||
const first = message.message.content[0]
|
|
||||||
if (!first || typeof first !== 'object' || !('text' in first)) {
|
|
||||||
return ''
|
|
||||||
}
|
|
||||||
return typeof first.text === 'string' ? first.text : ''
|
|
||||||
}
|
|
||||||
|
|
||||||
test('maps endpoint_not_found category markers to actionable setup guidance', () => {
|
|
||||||
const error = APIError.generate(
|
|
||||||
404,
|
|
||||||
undefined,
|
|
||||||
'OpenAI API error 404: Not Found [openai_category=endpoint_not_found] Hint: Confirm OPENAI_BASE_URL includes /v1.',
|
|
||||||
new Headers(),
|
|
||||||
)
|
|
||||||
|
|
||||||
const message = getAssistantMessageFromError(error, 'qwen2.5-coder:7b')
|
|
||||||
const text = getFirstText(message)
|
|
||||||
|
|
||||||
expect(message.isApiErrorMessage).toBe(true)
|
|
||||||
expect(text).toContain('Provider endpoint was not found')
|
|
||||||
expect(text).toContain('OPENAI_BASE_URL')
|
|
||||||
expect(text).toContain('/v1')
|
|
||||||
})
|
|
||||||
|
|
||||||
test('maps tool_call_incompatible category markers to model/tool guidance', () => {
|
|
||||||
const error = APIError.generate(
|
|
||||||
400,
|
|
||||||
undefined,
|
|
||||||
'OpenAI API error 400: tool_calls are not supported [openai_category=tool_call_incompatible]',
|
|
||||||
new Headers(),
|
|
||||||
)
|
|
||||||
|
|
||||||
const message = getAssistantMessageFromError(error, 'qwen2.5-coder:7b')
|
|
||||||
const text = getFirstText(message)
|
|
||||||
|
|
||||||
expect(text).toContain('rejected tool-calling payloads')
|
|
||||||
expect(text).toContain('/model')
|
|
||||||
})
|
|
||||||
@@ -50,110 +50,9 @@ import {
|
|||||||
} from '../claudeAiLimits.js'
|
} from '../claudeAiLimits.js'
|
||||||
import { shouldProcessRateLimits } from '../rateLimitMocking.js' // Used for /mock-limits command
|
import { shouldProcessRateLimits } from '../rateLimitMocking.js' // Used for /mock-limits command
|
||||||
import { extractConnectionErrorDetails, formatAPIError } from './errorUtils.js'
|
import { extractConnectionErrorDetails, formatAPIError } from './errorUtils.js'
|
||||||
import {
|
|
||||||
extractOpenAICategoryMarker,
|
|
||||||
type OpenAICompatibilityFailureCategory,
|
|
||||||
} from './openaiErrorClassification.js'
|
|
||||||
|
|
||||||
export const API_ERROR_MESSAGE_PREFIX = 'API Error'
|
export const API_ERROR_MESSAGE_PREFIX = 'API Error'
|
||||||
|
|
||||||
function stripOpenAICompatibilityMetadata(message: string): string {
|
|
||||||
return message
|
|
||||||
.replace(/\s*\[openai_category=[a-z_]+\]\s*/g, ' ')
|
|
||||||
.replace(/\s{2,}/g, ' ')
|
|
||||||
.trim()
|
|
||||||
}
|
|
||||||
|
|
||||||
function mapOpenAICompatibilityFailureToAssistantMessage(options: {
|
|
||||||
category: OpenAICompatibilityFailureCategory
|
|
||||||
model: string
|
|
||||||
rawMessage: string
|
|
||||||
}): AssistantMessage {
|
|
||||||
const switchCmd = getIsNonInteractiveSession() ? '--model' : '/model'
|
|
||||||
const compactHint = getIsNonInteractiveSession()
|
|
||||||
? 'Reduce prompt size or start a new session.'
|
|
||||||
: 'Run /compact or start a new session with /new.'
|
|
||||||
|
|
||||||
switch (options.category) {
|
|
||||||
case 'localhost_resolution_failed':
|
|
||||||
case 'connection_refused':
|
|
||||||
return createAssistantAPIErrorMessage({
|
|
||||||
content:
|
|
||||||
'Could not connect to the local OpenAI-compatible provider. Ensure the local server is running, then use OPENAI_BASE_URL=http://127.0.0.1:11434/v1 for Ollama.',
|
|
||||||
error: 'unknown',
|
|
||||||
})
|
|
||||||
|
|
||||||
case 'endpoint_not_found':
|
|
||||||
return createAssistantAPIErrorMessage({
|
|
||||||
content:
|
|
||||||
'Provider endpoint was not found. Confirm OPENAI_BASE_URL targets an OpenAI-compatible /v1 endpoint (for Ollama: http://127.0.0.1:11434/v1).',
|
|
||||||
error: 'invalid_request',
|
|
||||||
})
|
|
||||||
|
|
||||||
case 'model_not_found':
|
|
||||||
return createAssistantAPIErrorMessage({
|
|
||||||
content: `The selected model (${options.model}) is not available on this provider. Run ${switchCmd} to choose another model, or verify installed local models (for Ollama: ollama list).`,
|
|
||||||
error: 'invalid_request',
|
|
||||||
})
|
|
||||||
|
|
||||||
case 'auth_invalid':
|
|
||||||
return createAssistantAPIErrorMessage({
|
|
||||||
content: `${API_ERROR_MESSAGE_PREFIX}: Authentication failed for your OpenAI-compatible provider. Verify OPENAI_API_KEY and endpoint-specific auth requirements.`,
|
|
||||||
error: 'authentication_failed',
|
|
||||||
})
|
|
||||||
|
|
||||||
case 'rate_limited':
|
|
||||||
return createAssistantAPIErrorMessage({
|
|
||||||
content: `${API_ERROR_MESSAGE_PREFIX}: Provider rate limit reached. Retry in a few seconds.`,
|
|
||||||
error: 'rate_limit',
|
|
||||||
})
|
|
||||||
|
|
||||||
case 'request_timeout':
|
|
||||||
return createAssistantAPIErrorMessage({
|
|
||||||
content: `${API_ERROR_MESSAGE_PREFIX}: Provider request timed out. Local models may be loading or overloaded; retry shortly or increase API_TIMEOUT_MS.`,
|
|
||||||
error: 'unknown',
|
|
||||||
})
|
|
||||||
|
|
||||||
case 'context_overflow':
|
|
||||||
return createAssistantAPIErrorMessage({
|
|
||||||
content: `The conversation exceeded the provider context limit. ${compactHint}`,
|
|
||||||
error: 'invalid_request',
|
|
||||||
})
|
|
||||||
|
|
||||||
case 'tool_call_incompatible':
|
|
||||||
return createAssistantAPIErrorMessage({
|
|
||||||
content: `The selected provider/model rejected tool-calling payloads. Try ${switchCmd} to pick a tool-capable model or continue without tools.`,
|
|
||||||
error: 'invalid_request',
|
|
||||||
})
|
|
||||||
|
|
||||||
case 'malformed_provider_response':
|
|
||||||
return createAssistantAPIErrorMessage({
|
|
||||||
content: `${API_ERROR_MESSAGE_PREFIX}: Provider returned a malformed response. Confirm endpoint compatibility and check local proxy/network middleware.`,
|
|
||||||
error: 'unknown',
|
|
||||||
errorDetails: stripOpenAICompatibilityMetadata(options.rawMessage),
|
|
||||||
})
|
|
||||||
|
|
||||||
case 'provider_unavailable':
|
|
||||||
return createAssistantAPIErrorMessage({
|
|
||||||
content: `${API_ERROR_MESSAGE_PREFIX}: Provider is temporarily unavailable. Retry in a moment.`,
|
|
||||||
error: 'unknown',
|
|
||||||
})
|
|
||||||
|
|
||||||
case 'network_error':
|
|
||||||
case 'unknown':
|
|
||||||
return createAssistantAPIErrorMessage({
|
|
||||||
content: `${API_ERROR_MESSAGE_PREFIX}: ${stripOpenAICompatibilityMetadata(options.rawMessage)}`,
|
|
||||||
error: 'unknown',
|
|
||||||
})
|
|
||||||
|
|
||||||
default:
|
|
||||||
return createAssistantAPIErrorMessage({
|
|
||||||
content: `${API_ERROR_MESSAGE_PREFIX}: ${stripOpenAICompatibilityMetadata(options.rawMessage)}`,
|
|
||||||
error: 'unknown',
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
export function startsWithApiErrorPrefix(text: string): boolean {
|
export function startsWithApiErrorPrefix(text: string): boolean {
|
||||||
return (
|
return (
|
||||||
text.startsWith(API_ERROR_MESSAGE_PREFIX) ||
|
text.startsWith(API_ERROR_MESSAGE_PREFIX) ||
|
||||||
@@ -558,19 +457,6 @@ export function getAssistantMessageFromError(
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
// OpenAI-compatible transport and HTTP failures include structured category
|
|
||||||
// markers from openaiShim.ts for actionable end-user remediation.
|
|
||||||
if (error instanceof APIError) {
|
|
||||||
const openaiCategory = extractOpenAICategoryMarker(error.message)
|
|
||||||
if (openaiCategory) {
|
|
||||||
return mapOpenAICompatibilityFailureToAssistantMessage({
|
|
||||||
category: openaiCategory,
|
|
||||||
model,
|
|
||||||
rawMessage: error.message,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check for emergency capacity off switch for Opus PAYG users
|
// Check for emergency capacity off switch for Opus PAYG users
|
||||||
if (
|
if (
|
||||||
error instanceof Error &&
|
error instanceof Error &&
|
||||||
|
|||||||
@@ -1,86 +0,0 @@
|
|||||||
import { afterEach, beforeEach, expect, test } from 'bun:test'
|
|
||||||
|
|
||||||
import { _resetKeepAliveForTesting } from '../../utils/proxy.js'
|
|
||||||
import {
|
|
||||||
fetchWithProxyRetry,
|
|
||||||
isRetryableFetchError,
|
|
||||||
} from './fetchWithProxyRetry.js'
|
|
||||||
|
|
||||||
type FetchType = typeof globalThis.fetch
|
|
||||||
|
|
||||||
const originalFetch = globalThis.fetch
|
|
||||||
const originalEnv = {
|
|
||||||
HTTP_PROXY: process.env.HTTP_PROXY,
|
|
||||||
HTTPS_PROXY: process.env.HTTPS_PROXY,
|
|
||||||
}
|
|
||||||
|
|
||||||
function restoreEnv(key: 'HTTP_PROXY' | 'HTTPS_PROXY', value: string | undefined): void {
|
|
||||||
if (value === undefined) {
|
|
||||||
delete process.env[key]
|
|
||||||
} else {
|
|
||||||
process.env[key] = value
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
beforeEach(() => {
|
|
||||||
process.env.HTTP_PROXY = 'http://127.0.0.1:15236'
|
|
||||||
delete process.env.HTTPS_PROXY
|
|
||||||
_resetKeepAliveForTesting()
|
|
||||||
})
|
|
||||||
|
|
||||||
afterEach(() => {
|
|
||||||
globalThis.fetch = originalFetch
|
|
||||||
restoreEnv('HTTP_PROXY', originalEnv.HTTP_PROXY)
|
|
||||||
restoreEnv('HTTPS_PROXY', originalEnv.HTTPS_PROXY)
|
|
||||||
_resetKeepAliveForTesting()
|
|
||||||
})
|
|
||||||
|
|
||||||
test('isRetryableFetchError matches Bun socket-closed failures', () => {
|
|
||||||
expect(
|
|
||||||
isRetryableFetchError(
|
|
||||||
new Error(
|
|
||||||
'The socket connection was closed unexpectedly. For more information, pass `verbose: true` in the second argument to fetch()',
|
|
||||||
),
|
|
||||||
),
|
|
||||||
).toBe(true)
|
|
||||||
})
|
|
||||||
|
|
||||||
test('fetchWithProxyRetry retries once with keepalive disabled after socket closure', async () => {
|
|
||||||
const calls: Array<RequestInit | undefined> = []
|
|
||||||
|
|
||||||
globalThis.fetch = (async (_input, init) => {
|
|
||||||
calls.push(init)
|
|
||||||
if (calls.length === 1) {
|
|
||||||
throw new Error(
|
|
||||||
'The socket connection was closed unexpectedly. For more information, pass `verbose: true` in the second argument to fetch()',
|
|
||||||
)
|
|
||||||
}
|
|
||||||
return new Response('ok')
|
|
||||||
}) as FetchType
|
|
||||||
|
|
||||||
const response = await fetchWithProxyRetry('https://example.com/search', {
|
|
||||||
method: 'POST',
|
|
||||||
})
|
|
||||||
|
|
||||||
expect(await response.text()).toBe('ok')
|
|
||||||
expect(calls).toHaveLength(2)
|
|
||||||
expect((calls[0] as RequestInit & { proxy?: string }).proxy).toBe(
|
|
||||||
'http://127.0.0.1:15236',
|
|
||||||
)
|
|
||||||
expect((calls[0] as RequestInit).keepalive).toBeUndefined()
|
|
||||||
expect((calls[1] as RequestInit).keepalive).toBe(false)
|
|
||||||
})
|
|
||||||
|
|
||||||
test('fetchWithProxyRetry does not retry non-network errors', async () => {
|
|
||||||
let attempts = 0
|
|
||||||
|
|
||||||
globalThis.fetch = (async () => {
|
|
||||||
attempts += 1
|
|
||||||
throw new Error('400 bad request')
|
|
||||||
}) as FetchType
|
|
||||||
|
|
||||||
await expect(fetchWithProxyRetry('https://example.com')).rejects.toThrow(
|
|
||||||
'400 bad request',
|
|
||||||
)
|
|
||||||
expect(attempts).toBe(1)
|
|
||||||
})
|
|
||||||
@@ -1,44 +0,0 @@
|
|||||||
import { disableKeepAlive, getProxyFetchOptions } from '../../utils/proxy.js'
|
|
||||||
|
|
||||||
const RETRYABLE_FETCH_ERROR_PATTERN =
|
|
||||||
/socket connection was closed unexpectedly|ECONNRESET|EPIPE|socket hang up|Connection reset by peer|fetch failed/i
|
|
||||||
|
|
||||||
export function isRetryableFetchError(error: unknown): boolean {
|
|
||||||
if (!(error instanceof Error)) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
if (error.name === 'AbortError') {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
return RETRYABLE_FETCH_ERROR_PATTERN.test(error.message)
|
|
||||||
}
|
|
||||||
|
|
||||||
export async function fetchWithProxyRetry(
|
|
||||||
input: string | URL | Request,
|
|
||||||
init?: RequestInit,
|
|
||||||
options?: { forAnthropicAPI?: boolean; maxAttempts?: number },
|
|
||||||
): Promise<Response> {
|
|
||||||
const maxAttempts = Math.max(1, options?.maxAttempts ?? 2)
|
|
||||||
let lastError: unknown
|
|
||||||
|
|
||||||
for (let attempt = 1; attempt <= maxAttempts; attempt++) {
|
|
||||||
try {
|
|
||||||
return await fetch(input, {
|
|
||||||
...init,
|
|
||||||
...getProxyFetchOptions({
|
|
||||||
forAnthropicAPI: options?.forAnthropicAPI,
|
|
||||||
}),
|
|
||||||
})
|
|
||||||
} catch (error) {
|
|
||||||
lastError = error
|
|
||||||
if (attempt >= maxAttempts || !isRetryableFetchError(error)) {
|
|
||||||
throw error
|
|
||||||
}
|
|
||||||
disableKeepAlive()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
throw lastError instanceof Error
|
|
||||||
? lastError
|
|
||||||
: new Error('Fetch failed without an error object')
|
|
||||||
}
|
|
||||||
@@ -1,97 +0,0 @@
|
|||||||
import { expect, test } from 'bun:test'
|
|
||||||
|
|
||||||
import {
|
|
||||||
buildOpenAICompatibilityErrorMessage,
|
|
||||||
classifyOpenAIHttpFailure,
|
|
||||||
classifyOpenAINetworkFailure,
|
|
||||||
extractOpenAICategoryMarker,
|
|
||||||
formatOpenAICategoryMarker,
|
|
||||||
} from './openaiErrorClassification.js'
|
|
||||||
|
|
||||||
test('classifies localhost ECONNREFUSED as connection_refused', () => {
|
|
||||||
const error = Object.assign(new TypeError('fetch failed'), {
|
|
||||||
code: 'ECONNREFUSED',
|
|
||||||
})
|
|
||||||
|
|
||||||
const failure = classifyOpenAINetworkFailure(error, {
|
|
||||||
url: 'http://localhost:11434/v1/chat/completions',
|
|
||||||
})
|
|
||||||
|
|
||||||
expect(failure.category).toBe('connection_refused')
|
|
||||||
expect(failure.retryable).toBe(true)
|
|
||||||
expect(failure.code).toBe('ECONNREFUSED')
|
|
||||||
expect(failure.hint).toContain('local server is running')
|
|
||||||
})
|
|
||||||
|
|
||||||
test('classifies localhost ENOTFOUND as localhost_resolution_failed', () => {
|
|
||||||
const error = Object.assign(new TypeError('getaddrinfo ENOTFOUND localhost'), {
|
|
||||||
code: 'ENOTFOUND',
|
|
||||||
})
|
|
||||||
|
|
||||||
const failure = classifyOpenAINetworkFailure(error, {
|
|
||||||
url: 'http://localhost:11434/v1/chat/completions',
|
|
||||||
})
|
|
||||||
|
|
||||||
expect(failure.category).toBe('localhost_resolution_failed')
|
|
||||||
expect(failure.retryable).toBe(true)
|
|
||||||
expect(failure.code).toBe('ENOTFOUND')
|
|
||||||
expect(failure.hint).toContain('127.0.0.1')
|
|
||||||
})
|
|
||||||
|
|
||||||
test('classifies model-not-found 404 responses', () => {
|
|
||||||
const failure = classifyOpenAIHttpFailure({
|
|
||||||
status: 404,
|
|
||||||
body: 'The model qwen2.5-coder:7b was not found',
|
|
||||||
})
|
|
||||||
|
|
||||||
expect(failure.category).toBe('model_not_found')
|
|
||||||
expect(failure.retryable).toBe(false)
|
|
||||||
})
|
|
||||||
|
|
||||||
test('classifies generic 404 responses as endpoint_not_found', () => {
|
|
||||||
const failure = classifyOpenAIHttpFailure({
|
|
||||||
status: 404,
|
|
||||||
body: 'Not Found',
|
|
||||||
})
|
|
||||||
|
|
||||||
expect(failure.category).toBe('endpoint_not_found')
|
|
||||||
expect(failure.hint).toContain('/v1')
|
|
||||||
})
|
|
||||||
|
|
||||||
test('classifies context-overflow responses', () => {
|
|
||||||
const failure = classifyOpenAIHttpFailure({
|
|
||||||
status: 500,
|
|
||||||
body: 'request too large: maximum context length exceeded',
|
|
||||||
})
|
|
||||||
|
|
||||||
expect(failure.category).toBe('context_overflow')
|
|
||||||
expect(failure.retryable).toBe(false)
|
|
||||||
})
|
|
||||||
|
|
||||||
test('classifies tool compatibility failures', () => {
|
|
||||||
const failure = classifyOpenAIHttpFailure({
|
|
||||||
status: 400,
|
|
||||||
body: 'tool_calls are not supported by this model',
|
|
||||||
})
|
|
||||||
|
|
||||||
expect(failure.category).toBe('tool_call_incompatible')
|
|
||||||
})
|
|
||||||
|
|
||||||
test('embeds and extracts category markers in formatted messages', () => {
|
|
||||||
const marker = formatOpenAICategoryMarker('endpoint_not_found')
|
|
||||||
expect(marker).toBe('[openai_category=endpoint_not_found]')
|
|
||||||
|
|
||||||
const formatted = buildOpenAICompatibilityErrorMessage('OpenAI API error 404: Not Found', {
|
|
||||||
category: 'endpoint_not_found',
|
|
||||||
hint: 'Confirm OPENAI_BASE_URL includes /v1.',
|
|
||||||
})
|
|
||||||
|
|
||||||
expect(formatted).toContain('[openai_category=endpoint_not_found]')
|
|
||||||
expect(formatted).toContain('Hint: Confirm OPENAI_BASE_URL includes /v1.')
|
|
||||||
expect(extractOpenAICategoryMarker(formatted)).toBe('endpoint_not_found')
|
|
||||||
})
|
|
||||||
|
|
||||||
test('ignores unknown category markers during extraction', () => {
|
|
||||||
const malformed = 'OpenAI API error 500 [openai_category=totally_fake_category]'
|
|
||||||
expect(extractOpenAICategoryMarker(malformed)).toBeUndefined()
|
|
||||||
})
|
|
||||||
@@ -1,355 +0,0 @@
|
|||||||
export type OpenAICompatibilityFailureCategory =
|
|
||||||
| 'connection_refused'
|
|
||||||
| 'localhost_resolution_failed'
|
|
||||||
| 'request_timeout'
|
|
||||||
| 'network_error'
|
|
||||||
| 'auth_invalid'
|
|
||||||
| 'rate_limited'
|
|
||||||
| 'model_not_found'
|
|
||||||
| 'endpoint_not_found'
|
|
||||||
| 'context_overflow'
|
|
||||||
| 'tool_call_incompatible'
|
|
||||||
| 'malformed_provider_response'
|
|
||||||
| 'provider_unavailable'
|
|
||||||
| 'unknown'
|
|
||||||
|
|
||||||
export type OpenAICompatibilityFailure = {
|
|
||||||
source: 'network' | 'http'
|
|
||||||
category: OpenAICompatibilityFailureCategory
|
|
||||||
retryable: boolean
|
|
||||||
message: string
|
|
||||||
hint?: string
|
|
||||||
code?: string
|
|
||||||
status?: number
|
|
||||||
}
|
|
||||||
|
|
||||||
const OPENAI_CATEGORY_MARKER_PREFIX = '[openai_category='
|
|
||||||
|
|
||||||
const LOCALHOST_HOSTNAMES = new Set(['localhost', '127.0.0.1', '::1'])
|
|
||||||
|
|
||||||
const OPENAI_COMPATIBILITY_FAILURE_CATEGORIES: ReadonlySet<OpenAICompatibilityFailureCategory> =
|
|
||||||
new Set<OpenAICompatibilityFailureCategory>([
|
|
||||||
'connection_refused',
|
|
||||||
'localhost_resolution_failed',
|
|
||||||
'request_timeout',
|
|
||||||
'network_error',
|
|
||||||
'auth_invalid',
|
|
||||||
'rate_limited',
|
|
||||||
'model_not_found',
|
|
||||||
'endpoint_not_found',
|
|
||||||
'context_overflow',
|
|
||||||
'tool_call_incompatible',
|
|
||||||
'malformed_provider_response',
|
|
||||||
'provider_unavailable',
|
|
||||||
'unknown',
|
|
||||||
])
|
|
||||||
|
|
||||||
function isOpenAICompatibilityFailureCategory(
|
|
||||||
value: string,
|
|
||||||
): value is OpenAICompatibilityFailureCategory {
|
|
||||||
return OPENAI_COMPATIBILITY_FAILURE_CATEGORIES.has(
|
|
||||||
value as OpenAICompatibilityFailureCategory,
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
function getErrorCode(error: unknown): string | undefined {
|
|
||||||
let current: unknown = error
|
|
||||||
const maxDepth = 5
|
|
||||||
|
|
||||||
for (let depth = 0; depth < maxDepth; depth++) {
|
|
||||||
if (
|
|
||||||
current &&
|
|
||||||
typeof current === 'object' &&
|
|
||||||
'code' in current &&
|
|
||||||
typeof (current as { code?: unknown }).code === 'string'
|
|
||||||
) {
|
|
||||||
return (current as { code: string }).code
|
|
||||||
}
|
|
||||||
|
|
||||||
if (
|
|
||||||
current &&
|
|
||||||
typeof current === 'object' &&
|
|
||||||
'cause' in current &&
|
|
||||||
(current as { cause?: unknown }).cause !== current
|
|
||||||
) {
|
|
||||||
current = (current as { cause?: unknown }).cause
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
return undefined
|
|
||||||
}
|
|
||||||
|
|
||||||
function getHostname(url: string): string | null {
|
|
||||||
try {
|
|
||||||
return new URL(url).hostname.toLowerCase()
|
|
||||||
} catch {
|
|
||||||
return null
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
function isLocalhostLikeHostname(hostname: string | null): boolean {
|
|
||||||
if (!hostname) return false
|
|
||||||
if (LOCALHOST_HOSTNAMES.has(hostname)) return true
|
|
||||||
return /^127\./.test(hostname)
|
|
||||||
}
|
|
||||||
|
|
||||||
function isContextOverflowMessage(body: string): boolean {
|
|
||||||
const lower = body.toLowerCase()
|
|
||||||
return (
|
|
||||||
lower.includes('too many tokens') ||
|
|
||||||
lower.includes('request too large') ||
|
|
||||||
lower.includes('context length') ||
|
|
||||||
lower.includes('maximum context') ||
|
|
||||||
lower.includes('input length') ||
|
|
||||||
lower.includes('payload too large') ||
|
|
||||||
lower.includes('prompt is too long')
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
function isToolCompatibilityMessage(body: string): boolean {
|
|
||||||
const lower = body.toLowerCase()
|
|
||||||
return (
|
|
||||||
lower.includes('tool_calls') ||
|
|
||||||
lower.includes('tool_call') ||
|
|
||||||
lower.includes('tool_use') ||
|
|
||||||
lower.includes('tool_result') ||
|
|
||||||
lower.includes('function calling') ||
|
|
||||||
lower.includes('function call')
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
function isMalformedProviderResponse(body: string): boolean {
|
|
||||||
const lower = body.toLowerCase()
|
|
||||||
return (
|
|
||||||
lower.includes('<!doctype html') ||
|
|
||||||
lower.includes('<html') ||
|
|
||||||
lower.includes('invalid json') ||
|
|
||||||
lower.includes('malformed') ||
|
|
||||||
lower.includes('unexpected token') ||
|
|
||||||
lower.includes('cannot parse') ||
|
|
||||||
lower.includes('not valid json')
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
function isModelNotFoundMessage(body: string): boolean {
|
|
||||||
const lower = body.toLowerCase()
|
|
||||||
return (
|
|
||||||
lower.includes('model') &&
|
|
||||||
(
|
|
||||||
lower.includes('not found') ||
|
|
||||||
lower.includes('does not exist') ||
|
|
||||||
lower.includes('unknown model') ||
|
|
||||||
lower.includes('unavailable model')
|
|
||||||
)
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
 * Renders the machine-readable category marker (e.g.
 * "[openai_category=rate_limited]") that gets appended to error messages so
 * downstream handlers can recover the failure category from a plain string.
 *
 * NOTE(review): `extractOpenAICategoryMarker` parses this back with a
 * hard-coded regex — keep the two in sync if OPENAI_CATEGORY_MARKER_PREFIX
 * ever changes.
 *
 * @param category - Classified failure category.
 * @returns The bracketed marker string.
 */
export function formatOpenAICategoryMarker(
  category: OpenAICompatibilityFailureCategory,
): string {
  return `${OPENAI_CATEGORY_MARKER_PREFIX}${category}]`
}
|
|
||||||
|
|
||||||
/**
 * Parses a category marker previously embedded by
 * `formatOpenAICategoryMarker` out of an error message string.
 *
 * @param message - Error message that may contain "[openai_category=...]".
 * @returns The validated failure category, or undefined when no marker is
 *   present or the captured value is not a known category.
 */
export function extractOpenAICategoryMarker(
  message: string,
): OpenAICompatibilityFailureCategory | undefined {
  // Regex mirrors the marker emitted by formatOpenAICategoryMarker.
  const match = message.match(/\[openai_category=([a-z_]+)]/)
  const category = match?.[1]

  // Guard against unknown or forged marker values.
  if (!category || !isOpenAICompatibilityFailureCategory(category)) {
    return undefined
  }

  return category
}
|
|
||||||
|
|
||||||
export function buildOpenAICompatibilityErrorMessage(
|
|
||||||
baseMessage: string,
|
|
||||||
failure: Pick<OpenAICompatibilityFailure, 'category' | 'hint'>,
|
|
||||||
): string {
|
|
||||||
const marker = formatOpenAICategoryMarker(failure.category)
|
|
||||||
const hint = failure.hint ? ` Hint: ${failure.hint}` : ''
|
|
||||||
return `${baseMessage} ${marker}${hint}`
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
 * Classifies a transport-level failure (thrown before any HTTP response was
 * received) from an OpenAI-compatible request into a structured failure
 * record with category, retryability, and a remediation hint.
 *
 * Branch order matters: timeouts are checked first, then localhost-specific
 * DNS/resolution failures, then connection refusal; anything else falls
 * through to a generic network_error. All transport failures are retryable.
 *
 * @param error - The value thrown by the fetch/transport layer.
 * @param options.url - Request URL, used to detect localhost targets.
 */
export function classifyOpenAINetworkFailure(
  error: unknown,
  options: { url: string },
): OpenAICompatibilityFailure {
  const message = error instanceof Error ? error.message : String(error)
  const lowerMessage = message.toLowerCase()
  const code = getErrorCode(error)
  const hostname = getHostname(options.url)
  const isLocalHost = isLocalhostLikeHostname(hostname)

  // Timeouts: Node ETIMEDOUT, undici connect timeout, or message text.
  if (
    code === 'ETIMEDOUT' ||
    code === 'UND_ERR_CONNECT_TIMEOUT' ||
    lowerMessage.includes('timeout') ||
    lowerMessage.includes('timed out') ||
    lowerMessage.includes('aborterror')
  ) {
    return {
      source: 'network',
      category: 'request_timeout',
      retryable: true,
      message,
      code,
      hint: 'The provider took too long to respond. Check local model load time or increase API timeout.',
    }
  }

  // Resolution failures scoped to localhost-like targets; a bare
  // "fetch failed" with no error code on a local URL is treated the same.
  if (
    isLocalHost &&
    (
      code === 'ENOTFOUND' ||
      code === 'EAI_AGAIN' ||
      lowerMessage.includes('getaddrinfo') ||
      (code === undefined && lowerMessage.includes('fetch failed'))
    )
  ) {
    return {
      source: 'network',
      category: 'localhost_resolution_failed',
      retryable: true,
      message,
      code,
      hint: 'Localhost failed for this request. Retry with 127.0.0.1 and confirm Ollama is serving on the configured port.',
    }
  }

  // TCP connection refused — hint differs for local vs remote endpoints.
  if (code === 'ECONNREFUSED') {
    return {
      source: 'network',
      category: 'connection_refused',
      retryable: true,
      message,
      code,
      hint: isLocalHost
        ? 'Connection to the local provider was refused. Ensure the local server is running and listening on the configured port.'
        : 'Connection was refused by the provider endpoint. Ensure the server is running and the port is correct.',
    }
  }

  // Fallback: unclassified transport failure.
  return {
    source: 'network',
    category: 'network_error',
    retryable: true,
    message,
    code,
    hint: 'Network transport failed before a provider response was received.',
  }
}
|
|
||||||
|
|
||||||
/**
 * Classifies an HTTP error response from an OpenAI-compatible endpoint into
 * a structured failure record with category, retryability, status, and hint.
 *
 * Checks run from most to least specific and the first match wins; in
 * particular, model_not_found is tested before the generic endpoint 404, and
 * context overflow / tool incompatibility before generic 5xx handling.
 *
 * @param options.status - HTTP status code of the response.
 * @param options.body - Raw response body text (may be empty).
 */
export function classifyOpenAIHttpFailure(options: {
  status: number
  body: string
}): OpenAICompatibilityFailure {
  // Defensive default in case the caller passed a nullish body.
  const body = options.body ?? ''

  // 401/403: credentials rejected — not retryable.
  if (options.status === 401 || options.status === 403) {
    return {
      source: 'http',
      category: 'auth_invalid',
      retryable: false,
      status: options.status,
      message: body,
      hint: 'Authentication failed. Verify API key, token source, and endpoint-specific auth headers.',
    }
  }

  // 429: rate limiting — retryable after backoff.
  if (options.status === 429) {
    return {
      source: 'http',
      category: 'rate_limited',
      retryable: true,
      status: options.status,
      message: body,
      hint: 'Provider rate-limited the request. Retry after backoff.',
    }
  }

  // 404 whose body names a missing model (checked before the generic 404).
  if (options.status === 404 && isModelNotFoundMessage(body)) {
    return {
      source: 'http',
      category: 'model_not_found',
      retryable: false,
      status: options.status,
      message: body,
      hint: 'The selected model is not installed or not available on this endpoint.',
    }
  }

  // Any other 404: the route itself is wrong (commonly a missing /v1).
  if (options.status === 404) {
    return {
      source: 'http',
      category: 'endpoint_not_found',
      retryable: false,
      status: options.status,
      message: body,
      hint: 'Endpoint was not found. Confirm OPENAI_BASE_URL includes /v1 for OpenAI-compatible local providers.',
    }
  }

  // 413 always means overflow; 400/5xx only when the body says so.
  if (
    options.status === 413 ||
    ((options.status === 400 || options.status >= 500) &&
      isContextOverflowMessage(body))
  ) {
    return {
      source: 'http',
      category: 'context_overflow',
      retryable: false,
      status: options.status,
      message: body,
      hint: 'Prompt context exceeded model/server limits. Reduce context or increase provider context length.',
    }
  }

  // 400 mentioning tool-calling constructs: model/endpoint rejected tools.
  if (options.status === 400 && isToolCompatibilityMessage(body)) {
    return {
      source: 'http',
      category: 'tool_call_incompatible',
      retryable: false,
      status: options.status,
      message: body,
      hint: 'Provider/model rejected tool-calling payload. Retry without tools or use a tool-capable model.',
    }
  }

  // HTML or unparseable bodies on 2xx or 4xx+ responses. Note: 3xx bodies
  // intentionally fall through to the later branches.
  if (
    (options.status >= 200 && options.status < 300 && isMalformedProviderResponse(body)) ||
    (options.status >= 400 && isMalformedProviderResponse(body))
  ) {
    return {
      source: 'http',
      category: 'malformed_provider_response',
      retryable: false,
      status: options.status,
      message: body,
      hint: 'Provider returned malformed or non-JSON response where JSON was expected.',
    }
  }

  // Remaining 5xx: transient server-side failure — retryable.
  if (options.status >= 500) {
    return {
      source: 'http',
      category: 'provider_unavailable',
      retryable: true,
      status: options.status,
      message: body,
      hint: 'Provider reported a server-side failure. Retry after a short delay.',
    }
  }

  // Anything else: unclassified and not retried (no hint available).
  return {
    source: 'http',
    category: 'unknown',
    retryable: false,
    status: options.status,
    message: body,
  }
}
|
|
||||||
@@ -1,119 +0,0 @@
|
|||||||
import { afterEach, expect, mock, test } from 'bun:test'
|
|
||||||
|
|
||||||
// Snapshot globals these tests mutate, so afterEach can restore them.
const originalFetch = globalThis.fetch
// Environment variables the tests overwrite; captured here for restoration.
const originalEnv = {
  OPENAI_BASE_URL: process.env.OPENAI_BASE_URL,
  OPENAI_API_KEY: process.env.OPENAI_API_KEY,
  OPENAI_MODEL: process.env.OPENAI_MODEL,
}
|
|
||||||
|
|
||||||
function restoreEnv(key: string, value: string | undefined): void {
|
|
||||||
if (value === undefined) {
|
|
||||||
delete process.env[key]
|
|
||||||
} else {
|
|
||||||
process.env[key] = value
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Undo every per-test global mutation: the fetch stub, the env overrides,
// and any bun:test module mocks registered during the test.
afterEach(() => {
  globalThis.fetch = originalFetch
  restoreEnv('OPENAI_BASE_URL', originalEnv.OPENAI_BASE_URL)
  restoreEnv('OPENAI_API_KEY', originalEnv.OPENAI_API_KEY)
  restoreEnv('OPENAI_MODEL', originalEnv.OPENAI_MODEL)
  mock.restore()
})
|
|
||||||
|
|
||||||
test('logs classified transport diagnostics with category and code', async () => {
  // Spy on the debug logger so the diagnostic line can be inspected.
  const debugSpy = mock(() => {})
  mock.module('../../utils/debug.js', () => ({
    logForDebugging: debugSpy,
  }))

  // Cache-bust the dynamic import so the shim module binds the mocked logger.
  const nonce = `${Date.now()}-${Math.random()}`
  const { createOpenAIShimClient } = await import(`./openaiShim.ts?ts=${nonce}`)

  process.env.OPENAI_BASE_URL = 'http://localhost:11434/v1'
  process.env.OPENAI_API_KEY = 'ollama'

  // Simulate a refused TCP connection thrown at the transport layer.
  const transportError = Object.assign(new TypeError('fetch failed'), {
    code: 'ECONNREFUSED',
  })

  globalThis.fetch = mock(async () => {
    throw transportError
  }) as typeof globalThis.fetch

  const client = createOpenAIShimClient({}) as {
    beta: {
      messages: {
        create: (params: Record<string, unknown>) => Promise<unknown>
      }
    }
  }

  // The surfaced error must carry the machine-readable category marker.
  await expect(
    client.beta.messages.create({
      model: 'qwen2.5-coder:7b',
      messages: [{ role: 'user', content: 'hello' }],
      max_tokens: 64,
      stream: false,
    }),
  ).rejects.toThrow('openai_category=connection_refused')

  // Locate the transport-failure diagnostic among the debug calls.
  const transportLog = debugSpy.mock.calls.find(call =>
    typeof call?.[0] === 'string' && call[0].includes('transport failure'),
  )

  // The diagnostic must include category, error code, and warn level.
  expect(transportLog).toBeDefined()
  expect(String(transportLog?.[0])).toContain('category=connection_refused')
  expect(String(transportLog?.[0])).toContain('code=ECONNREFUSED')
  expect(transportLog?.[1]).toEqual({ level: 'warn' })
})
|
|
||||||
|
|
||||||
test('redacts credentials in transport diagnostic URL logs', async () => {
  // Spy on the debug logger so the logged URL can be inspected.
  const debugSpy = mock(() => {})
  mock.module('../../utils/debug.js', () => ({
    logForDebugging: debugSpy,
  }))

  // Cache-bust the dynamic import so the shim module binds the mocked logger.
  const nonce = `${Date.now()}-${Math.random()}`
  const { createOpenAIShimClient } = await import(`./openaiShim.ts?ts=${nonce}`)

  // Base URL deliberately embeds userinfo credentials; both the userinfo and
  // the API key value must be scrubbed from diagnostics.
  process.env.OPENAI_BASE_URL = 'http://user:supersecret@localhost:11434/v1'
  process.env.OPENAI_API_KEY = 'supersecret'

  const transportError = Object.assign(new TypeError('fetch failed'), {
    code: 'ECONNREFUSED',
  })

  globalThis.fetch = mock(async () => {
    throw transportError
  }) as typeof globalThis.fetch

  const client = createOpenAIShimClient({}) as {
    beta: {
      messages: {
        create: (params: Record<string, unknown>) => Promise<unknown>
      }
    }
  }

  await expect(
    client.beta.messages.create({
      model: 'qwen2.5-coder:7b',
      messages: [{ role: 'user', content: 'hello' }],
      max_tokens: 64,
      stream: false,
    }),
  ).rejects.toThrow('openai_category=connection_refused')

  const transportLog = debugSpy.mock.calls.find(call =>
    typeof call?.[0] === 'string' && call[0].includes('transport failure'),
  )

  expect(transportLog).toBeDefined()
  const logLine = String(transportLog?.[0])
  // Userinfo must be replaced wholesale, and the secret must not leak in any form.
  expect(logLine).toContain('url=http://redacted:redacted@localhost:11434/v1/chat/completions')
  expect(logLine).not.toContain('user:supersecret')
  expect(logLine).not.toContain('supersecret@')
})
|
|
||||||
@@ -2775,172 +2775,3 @@ test('streaming: strips leaked reasoning preamble when split across multiple con
|
|||||||
|
|
||||||
expect(textDeltas).toEqual(['Hey! How can I help you today?'])
|
expect(textDeltas).toEqual(['Hey! How can I help you today?'])
|
||||||
})
|
})
|
||||||
|
|
||||||
test('classifies localhost transport failures with actionable category marker', async () => {
  process.env.OPENAI_BASE_URL = 'http://localhost:11434/v1'

  // Simulate a refused TCP connection thrown by the transport layer.
  const transportError = Object.assign(new TypeError('fetch failed'), {
    code: 'ECONNREFUSED',
  })

  globalThis.fetch = (async () => {
    throw transportError
  }) as FetchType

  const client = createOpenAIShimClient({}) as OpenAIShimClient

  // The surfaced error must carry the machine-readable category marker...
  await expect(
    client.beta.messages.create({
      model: 'qwen2.5-coder:7b',
      messages: [{ role: 'user', content: 'hello' }],
      max_tokens: 64,
      stream: false,
    }),
  ).rejects.toThrow('openai_category=connection_refused')

  // ...and the localhost-specific human-readable remediation hint.
  await expect(
    client.beta.messages.create({
      model: 'qwen2.5-coder:7b',
      messages: [{ role: 'user', content: 'hello' }],
      max_tokens: 64,
      stream: false,
    }),
  ).rejects.toThrow('local server is running')
})
|
|
||||||
|
|
||||||
test('propagates AbortError without wrapping it as transport failure', async () => {
  process.env.OPENAI_BASE_URL = 'http://localhost:11434/v1'

  // An aborted request must surface the original AbortError, not a
  // classified transport error.
  const abortError = new DOMException('The operation was aborted.', 'AbortError')
  globalThis.fetch = (async () => {
    throw abortError
  }) as FetchType

  // Abort the signal up front so the request is already cancelled.
  const controller = new AbortController()
  controller.abort()

  const client = createOpenAIShimClient({}) as OpenAIShimClient

  await expect(
    client.beta.messages.create(
      {
        model: 'qwen2.5-coder:7b',
        messages: [{ role: 'user', content: 'hello' }],
        max_tokens: 64,
        stream: false,
      },
      { signal: controller.signal },
    ),
  ).rejects.toBe(abortError) // identity check: the very same error object
})
|
|
||||||
|
|
||||||
test('classifies chat-completions endpoint 404 failures with endpoint_not_found marker', async () => {
  // Base URL intentionally omits /v1 so the chat-completions route 404s.
  process.env.OPENAI_BASE_URL = 'http://localhost:11434'

  globalThis.fetch = (async () =>
    new Response('Not Found', {
      status: 404,
      headers: {
        'Content-Type': 'text/plain',
      },
    })) as FetchType

  const client = createOpenAIShimClient({}) as OpenAIShimClient

  // A plain 404 (no model mention in the body) classifies as endpoint_not_found.
  await expect(
    client.beta.messages.create({
      model: 'qwen2.5-coder:7b',
      messages: [{ role: 'user', content: 'hello' }],
      max_tokens: 64,
      stream: false,
    }),
  ).rejects.toThrow('openai_category=endpoint_not_found')
})
|
|
||||||
|
|
||||||
test('preserves valid tool_result and drops orphan tool_result', async () => {
|
|
||||||
let requestBody: Record<string, unknown> | undefined
|
|
||||||
|
|
||||||
globalThis.fetch = (async (_input, init) => {
|
|
||||||
requestBody = JSON.parse(String(init?.body))
|
|
||||||
|
|
||||||
return new Response(
|
|
||||||
JSON.stringify({
|
|
||||||
id: 'chatcmpl-1',
|
|
||||||
model: 'mistral-large-latest',
|
|
||||||
choices: [
|
|
||||||
{
|
|
||||||
message: {
|
|
||||||
role: 'assistant',
|
|
||||||
content: 'done',
|
|
||||||
},
|
|
||||||
finish_reason: 'stop',
|
|
||||||
},
|
|
||||||
],
|
|
||||||
usage: {
|
|
||||||
prompt_tokens: 12,
|
|
||||||
completion_tokens: 4,
|
|
||||||
total_tokens: 16,
|
|
||||||
},
|
|
||||||
}),
|
|
||||||
{
|
|
||||||
headers: {
|
|
||||||
'Content-Type': 'application/json',
|
|
||||||
},
|
|
||||||
},
|
|
||||||
)
|
|
||||||
}) as FetchType
|
|
||||||
|
|
||||||
const client = createOpenAIShimClient({}) as OpenAIShimClient
|
|
||||||
|
|
||||||
await client.beta.messages.create({
|
|
||||||
model: 'mistral-large-latest',
|
|
||||||
system: 'test system',
|
|
||||||
messages: [
|
|
||||||
{ role: 'user', content: 'Search and then I will interrupt' },
|
|
||||||
{
|
|
||||||
role: 'assistant',
|
|
||||||
content: [
|
|
||||||
{
|
|
||||||
type: 'tool_use',
|
|
||||||
id: 'valid_call_1',
|
|
||||||
name: 'Search',
|
|
||||||
input: { query: 'openclaude' },
|
|
||||||
},
|
|
||||||
],
|
|
||||||
},
|
|
||||||
{
|
|
||||||
role: 'user',
|
|
||||||
content: [
|
|
||||||
{
|
|
||||||
type: 'tool_result',
|
|
||||||
tool_use_id: 'valid_call_1',
|
|
||||||
content: 'Found it!',
|
|
||||||
},
|
|
||||||
{
|
|
||||||
type: 'tool_result',
|
|
||||||
tool_use_id: 'orphan_call_2',
|
|
||||||
content: 'Interrupted result',
|
|
||||||
},
|
|
||||||
{
|
|
||||||
role: 'user',
|
|
||||||
content: 'What happened?',
|
|
||||||
}
|
|
||||||
],
|
|
||||||
},
|
|
||||||
],
|
|
||||||
max_tokens: 64,
|
|
||||||
stream: false,
|
|
||||||
})
|
|
||||||
|
|
||||||
const messages = requestBody?.messages as Array<Record<string, unknown>>
|
|
||||||
|
|
||||||
// Should have: system, user, assistant (tool_use), tool (valid_call_1), user
|
|
||||||
// Should NOT have: tool (orphan_call_2)
|
|
||||||
|
|
||||||
const toolMessages = messages.filter(m => m.role === 'tool')
|
|
||||||
expect(toolMessages.length).toBe(1)
|
|
||||||
expect(toolMessages[0].tool_call_id).toBe('valid_call_1')
|
|
||||||
|
|
||||||
const orphanMessage = toolMessages.find(m => m.tool_call_id === 'orphan_call_2')
|
|
||||||
expect(orphanMessage).toBeUndefined()
|
|
||||||
})
|
|
||||||
|
|||||||
@@ -47,18 +47,12 @@ import {
|
|||||||
type AnthropicUsage,
|
type AnthropicUsage,
|
||||||
type ShimCreateParams,
|
type ShimCreateParams,
|
||||||
} from './codexShim.js'
|
} from './codexShim.js'
|
||||||
import { fetchWithProxyRetry } from './fetchWithProxyRetry.js'
|
|
||||||
import {
|
import {
|
||||||
isLocalProviderUrl,
|
isLocalProviderUrl,
|
||||||
resolveRuntimeCodexCredentials,
|
resolveRuntimeCodexCredentials,
|
||||||
resolveProviderRequest,
|
resolveProviderRequest,
|
||||||
getGithubEndpointType,
|
getGithubEndpointType,
|
||||||
} from './providerConfig.js'
|
} from './providerConfig.js'
|
||||||
import {
|
|
||||||
buildOpenAICompatibilityErrorMessage,
|
|
||||||
classifyOpenAIHttpFailure,
|
|
||||||
classifyOpenAINetworkFailure,
|
|
||||||
} from './openaiErrorClassification.js'
|
|
||||||
import { sanitizeSchemaForOpenAICompat } from '../../utils/schemaSanitizer.js'
|
import { sanitizeSchemaForOpenAICompat } from '../../utils/schemaSanitizer.js'
|
||||||
import { redactSecretValueForDisplay } from '../../utils/providerProfile.js'
|
import { redactSecretValueForDisplay } from '../../utils/providerProfile.js'
|
||||||
import {
|
import {
|
||||||
@@ -88,19 +82,6 @@ const COPILOT_HEADERS: Record<string, string> = {
|
|||||||
'Copilot-Integration-Id': 'vscode-chat',
|
'Copilot-Integration-Id': 'vscode-chat',
|
||||||
}
|
}
|
||||||
|
|
||||||
const SENSITIVE_URL_QUERY_PARAM_NAMES = [
|
|
||||||
'api_key',
|
|
||||||
'key',
|
|
||||||
'token',
|
|
||||||
'access_token',
|
|
||||||
'refresh_token',
|
|
||||||
'signature',
|
|
||||||
'sig',
|
|
||||||
'secret',
|
|
||||||
'password',
|
|
||||||
'authorization',
|
|
||||||
]
|
|
||||||
|
|
||||||
function isGithubModelsMode(): boolean {
|
function isGithubModelsMode(): boolean {
|
||||||
return isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB)
|
return isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB)
|
||||||
}
|
}
|
||||||
@@ -150,34 +131,6 @@ function formatRetryAfterHint(response: Response): string {
|
|||||||
return ra ? ` (Retry-After: ${ra})` : ''
|
return ra ? ` (Retry-After: ${ra})` : ''
|
||||||
}
|
}
|
||||||
|
|
||||||
function shouldRedactUrlQueryParam(name: string): boolean {
|
|
||||||
const lower = name.toLowerCase()
|
|
||||||
return SENSITIVE_URL_QUERY_PARAM_NAMES.some(token => lower.includes(token))
|
|
||||||
}
|
|
||||||
|
|
||||||
function redactUrlForDiagnostics(url: string): string {
|
|
||||||
try {
|
|
||||||
const parsed = new URL(url)
|
|
||||||
if (parsed.username) {
|
|
||||||
parsed.username = 'redacted'
|
|
||||||
}
|
|
||||||
if (parsed.password) {
|
|
||||||
parsed.password = 'redacted'
|
|
||||||
}
|
|
||||||
|
|
||||||
for (const key of parsed.searchParams.keys()) {
|
|
||||||
if (shouldRedactUrlQueryParam(key)) {
|
|
||||||
parsed.searchParams.set(key, 'redacted')
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
const serialized = parsed.toString()
|
|
||||||
return redactSecretValueForDisplay(serialized, process.env as SecretValueSource) ?? serialized
|
|
||||||
} catch {
|
|
||||||
return redactSecretValueForDisplay(url, process.env as SecretValueSource) ?? url
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
function sleepMs(ms: number): Promise<void> {
|
function sleepMs(ms: number): Promise<void> {
|
||||||
return new Promise(resolve => setTimeout(resolve, ms))
|
return new Promise(resolve => setTimeout(resolve, ms))
|
||||||
}
|
}
|
||||||
@@ -349,7 +302,6 @@ function convertMessages(
|
|||||||
system: unknown,
|
system: unknown,
|
||||||
): OpenAIMessage[] {
|
): OpenAIMessage[] {
|
||||||
const result: OpenAIMessage[] = []
|
const result: OpenAIMessage[] = []
|
||||||
const knownToolCallIds = new Set<string>()
|
|
||||||
|
|
||||||
// System message first
|
// System message first
|
||||||
const sysText = convertSystemPrompt(system)
|
const sysText = convertSystemPrompt(system)
|
||||||
@@ -369,21 +321,13 @@ function convertMessages(
|
|||||||
const toolResults = content.filter((b: { type?: string }) => b.type === 'tool_result')
|
const toolResults = content.filter((b: { type?: string }) => b.type === 'tool_result')
|
||||||
const otherContent = content.filter((b: { type?: string }) => b.type !== 'tool_result')
|
const otherContent = content.filter((b: { type?: string }) => b.type !== 'tool_result')
|
||||||
|
|
||||||
// Emit tool results as tool messages, but ONLY if we have a matching tool_use ID.
|
// Emit tool results as tool messages
|
||||||
// Mistral/OpenAI strictly require tool messages to follow an assistant message with tool_calls.
|
|
||||||
// If the user interrupted (ESC) and a synthetic tool_result was generated without a recorded tool_use,
|
|
||||||
// emitting it here would cause a "role must alternate" or "unexpected role" error.
|
|
||||||
for (const tr of toolResults) {
|
for (const tr of toolResults) {
|
||||||
const id = tr.tool_use_id ?? 'unknown'
|
result.push({
|
||||||
if (knownToolCallIds.has(id)) {
|
role: 'tool',
|
||||||
result.push({
|
tool_call_id: tr.tool_use_id ?? 'unknown',
|
||||||
role: 'tool',
|
content: convertToolResultContent(tr.content, tr.is_error),
|
||||||
tool_call_id: id,
|
})
|
||||||
content: convertToolResultContent(tr.content, tr.is_error),
|
|
||||||
})
|
|
||||||
} else {
|
|
||||||
logForDebugging(`Dropping orphan tool_result for ID: ${id} to prevent API error`)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Emit remaining user content
|
// Emit remaining user content
|
||||||
@@ -424,11 +368,9 @@ function convertMessages(
|
|||||||
input?: unknown
|
input?: unknown
|
||||||
extra_content?: Record<string, unknown>
|
extra_content?: Record<string, unknown>
|
||||||
signature?: string
|
signature?: string
|
||||||
}) => {
|
}, index) => {
|
||||||
const id = tu.id ?? `call_${crypto.randomUUID().replace(/-/g, '')}`
|
|
||||||
knownToolCallIds.add(id)
|
|
||||||
const toolCall: NonNullable<OpenAIMessage['tool_calls']>[number] = {
|
const toolCall: NonNullable<OpenAIMessage['tool_calls']>[number] = {
|
||||||
id,
|
id: tu.id ?? `call_${crypto.randomUUID().replace(/-/g, '')}`,
|
||||||
type: 'function' as const,
|
type: 'function' as const,
|
||||||
function: {
|
function: {
|
||||||
name: tu.name ?? 'unknown',
|
name: tu.name ?? 'unknown',
|
||||||
@@ -453,6 +395,7 @@ function convertMessages(
|
|||||||
|
|
||||||
// Merge into existing google-specific metadata if present
|
// Merge into existing google-specific metadata if present
|
||||||
const existingGoogle = (toolCall.extra_content?.google as Record<string, unknown>) ?? {}
|
const existingGoogle = (toolCall.extra_content?.google as Record<string, unknown>) ?? {}
|
||||||
|
|
||||||
toolCall.extra_content = {
|
toolCall.extra_content = {
|
||||||
...toolCall.extra_content,
|
...toolCall.extra_content,
|
||||||
google: {
|
google: {
|
||||||
@@ -607,10 +550,7 @@ function convertTools(
|
|||||||
function: {
|
function: {
|
||||||
name: t.name,
|
name: t.name,
|
||||||
description: t.description ?? '',
|
description: t.description ?? '',
|
||||||
parameters: normalizeSchemaForOpenAI(
|
parameters: normalizeSchemaForOpenAI(schema, !isGemini),
|
||||||
schema,
|
|
||||||
!isGemini && !isEnvTruthy(process.env.OPENCLAUDE_DISABLE_STRICT_TOOLS),
|
|
||||||
),
|
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
@@ -1420,12 +1360,8 @@ class OpenAIShimMessages {
|
|||||||
...filterAnthropicHeaders(options?.headers),
|
...filterAnthropicHeaders(options?.headers),
|
||||||
}
|
}
|
||||||
|
|
||||||
const isGemini = isGeminiMode()
|
const isGemini = isEnvTruthy(process.env.CLAUDE_CODE_USE_GEMINI)
|
||||||
const isMiniMax = !!process.env.MINIMAX_API_KEY
|
const apiKey = this.providerOverride?.apiKey ?? process.env.OPENAI_API_KEY ?? ''
|
||||||
const apiKey =
|
|
||||||
this.providerOverride?.apiKey ??
|
|
||||||
process.env.OPENAI_API_KEY ??
|
|
||||||
(isMiniMax ? process.env.MINIMAX_API_KEY : '')
|
|
||||||
// Detect Azure endpoints by hostname (not raw URL) to prevent bypass via
|
// Detect Azure endpoints by hostname (not raw URL) to prevent bypass via
|
||||||
// path segments like https://evil.com/cognitiveservices.azure.com/
|
// path segments like https://evil.com/cognitiveservices.azure.com/
|
||||||
let isAzure = false
|
let isAzure = false
|
||||||
@@ -1489,97 +1425,12 @@ class OpenAIShimMessages {
|
|||||||
}
|
}
|
||||||
|
|
||||||
const maxAttempts = isGithub ? GITHUB_429_MAX_RETRIES : 1
|
const maxAttempts = isGithub ? GITHUB_429_MAX_RETRIES : 1
|
||||||
|
|
||||||
const throwClassifiedTransportError = (
|
|
||||||
error: unknown,
|
|
||||||
requestUrl: string,
|
|
||||||
): never => {
|
|
||||||
if (options?.signal?.aborted) {
|
|
||||||
throw error
|
|
||||||
}
|
|
||||||
|
|
||||||
const failure = classifyOpenAINetworkFailure(error, {
|
|
||||||
url: requestUrl,
|
|
||||||
})
|
|
||||||
const redactedUrl = redactUrlForDiagnostics(requestUrl)
|
|
||||||
const safeMessage =
|
|
||||||
redactSecretValueForDisplay(
|
|
||||||
failure.message,
|
|
||||||
process.env as SecretValueSource,
|
|
||||||
) || 'Request failed'
|
|
||||||
|
|
||||||
logForDebugging(
|
|
||||||
`[OpenAIShim] transport failure category=${failure.category} retryable=${failure.retryable} code=${failure.code ?? 'unknown'} method=POST url=${redactedUrl} model=${request.resolvedModel} message=${safeMessage}`,
|
|
||||||
{ level: 'warn' },
|
|
||||||
)
|
|
||||||
|
|
||||||
throw APIError.generate(
|
|
||||||
503,
|
|
||||||
undefined,
|
|
||||||
buildOpenAICompatibilityErrorMessage(
|
|
||||||
`OpenAI API transport error: ${safeMessage}${failure.code ? ` (code=${failure.code})` : ''}`,
|
|
||||||
failure,
|
|
||||||
),
|
|
||||||
new Headers(),
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
const throwClassifiedHttpError = (
|
|
||||||
status: number,
|
|
||||||
errorBody: string,
|
|
||||||
parsedBody: object | undefined,
|
|
||||||
responseHeaders: Headers,
|
|
||||||
requestUrl: string,
|
|
||||||
rateHint = '',
|
|
||||||
): never => {
|
|
||||||
const failure = classifyOpenAIHttpFailure({
|
|
||||||
status,
|
|
||||||
body: errorBody,
|
|
||||||
})
|
|
||||||
const redactedUrl = redactUrlForDiagnostics(requestUrl)
|
|
||||||
|
|
||||||
logForDebugging(
|
|
||||||
`[OpenAIShim] request failed category=${failure.category} retryable=${failure.retryable} status=${status} method=POST url=${redactedUrl} model=${request.resolvedModel}`,
|
|
||||||
{ level: 'warn' },
|
|
||||||
)
|
|
||||||
|
|
||||||
throw APIError.generate(
|
|
||||||
status,
|
|
||||||
parsedBody,
|
|
||||||
buildOpenAICompatibilityErrorMessage(
|
|
||||||
`OpenAI API error ${status}: ${errorBody}${rateHint}`,
|
|
||||||
failure,
|
|
||||||
),
|
|
||||||
responseHeaders,
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
let response: Response | undefined
|
let response: Response | undefined
|
||||||
for (let attempt = 0; attempt < maxAttempts; attempt++) {
|
for (let attempt = 0; attempt < maxAttempts; attempt++) {
|
||||||
try {
|
response = await fetch(chatCompletionsUrl, fetchInit)
|
||||||
response = await fetchWithProxyRetry(chatCompletionsUrl, fetchInit)
|
|
||||||
} catch (error) {
|
|
||||||
const isAbortError =
|
|
||||||
fetchInit.signal?.aborted === true ||
|
|
||||||
(typeof DOMException !== 'undefined' &&
|
|
||||||
error instanceof DOMException &&
|
|
||||||
error.name === 'AbortError') ||
|
|
||||||
(typeof error === 'object' &&
|
|
||||||
error !== null &&
|
|
||||||
'name' in error &&
|
|
||||||
error.name === 'AbortError')
|
|
||||||
|
|
||||||
if (isAbortError) {
|
|
||||||
throw error
|
|
||||||
}
|
|
||||||
|
|
||||||
throwClassifiedTransportError(error, chatCompletionsUrl)
|
|
||||||
}
|
|
||||||
|
|
||||||
if (response.ok) {
|
if (response.ok) {
|
||||||
return response
|
return response
|
||||||
}
|
}
|
||||||
|
|
||||||
if (
|
if (
|
||||||
isGithub &&
|
isGithub &&
|
||||||
response.status === 429 &&
|
response.status === 429 &&
|
||||||
@@ -1649,43 +1500,34 @@ class OpenAIShimMessages {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
let responsesResponse: Response
|
const responsesResponse = await fetch(responsesUrl, {
|
||||||
try {
|
method: 'POST',
|
||||||
responsesResponse = await fetchWithProxyRetry(responsesUrl, {
|
headers,
|
||||||
method: 'POST',
|
body: JSON.stringify(responsesBody),
|
||||||
headers,
|
signal: options?.signal,
|
||||||
body: JSON.stringify(responsesBody),
|
})
|
||||||
signal: options?.signal,
|
|
||||||
})
|
|
||||||
} catch (error) {
|
|
||||||
throwClassifiedTransportError(error, responsesUrl)
|
|
||||||
}
|
|
||||||
|
|
||||||
if (responsesResponse.ok) {
|
if (responsesResponse.ok) {
|
||||||
return responsesResponse
|
return responsesResponse
|
||||||
}
|
}
|
||||||
const responsesErrorBody = await responsesResponse.text().catch(() => 'unknown error')
|
const responsesErrorBody = await responsesResponse.text().catch(() => 'unknown error')
|
||||||
let responsesErrorResponse: object | undefined
|
let responsesErrorResponse: object | undefined
|
||||||
try { responsesErrorResponse = JSON.parse(responsesErrorBody) } catch { /* raw text */ }
|
try { responsesErrorResponse = JSON.parse(responsesErrorBody) } catch { /* raw text */ }
|
||||||
throwClassifiedHttpError(
|
throw APIError.generate(
|
||||||
responsesResponse.status,
|
responsesResponse.status,
|
||||||
responsesErrorBody,
|
|
||||||
responsesErrorResponse,
|
responsesErrorResponse,
|
||||||
|
`OpenAI API error ${responsesResponse.status}: ${responsesErrorBody}`,
|
||||||
responsesResponse.headers,
|
responsesResponse.headers,
|
||||||
responsesUrl,
|
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
let errorResponse: object | undefined
|
let errorResponse: object | undefined
|
||||||
try { errorResponse = JSON.parse(errorBody) } catch { /* raw text */ }
|
try { errorResponse = JSON.parse(errorBody) } catch { /* raw text */ }
|
||||||
throwClassifiedHttpError(
|
throw APIError.generate(
|
||||||
response.status,
|
response.status,
|
||||||
errorBody,
|
|
||||||
errorResponse,
|
errorResponse,
|
||||||
|
`OpenAI API error ${response.status}: ${errorBody}${rateHint}`,
|
||||||
response.headers as unknown as Headers,
|
response.headers as unknown as Headers,
|
||||||
chatCompletionsUrl,
|
|
||||||
rateHint,
|
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -1,107 +0,0 @@
|
|||||||
import { afterEach, expect, mock, test } from 'bun:test'
|
|
||||||
|
|
||||||
const originalEnv = {
|
|
||||||
CLAUDE_CODE_USE_OPENAI: process.env.CLAUDE_CODE_USE_OPENAI,
|
|
||||||
CLAUDE_CODE_USE_MISTRAL: process.env.CLAUDE_CODE_USE_MISTRAL,
|
|
||||||
OPENAI_BASE_URL: process.env.OPENAI_BASE_URL,
|
|
||||||
OPENAI_MODEL: process.env.OPENAI_MODEL,
|
|
||||||
OPENAI_API_BASE: process.env.OPENAI_API_BASE,
|
|
||||||
MISTRAL_BASE_URL: process.env.MISTRAL_BASE_URL,
|
|
||||||
MISTRAL_MODEL: process.env.MISTRAL_MODEL,
|
|
||||||
}
|
|
||||||
|
|
||||||
function restoreEnv(key: string, value: string | undefined): void {
|
|
||||||
if (value === undefined) {
|
|
||||||
delete process.env[key]
|
|
||||||
} else {
|
|
||||||
process.env[key] = value
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
afterEach(() => {
|
|
||||||
restoreEnv('CLAUDE_CODE_USE_OPENAI', originalEnv.CLAUDE_CODE_USE_OPENAI)
|
|
||||||
restoreEnv('CLAUDE_CODE_USE_MISTRAL', originalEnv.CLAUDE_CODE_USE_MISTRAL)
|
|
||||||
restoreEnv('OPENAI_BASE_URL', originalEnv.OPENAI_BASE_URL)
|
|
||||||
restoreEnv('OPENAI_MODEL', originalEnv.OPENAI_MODEL)
|
|
||||||
restoreEnv('OPENAI_API_BASE', originalEnv.OPENAI_API_BASE)
|
|
||||||
restoreEnv('MISTRAL_BASE_URL', originalEnv.MISTRAL_BASE_URL)
|
|
||||||
restoreEnv('MISTRAL_MODEL', originalEnv.MISTRAL_MODEL)
|
|
||||||
mock.restore()
|
|
||||||
})
|
|
||||||
|
|
||||||
test('logs a warning when OPENAI_BASE_URL is literal undefined', async () => {
|
|
||||||
const debugSpy = mock(() => {})
|
|
||||||
mock.module('../../utils/debug.js', () => ({
|
|
||||||
logForDebugging: debugSpy,
|
|
||||||
}))
|
|
||||||
|
|
||||||
process.env.CLAUDE_CODE_USE_OPENAI = '1'
|
|
||||||
process.env.OPENAI_BASE_URL = 'undefined'
|
|
||||||
process.env.OPENAI_MODEL = 'gpt-4o'
|
|
||||||
delete process.env.OPENAI_API_BASE
|
|
||||||
|
|
||||||
const nonce = `${Date.now()}-${Math.random()}`
|
|
||||||
const { resolveProviderRequest } = await import(`./providerConfig.ts?ts=${nonce}`)
|
|
||||||
|
|
||||||
const resolved = resolveProviderRequest()
|
|
||||||
|
|
||||||
expect(resolved.baseUrl).toBe('https://api.openai.com/v1')
|
|
||||||
|
|
||||||
const warningCall = debugSpy.mock.calls.find(call =>
|
|
||||||
typeof call?.[0] === 'string' &&
|
|
||||||
call[0].includes('OPENAI_BASE_URL') &&
|
|
||||||
call[0].includes('"undefined"'),
|
|
||||||
)
|
|
||||||
|
|
||||||
expect(warningCall).toBeDefined()
|
|
||||||
expect(warningCall?.[1]).toEqual({ level: 'warn' })
|
|
||||||
})
|
|
||||||
|
|
||||||
test('does not warn for OPENAI_API_BASE when OPENAI_BASE_URL is active', async () => {
|
|
||||||
const debugSpy = mock(() => {})
|
|
||||||
mock.module('../../utils/debug.js', () => ({
|
|
||||||
logForDebugging: debugSpy,
|
|
||||||
}))
|
|
||||||
|
|
||||||
process.env.CLAUDE_CODE_USE_OPENAI = '1'
|
|
||||||
delete process.env.CLAUDE_CODE_USE_MISTRAL
|
|
||||||
process.env.OPENAI_BASE_URL = 'http://127.0.0.1:11434/v1'
|
|
||||||
process.env.OPENAI_MODEL = 'qwen2.5-coder:7b'
|
|
||||||
process.env.OPENAI_API_BASE = 'undefined'
|
|
||||||
|
|
||||||
const nonce = `${Date.now()}-${Math.random()}`
|
|
||||||
const { resolveProviderRequest } = await import(`./providerConfig.ts?ts=${nonce}`)
|
|
||||||
|
|
||||||
const resolved = resolveProviderRequest()
|
|
||||||
|
|
||||||
expect(resolved.baseUrl).toBe('http://127.0.0.1:11434/v1')
|
|
||||||
|
|
||||||
const aliasWarning = debugSpy.mock.calls.find(call =>
|
|
||||||
typeof call?.[0] === 'string' &&
|
|
||||||
call[0].includes('OPENAI_API_BASE') &&
|
|
||||||
call[0].includes('"undefined"'),
|
|
||||||
)
|
|
||||||
|
|
||||||
expect(aliasWarning).toBeUndefined()
|
|
||||||
})
|
|
||||||
|
|
||||||
test('uses OPENAI_API_BASE as fallback in mistral mode when MISTRAL_BASE_URL is unset', async () => {
|
|
||||||
const debugSpy = mock(() => {})
|
|
||||||
mock.module('../../utils/debug.js', () => ({
|
|
||||||
logForDebugging: debugSpy,
|
|
||||||
}))
|
|
||||||
|
|
||||||
delete process.env.CLAUDE_CODE_USE_OPENAI
|
|
||||||
process.env.CLAUDE_CODE_USE_MISTRAL = '1'
|
|
||||||
delete process.env.MISTRAL_BASE_URL
|
|
||||||
process.env.MISTRAL_MODEL = 'mistral-medium-latest'
|
|
||||||
process.env.OPENAI_API_BASE = 'http://127.0.0.1:11434/v1'
|
|
||||||
|
|
||||||
const nonce = `${Date.now()}-${Math.random()}`
|
|
||||||
const { resolveProviderRequest } = await import(`./providerConfig.ts?ts=${nonce}`)
|
|
||||||
|
|
||||||
const resolved = resolveProviderRequest()
|
|
||||||
|
|
||||||
expect(resolved.baseUrl).toBe('http://127.0.0.1:11434/v1')
|
|
||||||
expect(debugSpy.mock.calls).toHaveLength(0)
|
|
||||||
})
|
|
||||||
@@ -8,20 +8,17 @@ import {
|
|||||||
readCodexCredentials,
|
readCodexCredentials,
|
||||||
type CodexCredentialBlob,
|
type CodexCredentialBlob,
|
||||||
} from '../../utils/codexCredentials.js'
|
} from '../../utils/codexCredentials.js'
|
||||||
import { logForDebugging } from '../../utils/debug.js'
|
|
||||||
import { isEnvTruthy } from '../../utils/envUtils.js'
|
import { isEnvTruthy } from '../../utils/envUtils.js'
|
||||||
import {
|
import {
|
||||||
asTrimmedString,
|
asTrimmedString,
|
||||||
parseChatgptAccountId,
|
parseChatgptAccountId,
|
||||||
} from './codexOAuthShared.js'
|
} from './codexOAuthShared.js'
|
||||||
import { DEFAULT_GEMINI_BASE_URL } from 'src/utils/providerProfile.js'
|
|
||||||
|
|
||||||
export const DEFAULT_OPENAI_BASE_URL = 'https://api.openai.com/v1'
|
export const DEFAULT_OPENAI_BASE_URL = 'https://api.openai.com/v1'
|
||||||
export const DEFAULT_CODEX_BASE_URL = 'https://chatgpt.com/backend-api/codex'
|
export const DEFAULT_CODEX_BASE_URL = 'https://chatgpt.com/backend-api/codex'
|
||||||
export const DEFAULT_MISTRAL_BASE_URL = 'https://api.mistral.ai/v1'
|
export const DEFAULT_MISTRAL_BASE_URL = 'https://api.mistral.ai/v1'
|
||||||
/** Default GitHub Copilot API model when user selects copilot / github:copilot */
|
/** Default GitHub Copilot API model when user selects copilot / github:copilot */
|
||||||
export const DEFAULT_GITHUB_MODELS_API_MODEL = 'gpt-4o'
|
export const DEFAULT_GITHUB_MODELS_API_MODEL = 'gpt-4o'
|
||||||
const warnedUndefinedEnvNames = new Set<string>()
|
|
||||||
|
|
||||||
const CODEX_ALIAS_MODELS: Record<
|
const CODEX_ALIAS_MODELS: Record<
|
||||||
string,
|
string,
|
||||||
@@ -132,33 +129,7 @@ function isPrivateIpv6Address(hostname: string): boolean {
|
|||||||
function asEnvUrl(value: string | undefined): string | undefined {
|
function asEnvUrl(value: string | undefined): string | undefined {
|
||||||
if (!value) return undefined
|
if (!value) return undefined
|
||||||
const trimmed = value.trim()
|
const trimmed = value.trim()
|
||||||
if (!trimmed) return undefined
|
if (!trimmed || trimmed === 'undefined') return undefined
|
||||||
if (trimmed === 'undefined') {
|
|
||||||
return undefined
|
|
||||||
}
|
|
||||||
return trimmed
|
|
||||||
}
|
|
||||||
|
|
||||||
function asNamedEnvUrl(
|
|
||||||
value: string | undefined,
|
|
||||||
envName: string,
|
|
||||||
): string | undefined {
|
|
||||||
if (!value) return undefined
|
|
||||||
|
|
||||||
const trimmed = value.trim()
|
|
||||||
if (!trimmed) return undefined
|
|
||||||
|
|
||||||
if (trimmed === 'undefined') {
|
|
||||||
if (!warnedUndefinedEnvNames.has(envName)) {
|
|
||||||
warnedUndefinedEnvNames.add(envName)
|
|
||||||
logForDebugging(
|
|
||||||
`[provider-config] Environment variable ${envName} is the literal string "undefined"; ignoring it.`,
|
|
||||||
{ level: 'warn' },
|
|
||||||
)
|
|
||||||
}
|
|
||||||
return undefined
|
|
||||||
}
|
|
||||||
|
|
||||||
return trimmed
|
return trimmed
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -382,52 +353,23 @@ export function resolveProviderRequest(options?: {
|
|||||||
}): ResolvedProviderRequest {
|
}): ResolvedProviderRequest {
|
||||||
const isGithubMode = isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB)
|
const isGithubMode = isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB)
|
||||||
const isMistralMode = isEnvTruthy(process.env.CLAUDE_CODE_USE_MISTRAL)
|
const isMistralMode = isEnvTruthy(process.env.CLAUDE_CODE_USE_MISTRAL)
|
||||||
const isGeminiMode = isEnvTruthy(process.env.CLAUDE_CODE_USE_GEMINI)
|
|
||||||
const requestedModel =
|
const requestedModel =
|
||||||
options?.model?.trim() ||
|
options?.model?.trim() ||
|
||||||
(isMistralMode
|
(isMistralMode
|
||||||
? process.env.MISTRAL_MODEL?.trim()
|
? process.env.MISTRAL_MODEL?.trim()
|
||||||
: process.env.OPENAI_MODEL?.trim()) ||
|
: process.env.OPENAI_MODEL?.trim()) ||
|
||||||
(isGeminiMode
|
|
||||||
? process.env.GEMINI_MODEL?.trim()
|
|
||||||
: process.env.OPENAI_MODEL?.trim()) ||
|
|
||||||
options?.fallbackModel?.trim() ||
|
options?.fallbackModel?.trim() ||
|
||||||
(isGithubMode ? 'github:copilot' : 'gpt-4o')
|
(isGithubMode ? 'github:copilot' : 'gpt-4o')
|
||||||
const descriptor = parseModelDescriptor(requestedModel)
|
const descriptor = parseModelDescriptor(requestedModel)
|
||||||
const explicitBaseUrl = asEnvUrl(options?.baseUrl)
|
const explicitBaseUrl = asEnvUrl(options?.baseUrl)
|
||||||
|
|
||||||
const normalizedMistralEnvBaseUrl = asNamedEnvUrl(
|
|
||||||
process.env.MISTRAL_BASE_URL,
|
|
||||||
'MISTRAL_BASE_URL',
|
|
||||||
)
|
|
||||||
|
|
||||||
const normalizedGeminiEnvBaseUrl = asNamedEnvUrl(
|
|
||||||
process.env.GEMINI_BASE_URL,
|
|
||||||
'GEMINI_BASE_URL',
|
|
||||||
)
|
|
||||||
|
|
||||||
const primaryEnvBaseUrl = isMistralMode
|
|
||||||
? normalizedMistralEnvBaseUrl
|
|
||||||
: isGeminiMode
|
|
||||||
? normalizedGeminiEnvBaseUrl
|
|
||||||
: asNamedEnvUrl(process.env.OPENAI_BASE_URL, 'OPENAI_BASE_URL')
|
|
||||||
|
|
||||||
const fallbackEnvBaseUrl = isMistralMode
|
|
||||||
? (primaryEnvBaseUrl === undefined
|
|
||||||
? asNamedEnvUrl(process.env.OPENAI_API_BASE, 'OPENAI_API_BASE') ?? DEFAULT_MISTRAL_BASE_URL
|
|
||||||
: undefined)
|
|
||||||
: isGeminiMode
|
|
||||||
? (primaryEnvBaseUrl === undefined
|
|
||||||
? asNamedEnvUrl(process.env.OPENAI_API_BASE, 'OPENAI_API_BASE') ?? DEFAULT_GEMINI_BASE_URL
|
|
||||||
: undefined)
|
|
||||||
: (primaryEnvBaseUrl === undefined
|
|
||||||
? asNamedEnvUrl(process.env.OPENAI_API_BASE, 'OPENAI_API_BASE')
|
|
||||||
: undefined)
|
|
||||||
|
|
||||||
const envBaseUrlRaw =
|
const envBaseUrlRaw =
|
||||||
explicitBaseUrl ??
|
explicitBaseUrl ??
|
||||||
primaryEnvBaseUrl ??
|
asEnvUrl(
|
||||||
fallbackEnvBaseUrl
|
isMistralMode
|
||||||
|
? (process.env.MISTRAL_BASE_URL ?? DEFAULT_MISTRAL_BASE_URL)
|
||||||
|
: process.env.OPENAI_BASE_URL
|
||||||
|
) ??
|
||||||
|
asEnvUrl(process.env.OPENAI_API_BASE)
|
||||||
|
|
||||||
const isCodexModelForGithub = isGithubMode && isCodexAlias(requestedModel)
|
const isCodexModelForGithub = isGithubMode && isCodexAlias(requestedModel)
|
||||||
const envBaseUrl =
|
const envBaseUrl =
|
||||||
|
|||||||
@@ -110,14 +110,9 @@ export function calculateTokenWarningState(
|
|||||||
? autoCompactThreshold
|
? autoCompactThreshold
|
||||||
: getEffectiveContextWindowSize(model)
|
: getEffectiveContextWindowSize(model)
|
||||||
|
|
||||||
// Use the raw context window (without output reservation) for the percentage
|
|
||||||
// display, so users see remaining context relative to the model's full capacity.
|
|
||||||
// The threshold (which subtracts buffer) should only affect when we warn/compact,
|
|
||||||
// not what percentage we display.
|
|
||||||
const rawContextWindow = getContextWindowForModel(model, getSdkBetas())
|
|
||||||
const percentLeft = Math.max(
|
const percentLeft = Math.max(
|
||||||
0,
|
0,
|
||||||
Math.round(((rawContextWindow - tokenUsage) / rawContextWindow) * 100),
|
Math.round(((threshold - tokenUsage) / threshold) * 100),
|
||||||
)
|
)
|
||||||
|
|
||||||
const warningThreshold = threshold - WARNING_THRESHOLD_BUFFER_TOKENS
|
const warningThreshold = threshold - WARNING_THRESHOLD_BUFFER_TOKENS
|
||||||
|
|||||||
@@ -1,152 +0,0 @@
|
|||||||
import { describe, test, expect, beforeEach, afterEach } from 'bun:test'
|
|
||||||
import { DiagnosticTrackingService } from './diagnosticTracking.js'
|
|
||||||
import type { MCPServerConnection } from './mcp/types.js'
|
|
||||||
|
|
||||||
// Mock the IDE client utility
|
|
||||||
const mockGetConnectedIdeClient = (clients: MCPServerConnection[]) =>
|
|
||||||
clients.find(client => client.type === 'connected')
|
|
||||||
|
|
||||||
describe('DiagnosticTrackingService', () => {
|
|
||||||
let service: DiagnosticTrackingService
|
|
||||||
let mockClients: MCPServerConnection[]
|
|
||||||
let mockIdeClient: MCPServerConnection
|
|
||||||
|
|
||||||
beforeEach(() => {
|
|
||||||
// Get fresh instance for each test
|
|
||||||
service = DiagnosticTrackingService.getInstance()
|
|
||||||
|
|
||||||
// Setup mock clients
|
|
||||||
mockIdeClient = {
|
|
||||||
type: 'connected',
|
|
||||||
name: 'test-ide',
|
|
||||||
capabilities: {},
|
|
||||||
config: {},
|
|
||||||
cleanup: async () => {},
|
|
||||||
client: {
|
|
||||||
request: async () => ({}),
|
|
||||||
setNotificationHandler: () => {},
|
|
||||||
close: async () => {},
|
|
||||||
},
|
|
||||||
} as unknown as MCPServerConnection
|
|
||||||
|
|
||||||
mockClients = [
|
|
||||||
{ type: 'disconnected', name: 'test-disconnected', config: {} } as unknown as MCPServerConnection,
|
|
||||||
mockIdeClient,
|
|
||||||
]
|
|
||||||
})
|
|
||||||
|
|
||||||
afterEach(async () => {
|
|
||||||
await service.shutdown()
|
|
||||||
})
|
|
||||||
|
|
||||||
describe('handleQueryStart', () => {
|
|
||||||
test('should store MCP clients and initialize service', async () => {
|
|
||||||
await service.handleQueryStart(mockClients)
|
|
||||||
|
|
||||||
// Service should be initialized
|
|
||||||
expect(service).toBeDefined()
|
|
||||||
|
|
||||||
// Should be able to get IDE client from stored clients
|
|
||||||
// We can't directly test private methods, but we can test the behavior
|
|
||||||
const result = await service.getNewDiagnosticsCompat()
|
|
||||||
expect(result).toEqual([]) // Should return empty when no diagnostics
|
|
||||||
})
|
|
||||||
|
|
||||||
test('should reset service if already initialized', async () => {
|
|
||||||
// Initialize first
|
|
||||||
await service.handleQueryStart(mockClients)
|
|
||||||
|
|
||||||
// Call again - should reset without error
|
|
||||||
await service.handleQueryStart(mockClients)
|
|
||||||
|
|
||||||
// Should still work
|
|
||||||
const result = await service.getNewDiagnosticsCompat()
|
|
||||||
expect(result).toEqual([])
|
|
||||||
})
|
|
||||||
})
|
|
||||||
|
|
||||||
describe('backward-compatible methods', () => {
|
|
||||||
beforeEach(async () => {
|
|
||||||
await service.handleQueryStart(mockClients)
|
|
||||||
})
|
|
||||||
|
|
||||||
test('beforeFileEditedCompat should work without explicit client', async () => {
|
|
||||||
// Should not throw error and should return undefined when no IDE client
|
|
||||||
const result = await service.beforeFileEditedCompat('/test/file.ts')
|
|
||||||
expect(result).toBeUndefined()
|
|
||||||
})
|
|
||||||
|
|
||||||
test('getNewDiagnosticsCompat should work without explicit client', async () => {
|
|
||||||
const result = await service.getNewDiagnosticsCompat()
|
|
||||||
expect(Array.isArray(result)).toBe(true)
|
|
||||||
})
|
|
||||||
|
|
||||||
test('ensureFileOpenedCompat should work without explicit client', async () => {
|
|
||||||
const result = await service.ensureFileOpenedCompat('/test/file.ts')
|
|
||||||
expect(result).toBeUndefined()
|
|
||||||
})
|
|
||||||
})
|
|
||||||
|
|
||||||
describe('new explicit client methods', () => {
|
|
||||||
test('beforeFileEdited should require client parameter', async () => {
|
|
||||||
// Should not work without client
|
|
||||||
const result = await service.beforeFileEdited('/test/file.ts', undefined as any)
|
|
||||||
expect(result).toBeUndefined()
|
|
||||||
})
|
|
||||||
|
|
||||||
test('getNewDiagnostics should require client parameter', async () => {
|
|
||||||
// Should not work without client
|
|
||||||
const result = await service.getNewDiagnostics(undefined as any)
|
|
||||||
expect(result).toEqual([])
|
|
||||||
})
|
|
||||||
|
|
||||||
test('ensureFileOpened should require client parameter', async () => {
|
|
||||||
// Should not work without client
|
|
||||||
const result = await service.ensureFileOpened('/test/file.ts', undefined as any)
|
|
||||||
expect(result).toBeUndefined()
|
|
||||||
})
|
|
||||||
})
|
|
||||||
|
|
||||||
describe('shutdown', () => {
|
|
||||||
test('should clear stored clients on shutdown', async () => {
|
|
||||||
await service.handleQueryStart(mockClients)
|
|
||||||
|
|
||||||
// Verify service is working
|
|
||||||
const beforeResult = await service.getNewDiagnosticsCompat()
|
|
||||||
expect(Array.isArray(beforeResult)).toBe(true)
|
|
||||||
|
|
||||||
// Shutdown
|
|
||||||
await service.shutdown()
|
|
||||||
|
|
||||||
// After shutdown, compat methods should return empty results
|
|
||||||
const afterResult = await service.getNewDiagnosticsCompat()
|
|
||||||
expect(afterResult).toEqual([])
|
|
||||||
})
|
|
||||||
})
|
|
||||||
|
|
||||||
describe('integration with existing functionality', () => {
|
|
||||||
test('should maintain existing diagnostic tracking behavior', async () => {
|
|
||||||
await service.handleQueryStart(mockClients)
|
|
||||||
|
|
||||||
// Test baseline tracking
|
|
||||||
await service.beforeFileEditedCompat('/test/file.ts')
|
|
||||||
|
|
||||||
// Test getting new diagnostics (should be empty since no IDE client is actually connected)
|
|
||||||
const newDiagnostics = await service.getNewDiagnosticsCompat()
|
|
||||||
expect(Array.isArray(newDiagnostics)).toBe(true)
|
|
||||||
})
|
|
||||||
|
|
||||||
test('should handle missing IDE client gracefully', async () => {
|
|
||||||
// Test with no connected clients
|
|
||||||
const noIdeClients = [
|
|
||||||
{ type: 'disconnected', name: 'test-disconnected-2', config: {} } as unknown as MCPServerConnection,
|
|
||||||
]
|
|
||||||
|
|
||||||
await service.handleQueryStart(noIdeClients)
|
|
||||||
|
|
||||||
// Should handle gracefully
|
|
||||||
const result = await service.getNewDiagnosticsCompat()
|
|
||||||
expect(result).toEqual([])
|
|
||||||
})
|
|
||||||
})
|
|
||||||
})
|
|
||||||
@@ -32,7 +32,7 @@ export class DiagnosticTrackingService {
|
|||||||
private baseline: Map<string, Diagnostic[]> = new Map()
|
private baseline: Map<string, Diagnostic[]> = new Map()
|
||||||
|
|
||||||
private initialized = false
|
private initialized = false
|
||||||
private currentMcpClients: MCPServerConnection[] = []
|
private mcpClient: MCPServerConnection | undefined
|
||||||
|
|
||||||
// Track when files were last processed/fetched
|
// Track when files were last processed/fetched
|
||||||
private lastProcessedTimestamps: Map<string, number> = new Map()
|
private lastProcessedTimestamps: Map<string, number> = new Map()
|
||||||
@@ -48,17 +48,18 @@ export class DiagnosticTrackingService {
|
|||||||
return DiagnosticTrackingService.instance
|
return DiagnosticTrackingService.instance
|
||||||
}
|
}
|
||||||
|
|
||||||
initialize() {
|
initialize(mcpClient: MCPServerConnection) {
|
||||||
if (this.initialized) {
|
if (this.initialized) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// TODO: Do not cache the connected mcpClient since it can change.
|
||||||
|
this.mcpClient = mcpClient
|
||||||
this.initialized = true
|
this.initialized = true
|
||||||
}
|
}
|
||||||
|
|
||||||
async shutdown(): Promise<void> {
|
async shutdown(): Promise<void> {
|
||||||
this.initialized = false
|
this.initialized = false
|
||||||
this.currentMcpClients = []
|
|
||||||
this.baseline.clear()
|
this.baseline.clear()
|
||||||
this.rightFileDiagnosticsState.clear()
|
this.rightFileDiagnosticsState.clear()
|
||||||
this.lastProcessedTimestamps.clear()
|
this.lastProcessedTimestamps.clear()
|
||||||
@@ -74,46 +75,6 @@ export class DiagnosticTrackingService {
|
|||||||
this.lastProcessedTimestamps.clear()
|
this.lastProcessedTimestamps.clear()
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
|
||||||
* Get the current IDE client from stored MCP clients
|
|
||||||
*/
|
|
||||||
private getCurrentIdeClient(): MCPServerConnection | undefined {
|
|
||||||
return getConnectedIdeClient(this.currentMcpClients)
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Backward-compatible method that uses stored IDE client
|
|
||||||
*/
|
|
||||||
async beforeFileEditedCompat(filePath: string): Promise<void> {
|
|
||||||
const ideClient = this.getCurrentIdeClient()
|
|
||||||
if (!ideClient) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
return await this.beforeFileEdited(filePath, ideClient)
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Backward-compatible method that uses stored IDE client
|
|
||||||
*/
|
|
||||||
async getNewDiagnosticsCompat(): Promise<DiagnosticFile[]> {
|
|
||||||
const ideClient = this.getCurrentIdeClient()
|
|
||||||
if (!ideClient) {
|
|
||||||
return []
|
|
||||||
}
|
|
||||||
return await this.getNewDiagnostics(ideClient)
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Backward-compatible method that uses stored IDE client
|
|
||||||
*/
|
|
||||||
async ensureFileOpenedCompat(fileUri: string): Promise<void> {
|
|
||||||
const ideClient = this.getCurrentIdeClient()
|
|
||||||
if (!ideClient) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
return await this.ensureFileOpened(fileUri, ideClient)
|
|
||||||
}
|
|
||||||
|
|
||||||
private normalizeFileUri(fileUri: string): string {
|
private normalizeFileUri(fileUri: string): string {
|
||||||
// Remove our protocol prefixes
|
// Remove our protocol prefixes
|
||||||
const protocolPrefixes = [
|
const protocolPrefixes = [
|
||||||
@@ -139,11 +100,11 @@ export class DiagnosticTrackingService {
|
|||||||
* Ensure a file is opened in the IDE before processing.
|
* Ensure a file is opened in the IDE before processing.
|
||||||
* This is important for language services like diagnostics to work properly.
|
* This is important for language services like diagnostics to work properly.
|
||||||
*/
|
*/
|
||||||
async ensureFileOpened(fileUri: string, mcpClient: MCPServerConnection): Promise<void> {
|
async ensureFileOpened(fileUri: string): Promise<void> {
|
||||||
if (
|
if (
|
||||||
!this.initialized ||
|
!this.initialized ||
|
||||||
!mcpClient ||
|
!this.mcpClient ||
|
||||||
mcpClient.type !== 'connected'
|
this.mcpClient.type !== 'connected'
|
||||||
) {
|
) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@@ -160,7 +121,7 @@ export class DiagnosticTrackingService {
|
|||||||
selectToEndOfLine: false,
|
selectToEndOfLine: false,
|
||||||
makeFrontmost: false,
|
makeFrontmost: false,
|
||||||
},
|
},
|
||||||
mcpClient,
|
this.mcpClient,
|
||||||
)
|
)
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
logError(error as Error)
|
logError(error as Error)
|
||||||
@@ -171,11 +132,11 @@ export class DiagnosticTrackingService {
|
|||||||
* Capture baseline diagnostics for a specific file before editing.
|
* Capture baseline diagnostics for a specific file before editing.
|
||||||
* This is called before editing a file to ensure we have a baseline to compare against.
|
* This is called before editing a file to ensure we have a baseline to compare against.
|
||||||
*/
|
*/
|
||||||
async beforeFileEdited(filePath: string, mcpClient: MCPServerConnection): Promise<void> {
|
async beforeFileEdited(filePath: string): Promise<void> {
|
||||||
if (
|
if (
|
||||||
!this.initialized ||
|
!this.initialized ||
|
||||||
!mcpClient ||
|
!this.mcpClient ||
|
||||||
mcpClient.type !== 'connected'
|
this.mcpClient.type !== 'connected'
|
||||||
) {
|
) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@@ -186,7 +147,7 @@ export class DiagnosticTrackingService {
|
|||||||
const result = await callIdeRpc(
|
const result = await callIdeRpc(
|
||||||
'getDiagnostics',
|
'getDiagnostics',
|
||||||
{ uri: `file://${filePath}` },
|
{ uri: `file://${filePath}` },
|
||||||
mcpClient,
|
this.mcpClient,
|
||||||
)
|
)
|
||||||
const diagnosticFile = this.parseDiagnosticResult(result)[0]
|
const diagnosticFile = this.parseDiagnosticResult(result)[0]
|
||||||
if (diagnosticFile) {
|
if (diagnosticFile) {
|
||||||
@@ -224,11 +185,11 @@ export class DiagnosticTrackingService {
|
|||||||
* Get new diagnostics from file://, _claude_fs_right, and _claude_fs_ URIs that aren't in the baseline.
|
* Get new diagnostics from file://, _claude_fs_right, and _claude_fs_ URIs that aren't in the baseline.
|
||||||
* Only processes diagnostics for files that have been edited.
|
* Only processes diagnostics for files that have been edited.
|
||||||
*/
|
*/
|
||||||
async getNewDiagnostics(mcpClient: MCPServerConnection): Promise<DiagnosticFile[]> {
|
async getNewDiagnostics(): Promise<DiagnosticFile[]> {
|
||||||
if (
|
if (
|
||||||
!this.initialized ||
|
!this.initialized ||
|
||||||
!mcpClient ||
|
!this.mcpClient ||
|
||||||
mcpClient.type !== 'connected'
|
this.mcpClient.type !== 'connected'
|
||||||
) {
|
) {
|
||||||
return []
|
return []
|
||||||
}
|
}
|
||||||
@@ -239,7 +200,7 @@ export class DiagnosticTrackingService {
|
|||||||
const result = await callIdeRpc(
|
const result = await callIdeRpc(
|
||||||
'getDiagnostics',
|
'getDiagnostics',
|
||||||
{}, // Empty params fetches all diagnostics
|
{}, // Empty params fetches all diagnostics
|
||||||
mcpClient,
|
this.mcpClient,
|
||||||
)
|
)
|
||||||
allDiagnosticFiles = this.parseDiagnosticResult(result)
|
allDiagnosticFiles = this.parseDiagnosticResult(result)
|
||||||
} catch (_error) {
|
} catch (_error) {
|
||||||
@@ -367,16 +328,13 @@ export class DiagnosticTrackingService {
|
|||||||
* @param shouldQuery Whether a query is actually being made (not just a command)
|
* @param shouldQuery Whether a query is actually being made (not just a command)
|
||||||
*/
|
*/
|
||||||
async handleQueryStart(clients: MCPServerConnection[]): Promise<void> {
|
async handleQueryStart(clients: MCPServerConnection[]): Promise<void> {
|
||||||
// Store the current MCP clients for later use
|
|
||||||
this.currentMcpClients = clients
|
|
||||||
|
|
||||||
// Only proceed if we should query and have clients
|
// Only proceed if we should query and have clients
|
||||||
if (!this.initialized) {
|
if (!this.initialized) {
|
||||||
// Find the connected IDE client
|
// Find the connected IDE client
|
||||||
const connectedIdeClient = getConnectedIdeClient(clients)
|
const connectedIdeClient = getConnectedIdeClient(clients)
|
||||||
|
|
||||||
if (connectedIdeClient) {
|
if (connectedIdeClient) {
|
||||||
this.initialize()
|
this.initialize(connectedIdeClient)
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
// Reset diagnostic tracking for new query loops
|
// Reset diagnostic tracking for new query loops
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
import { afterEach, describe, expect, mock, test } from 'bun:test'
|
import { afterEach, beforeEach, describe, expect, mock, test } from 'bun:test'
|
||||||
|
|
||||||
import {
|
import {
|
||||||
DEFAULT_GITHUB_DEVICE_SCOPE,
|
DEFAULT_GITHUB_DEVICE_SCOPE,
|
||||||
@@ -12,15 +12,22 @@ async function importFreshModule() {
|
|||||||
return import(`./deviceFlow.ts?ts=${Date.now()}-${Math.random()}`)
|
return import(`./deviceFlow.ts?ts=${Date.now()}-${Math.random()}`)
|
||||||
}
|
}
|
||||||
|
|
||||||
afterEach(() => {
|
|
||||||
mock.restore()
|
|
||||||
})
|
|
||||||
|
|
||||||
describe('requestDeviceCode', () => {
|
describe('requestDeviceCode', () => {
|
||||||
|
const originalFetch = globalThis.fetch
|
||||||
|
|
||||||
|
beforeEach(() => {
|
||||||
|
mock.restore()
|
||||||
|
globalThis.fetch = originalFetch
|
||||||
|
})
|
||||||
|
|
||||||
|
afterEach(() => {
|
||||||
|
globalThis.fetch = originalFetch
|
||||||
|
})
|
||||||
|
|
||||||
test('parses successful device code response', async () => {
|
test('parses successful device code response', async () => {
|
||||||
const { requestDeviceCode } = await importFreshModule()
|
const { requestDeviceCode } = await importFreshModule()
|
||||||
|
|
||||||
const fetchImpl = mock(() =>
|
globalThis.fetch = mock(() =>
|
||||||
Promise.resolve(
|
Promise.resolve(
|
||||||
new Response(
|
new Response(
|
||||||
JSON.stringify({
|
JSON.stringify({
|
||||||
@@ -37,7 +44,7 @@ describe('requestDeviceCode', () => {
|
|||||||
|
|
||||||
const r = await requestDeviceCode({
|
const r = await requestDeviceCode({
|
||||||
clientId: 'test-client',
|
clientId: 'test-client',
|
||||||
fetchImpl,
|
fetchImpl: globalThis.fetch,
|
||||||
})
|
})
|
||||||
expect(r.device_code).toBe('abc')
|
expect(r.device_code).toBe('abc')
|
||||||
expect(r.user_code).toBe('ABCD-1234')
|
expect(r.user_code).toBe('ABCD-1234')
|
||||||
@@ -50,17 +57,17 @@ describe('requestDeviceCode', () => {
|
|||||||
const { requestDeviceCode, GitHubDeviceFlowError } =
|
const { requestDeviceCode, GitHubDeviceFlowError } =
|
||||||
await importFreshModule()
|
await importFreshModule()
|
||||||
|
|
||||||
const fetchImpl = mock(() =>
|
globalThis.fetch = mock(() =>
|
||||||
Promise.resolve(new Response('bad', { status: 500 })),
|
Promise.resolve(new Response('bad', { status: 500 })),
|
||||||
)
|
)
|
||||||
await expect(
|
await expect(
|
||||||
requestDeviceCode({ clientId: 'x', fetchImpl }),
|
requestDeviceCode({ clientId: 'x', fetchImpl: globalThis.fetch }),
|
||||||
).rejects.toThrow(GitHubDeviceFlowError)
|
).rejects.toThrow(GitHubDeviceFlowError)
|
||||||
})
|
})
|
||||||
|
|
||||||
test('uses OAuth-safe default scope', async () => {
|
test('uses OAuth-safe default scope', async () => {
|
||||||
let capturedScope = ''
|
let capturedScope = ''
|
||||||
const fetchImpl = mock((_url: RequestInfo | URL, init?: RequestInit) => {
|
globalThis.fetch = mock((_url: RequestInfo | URL, init?: RequestInit) => {
|
||||||
const body = init?.body
|
const body = init?.body
|
||||||
if (body instanceof URLSearchParams) {
|
if (body instanceof URLSearchParams) {
|
||||||
capturedScope = body.get('scope') ?? ''
|
capturedScope = body.get('scope') ?? ''
|
||||||
@@ -80,7 +87,7 @@ describe('requestDeviceCode', () => {
|
|||||||
)
|
)
|
||||||
})
|
})
|
||||||
|
|
||||||
await requestDeviceCode({ clientId: 'test-client', fetchImpl })
|
await requestDeviceCode({ clientId: 'test-client', fetchImpl: globalThis.fetch })
|
||||||
expect(capturedScope).toBe(DEFAULT_GITHUB_DEVICE_SCOPE)
|
expect(capturedScope).toBe(DEFAULT_GITHUB_DEVICE_SCOPE)
|
||||||
expect(capturedScope).toBe('read:user')
|
expect(capturedScope).toBe('read:user')
|
||||||
})
|
})
|
||||||
@@ -89,7 +96,7 @@ describe('requestDeviceCode', () => {
|
|||||||
const scopesSeen: string[] = []
|
const scopesSeen: string[] = []
|
||||||
let callCount = 0
|
let callCount = 0
|
||||||
|
|
||||||
const fetchImpl = mock((_url: RequestInfo | URL, init?: RequestInit) => {
|
globalThis.fetch = mock((_url: RequestInfo | URL, init?: RequestInit) => {
|
||||||
const body = init?.body
|
const body = init?.body
|
||||||
const scope =
|
const scope =
|
||||||
body instanceof URLSearchParams
|
body instanceof URLSearchParams
|
||||||
@@ -125,7 +132,7 @@ describe('requestDeviceCode', () => {
|
|||||||
const result = await requestDeviceCode({
|
const result = await requestDeviceCode({
|
||||||
clientId: 'test-client',
|
clientId: 'test-client',
|
||||||
scope: 'read:user,models:read',
|
scope: 'read:user,models:read',
|
||||||
fetchImpl,
|
fetchImpl: globalThis.fetch,
|
||||||
})
|
})
|
||||||
|
|
||||||
expect(result.device_code).toBe('abc')
|
expect(result.device_code).toBe('abc')
|
||||||
@@ -135,11 +142,17 @@ describe('requestDeviceCode', () => {
|
|||||||
})
|
})
|
||||||
|
|
||||||
describe('pollAccessToken', () => {
|
describe('pollAccessToken', () => {
|
||||||
|
const originalFetch = globalThis.fetch
|
||||||
|
|
||||||
|
afterEach(() => {
|
||||||
|
globalThis.fetch = originalFetch
|
||||||
|
})
|
||||||
|
|
||||||
test('returns token when GitHub responds with access_token immediately', async () => {
|
test('returns token when GitHub responds with access_token immediately', async () => {
|
||||||
const { pollAccessToken } = await importFreshModule()
|
const { pollAccessToken } = await importFreshModule()
|
||||||
|
|
||||||
let calls = 0
|
let calls = 0
|
||||||
const fetchImpl = mock(() => {
|
globalThis.fetch = mock(() => {
|
||||||
calls++
|
calls++
|
||||||
return Promise.resolve(
|
return Promise.resolve(
|
||||||
new Response(JSON.stringify({ access_token: 'tok-xyz' }), {
|
new Response(JSON.stringify({ access_token: 'tok-xyz' }), {
|
||||||
@@ -150,7 +163,7 @@ describe('pollAccessToken', () => {
|
|||||||
|
|
||||||
const token = await pollAccessToken('dev-code', {
|
const token = await pollAccessToken('dev-code', {
|
||||||
clientId: 'cid',
|
clientId: 'cid',
|
||||||
fetchImpl,
|
fetchImpl: globalThis.fetch,
|
||||||
})
|
})
|
||||||
expect(token).toBe('tok-xyz')
|
expect(token).toBe('tok-xyz')
|
||||||
expect(calls).toBe(1)
|
expect(calls).toBe(1)
|
||||||
@@ -159,7 +172,7 @@ describe('pollAccessToken', () => {
|
|||||||
test('throws on access_denied', async () => {
|
test('throws on access_denied', async () => {
|
||||||
const { pollAccessToken } = await importFreshModule()
|
const { pollAccessToken } = await importFreshModule()
|
||||||
|
|
||||||
const fetchImpl = mock(() =>
|
globalThis.fetch = mock(() =>
|
||||||
Promise.resolve(
|
Promise.resolve(
|
||||||
new Response(JSON.stringify({ error: 'access_denied' }), {
|
new Response(JSON.stringify({ error: 'access_denied' }), {
|
||||||
status: 200,
|
status: 200,
|
||||||
@@ -169,17 +182,23 @@ describe('pollAccessToken', () => {
|
|||||||
await expect(
|
await expect(
|
||||||
pollAccessToken('dc', {
|
pollAccessToken('dc', {
|
||||||
clientId: 'c',
|
clientId: 'c',
|
||||||
fetchImpl,
|
fetchImpl: globalThis.fetch,
|
||||||
}),
|
}),
|
||||||
).rejects.toThrow(/denied/)
|
).rejects.toThrow(/denied/)
|
||||||
})
|
})
|
||||||
})
|
})
|
||||||
|
|
||||||
describe('exchangeForCopilotToken', () => {
|
describe('exchangeForCopilotToken', () => {
|
||||||
|
const originalFetch = globalThis.fetch
|
||||||
|
|
||||||
|
afterEach(() => {
|
||||||
|
globalThis.fetch = originalFetch
|
||||||
|
})
|
||||||
|
|
||||||
test('parses successful Copilot token response', async () => {
|
test('parses successful Copilot token response', async () => {
|
||||||
const { exchangeForCopilotToken } = await importFreshModule()
|
const { exchangeForCopilotToken } = await importFreshModule()
|
||||||
|
|
||||||
const fetchImpl = mock(() =>
|
globalThis.fetch = mock(() =>
|
||||||
Promise.resolve(
|
Promise.resolve(
|
||||||
new Response(
|
new Response(
|
||||||
JSON.stringify({
|
JSON.stringify({
|
||||||
@@ -195,7 +214,7 @@ describe('exchangeForCopilotToken', () => {
|
|||||||
),
|
),
|
||||||
)
|
)
|
||||||
|
|
||||||
const result = await exchangeForCopilotToken('oauth-token', fetchImpl)
|
const result = await exchangeForCopilotToken('oauth-token', globalThis.fetch)
|
||||||
expect(result.token).toBe('copilot-token-xyz')
|
expect(result.token).toBe('copilot-token-xyz')
|
||||||
expect(result.expires_at).toBe(1700000000)
|
expect(result.expires_at).toBe(1700000000)
|
||||||
expect(result.refresh_in).toBe(3600)
|
expect(result.refresh_in).toBe(3600)
|
||||||
@@ -206,24 +225,24 @@ describe('exchangeForCopilotToken', () => {
|
|||||||
const { exchangeForCopilotToken, GitHubDeviceFlowError } =
|
const { exchangeForCopilotToken, GitHubDeviceFlowError } =
|
||||||
await importFreshModule()
|
await importFreshModule()
|
||||||
|
|
||||||
const fetchImpl = mock(() =>
|
globalThis.fetch = mock(() =>
|
||||||
Promise.resolve(new Response('unauthorized', { status: 401 })),
|
Promise.resolve(new Response('unauthorized', { status: 401 })),
|
||||||
)
|
)
|
||||||
await expect(
|
await expect(
|
||||||
exchangeForCopilotToken('bad-token', fetchImpl),
|
exchangeForCopilotToken('bad-token', globalThis.fetch),
|
||||||
).rejects.toThrow(GitHubDeviceFlowError)
|
).rejects.toThrow(GitHubDeviceFlowError)
|
||||||
})
|
})
|
||||||
|
|
||||||
test('throws on malformed response', async () => {
|
test('throws on malformed response', async () => {
|
||||||
const { exchangeForCopilotToken } = await importFreshModule()
|
const { exchangeForCopilotToken } = await importFreshModule()
|
||||||
|
|
||||||
const fetchImpl = mock(() =>
|
globalThis.fetch = mock(() =>
|
||||||
Promise.resolve(
|
Promise.resolve(
|
||||||
new Response(JSON.stringify({ invalid: 'data' }), { status: 200 }),
|
new Response(JSON.stringify({ invalid: 'data' }), { status: 200 }),
|
||||||
),
|
),
|
||||||
)
|
)
|
||||||
await expect(
|
await expect(
|
||||||
exchangeForCopilotToken('oauth-token', fetchImpl),
|
exchangeForCopilotToken('oauth-token', globalThis.fetch),
|
||||||
).rejects.toThrow(/Malformed/)
|
).rejects.toThrow(/Malformed/)
|
||||||
})
|
})
|
||||||
})
|
})
|
||||||
|
|||||||
@@ -48,6 +48,7 @@ import { TodoWriteTool } from './tools/TodoWriteTool/TodoWriteTool.js'
|
|||||||
import { ExitPlanModeV2Tool } from './tools/ExitPlanModeTool/ExitPlanModeV2Tool.js'
|
import { ExitPlanModeV2Tool } from './tools/ExitPlanModeTool/ExitPlanModeV2Tool.js'
|
||||||
import { TestingPermissionTool } from './tools/testing/TestingPermissionTool.js'
|
import { TestingPermissionTool } from './tools/testing/TestingPermissionTool.js'
|
||||||
import { GrepTool } from './tools/GrepTool/GrepTool.js'
|
import { GrepTool } from './tools/GrepTool/GrepTool.js'
|
||||||
|
import { RepoMapTool } from './tools/RepoMapTool/RepoMapTool.js'
|
||||||
// Lazy require to break circular dependency: tools.ts -> TeamCreateTool/TeamDeleteTool -> ... -> tools.ts
|
// Lazy require to break circular dependency: tools.ts -> TeamCreateTool/TeamDeleteTool -> ... -> tools.ts
|
||||||
/* eslint-disable @typescript-eslint/no-require-imports */
|
/* eslint-disable @typescript-eslint/no-require-imports */
|
||||||
const getTeamCreateTool = () =>
|
const getTeamCreateTool = () =>
|
||||||
@@ -188,6 +189,7 @@ export function getAllBaseTools(): Tools {
|
|||||||
// trick as ripgrep). When available, find/grep in Claude's shell are aliased
|
// trick as ripgrep). When available, find/grep in Claude's shell are aliased
|
||||||
// to these fast tools, so the dedicated Glob/Grep tools are unnecessary.
|
// to these fast tools, so the dedicated Glob/Grep tools are unnecessary.
|
||||||
...(hasEmbeddedSearchTools() ? [] : [GlobTool, GrepTool]),
|
...(hasEmbeddedSearchTools() ? [] : [GlobTool, GrepTool]),
|
||||||
|
RepoMapTool,
|
||||||
ExitPlanModeV2Tool,
|
ExitPlanModeV2Tool,
|
||||||
FileReadTool,
|
FileReadTool,
|
||||||
FileEditTool,
|
FileEditTool,
|
||||||
|
|||||||
@@ -422,7 +422,7 @@ export const FileEditTool = buildTool({
|
|||||||
activateConditionalSkillsForPaths([absoluteFilePath], cwd)
|
activateConditionalSkillsForPaths([absoluteFilePath], cwd)
|
||||||
}
|
}
|
||||||
|
|
||||||
await diagnosticTracker.beforeFileEditedCompat(absoluteFilePath)
|
await diagnosticTracker.beforeFileEdited(absoluteFilePath)
|
||||||
|
|
||||||
// Ensure parent directory exists before the atomic read-modify-write section.
|
// Ensure parent directory exists before the atomic read-modify-write section.
|
||||||
// These awaits must stay OUTSIDE the critical section below — a yield between
|
// These awaits must stay OUTSIDE the critical section below — a yield between
|
||||||
|
|||||||
@@ -244,7 +244,7 @@ export const FileWriteTool = buildTool({
|
|||||||
// Activate conditional skills whose path patterns match this file
|
// Activate conditional skills whose path patterns match this file
|
||||||
activateConditionalSkillsForPaths([fullFilePath], cwd)
|
activateConditionalSkillsForPaths([fullFilePath], cwd)
|
||||||
|
|
||||||
await diagnosticTracker.beforeFileEditedCompat(fullFilePath)
|
await diagnosticTracker.beforeFileEdited(fullFilePath)
|
||||||
|
|
||||||
// Ensure parent directory exists before the atomic read-modify-write section.
|
// Ensure parent directory exists before the atomic read-modify-write section.
|
||||||
// Must stay OUTSIDE the critical section below (a yield between the staleness
|
// Must stay OUTSIDE the critical section below (a yield between the staleness
|
||||||
|
|||||||
@@ -1,8 +1,7 @@
|
|||||||
import { Ajv } from 'ajv'
|
|
||||||
import { z } from 'zod/v4'
|
import { z } from 'zod/v4'
|
||||||
import { buildTool, type ToolDef, type ValidationResult } from '../../Tool.js'
|
import { buildTool, type ToolDef } from '../../Tool.js'
|
||||||
import { lazySchema } from '../../utils/lazySchema.js'
|
import { lazySchema } from '../../utils/lazySchema.js'
|
||||||
import type { PermissionResult } from '../../types/permissions.js'
|
import type { PermissionResult } from '../../utils/permissions/PermissionResult.js'
|
||||||
import { isOutputLineTruncated } from '../../utils/terminal.js'
|
import { isOutputLineTruncated } from '../../utils/terminal.js'
|
||||||
import { DESCRIPTION, PROMPT } from './prompt.js'
|
import { DESCRIPTION, PROMPT } from './prompt.js'
|
||||||
import {
|
import {
|
||||||
@@ -38,8 +37,6 @@ export type Output = z.infer<OutputSchema>
|
|||||||
// Re-export MCPProgress from centralized types to break import cycles
|
// Re-export MCPProgress from centralized types to break import cycles
|
||||||
export type { MCPProgress } from '../../types/tools.js'
|
export type { MCPProgress } from '../../types/tools.js'
|
||||||
|
|
||||||
const ajv = new Ajv({ strict: false })
|
|
||||||
|
|
||||||
export const MCPTool = buildTool({
|
export const MCPTool = buildTool({
|
||||||
isMcp: true,
|
isMcp: true,
|
||||||
// Overridden in mcpClient.ts with the real MCP tool name + args
|
// Overridden in mcpClient.ts with the real MCP tool name + args
|
||||||
@@ -75,27 +72,6 @@ export const MCPTool = buildTool({
|
|||||||
message: 'MCPTool requires permission.',
|
message: 'MCPTool requires permission.',
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
async validateInput(input, context): Promise<ValidationResult> {
|
|
||||||
if (this.inputJSONSchema) {
|
|
||||||
try {
|
|
||||||
const validate = ajv.compile(this.inputJSONSchema)
|
|
||||||
if (!validate(input)) {
|
|
||||||
return {
|
|
||||||
result: false,
|
|
||||||
message: ajv.errorsText(validate.errors),
|
|
||||||
errorCode: 400,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} catch (error) {
|
|
||||||
return {
|
|
||||||
result: false,
|
|
||||||
message: `Failed to compile JSON schema for validation: ${error}`,
|
|
||||||
errorCode: 500,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return { result: true }
|
|
||||||
},
|
|
||||||
renderToolUseMessage,
|
renderToolUseMessage,
|
||||||
// Overridden in mcpClient.ts
|
// Overridden in mcpClient.ts
|
||||||
userFacingName: () => 'mcp',
|
userFacingName: () => 'mcp',
|
||||||
@@ -124,4 +100,3 @@ export const MCPTool = buildTool({
|
|||||||
}
|
}
|
||||||
},
|
},
|
||||||
} satisfies ToolDef<InputSchema, Output>)
|
} satisfies ToolDef<InputSchema, Output>)
|
||||||
|
|
||||||
|
|||||||
167
src/tools/RepoMapTool/RepoMapTool.test.ts
Normal file
167
src/tools/RepoMapTool/RepoMapTool.test.ts
Normal file
@@ -0,0 +1,167 @@
|
|||||||
|
import { beforeAll, describe, expect, test } from 'bun:test'
|
||||||
|
import { cpSync, mkdtempSync, rmSync } from 'fs'
|
||||||
|
import { tmpdir } from 'os'
|
||||||
|
import { join } from 'path'
|
||||||
|
import { initParser } from '../../context/repoMap/parser.js'
|
||||||
|
import { invalidateCache } from '../../context/repoMap/index.js'
|
||||||
|
import { RepoMapTool } from './RepoMapTool.js'
|
||||||
|
import { getToolUseSummary } from './UI.js'
|
||||||
|
|
||||||
|
const FIXTURE_ROOT = join(
|
||||||
|
import.meta.dir,
|
||||||
|
'..',
|
||||||
|
'..',
|
||||||
|
'context',
|
||||||
|
'repoMap',
|
||||||
|
'__fixtures__',
|
||||||
|
'mini-repo',
|
||||||
|
)
|
||||||
|
const FIXTURE_FILES = [
|
||||||
|
'fileA.ts',
|
||||||
|
'fileB.ts',
|
||||||
|
'fileC.ts',
|
||||||
|
'fileD.ts',
|
||||||
|
'fileE.ts',
|
||||||
|
]
|
||||||
|
|
||||||
|
beforeAll(async () => {
|
||||||
|
await initParser()
|
||||||
|
})
|
||||||
|
|
||||||
|
|
||||||
|
describe('RepoMapTool schema', () => {
|
||||||
|
test('validates a minimal input {}', () => {
|
||||||
|
const schema = RepoMapTool.inputSchema
|
||||||
|
const result = schema.safeParse({})
|
||||||
|
expect(result.success).toBe(true)
|
||||||
|
})
|
||||||
|
|
||||||
|
test('rejects max_tokens below 256', () => {
|
||||||
|
const schema = RepoMapTool.inputSchema
|
||||||
|
const result = schema.safeParse({ max_tokens: 100 })
|
||||||
|
expect(result.success).toBe(false)
|
||||||
|
})
|
||||||
|
|
||||||
|
test('rejects max_tokens above 16384', () => {
|
||||||
|
const schema = RepoMapTool.inputSchema
|
||||||
|
const result = schema.safeParse({ max_tokens: 20000 })
|
||||||
|
expect(result.success).toBe(false)
|
||||||
|
})
|
||||||
|
|
||||||
|
test('accepts focus_files as string[]', () => {
|
||||||
|
const schema = RepoMapTool.inputSchema
|
||||||
|
const result = schema.safeParse({
|
||||||
|
focus_files: ['src/tools/', 'src/context.ts'],
|
||||||
|
})
|
||||||
|
expect(result.success).toBe(true)
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
describe('RepoMapTool call', () => {
|
||||||
|
test('returns a rendered map for a directory', async () => {
|
||||||
|
const tempDir = mkdtempSync(join(tmpdir(), 'repomap-tool-'))
|
||||||
|
try {
|
||||||
|
for (const f of FIXTURE_FILES) {
|
||||||
|
cpSync(join(FIXTURE_ROOT, f), join(tempDir, f))
|
||||||
|
}
|
||||||
|
|
||||||
|
// We need to call buildRepoMap directly since getCwd patching is complex
|
||||||
|
const { buildRepoMap } = await import(
|
||||||
|
'../../context/repoMap/index.js'
|
||||||
|
)
|
||||||
|
const result = await buildRepoMap({
|
||||||
|
root: tempDir,
|
||||||
|
maxTokens: 1024,
|
||||||
|
})
|
||||||
|
|
||||||
|
expect(result.map.length).toBeGreaterThan(0)
|
||||||
|
expect(result.fileCount).toBeGreaterThan(0)
|
||||||
|
expect(result.totalFileCount).toBe(5)
|
||||||
|
expect(result.tokenCount).toBeGreaterThan(0)
|
||||||
|
expect(result.tokenCount).toBeLessThanOrEqual(1024)
|
||||||
|
} finally {
|
||||||
|
rmSync(tempDir, { recursive: true, force: true })
|
||||||
|
invalidateCache(tempDir)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
test('respects max_tokens parameter', async () => {
|
||||||
|
const tempDir = mkdtempSync(join(tmpdir(), 'repomap-tool-'))
|
||||||
|
try {
|
||||||
|
for (const f of FIXTURE_FILES) {
|
||||||
|
cpSync(join(FIXTURE_ROOT, f), join(tempDir, f))
|
||||||
|
}
|
||||||
|
|
||||||
|
const { buildRepoMap } = await import(
|
||||||
|
'../../context/repoMap/index.js'
|
||||||
|
)
|
||||||
|
|
||||||
|
const small = await buildRepoMap({ root: tempDir, maxTokens: 256 })
|
||||||
|
const large = await buildRepoMap({ root: tempDir, maxTokens: 4096 })
|
||||||
|
|
||||||
|
expect(small.tokenCount).toBeLessThanOrEqual(256)
|
||||||
|
// Large budget should include more or equal content
|
||||||
|
expect(large.map.length).toBeGreaterThanOrEqual(small.map.length)
|
||||||
|
} finally {
|
||||||
|
rmSync(tempDir, { recursive: true, force: true })
|
||||||
|
invalidateCache(tempDir)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
test('focus_files boosts specified files in the ranking', async () => {
|
||||||
|
const tempDir = mkdtempSync(join(tmpdir(), 'repomap-tool-'))
|
||||||
|
try {
|
||||||
|
for (const f of FIXTURE_FILES) {
|
||||||
|
cpSync(join(FIXTURE_ROOT, f), join(tempDir, f))
|
||||||
|
}
|
||||||
|
|
||||||
|
const { buildRepoMap } = await import(
|
||||||
|
'../../context/repoMap/index.js'
|
||||||
|
)
|
||||||
|
|
||||||
|
// Without focus, fileE is ranked last (isolated)
|
||||||
|
const noFocus = await buildRepoMap({ root: tempDir, maxTokens: 2048 })
|
||||||
|
const lines = noFocus.map.split('\n')
|
||||||
|
const fileEPos = lines.findIndex(l => l === 'fileE.ts:')
|
||||||
|
|
||||||
|
// With focus on fileE
|
||||||
|
invalidateCache(tempDir)
|
||||||
|
const withFocus = await buildRepoMap({
|
||||||
|
root: tempDir,
|
||||||
|
maxTokens: 2048,
|
||||||
|
focusFiles: ['fileE.ts'],
|
||||||
|
})
|
||||||
|
const focusLines = withFocus.map.split('\n')
|
||||||
|
const fileEFocusPos = focusLines.findIndex(l => l === 'fileE.ts:')
|
||||||
|
|
||||||
|
// fileE should rank higher (earlier position) with focus
|
||||||
|
expect(fileEFocusPos).toBeLessThan(fileEPos)
|
||||||
|
} finally {
|
||||||
|
rmSync(tempDir, { recursive: true, force: true })
|
||||||
|
invalidateCache(tempDir)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
describe('RepoMapTool properties', () => {
|
||||||
|
test('is marked read-only and concurrency-safe', () => {
|
||||||
|
expect(RepoMapTool.isReadOnly({})).toBe(true)
|
||||||
|
expect(RepoMapTool.isConcurrencySafe({})).toBe(true)
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
describe('RepoMapTool UI', () => {
|
||||||
|
test('getToolUseSummary returns descriptive string including focus', () => {
|
||||||
|
expect(getToolUseSummary(undefined)).toBe('Repository map')
|
||||||
|
expect(getToolUseSummary({})).toBe('Repository map')
|
||||||
|
expect(getToolUseSummary({ focus_files: ['src/tools/'] })).toContain(
|
||||||
|
'focus:',
|
||||||
|
)
|
||||||
|
expect(getToolUseSummary({ focus_files: ['src/tools/'] })).toContain(
|
||||||
|
'src/tools/',
|
||||||
|
)
|
||||||
|
expect(
|
||||||
|
getToolUseSummary({ focus_symbols: ['buildTool'] }),
|
||||||
|
).toContain('buildTool')
|
||||||
|
})
|
||||||
|
})
|
||||||
176
src/tools/RepoMapTool/RepoMapTool.ts
Normal file
176
src/tools/RepoMapTool/RepoMapTool.ts
Normal file
@@ -0,0 +1,176 @@
|
|||||||
|
import { z } from 'zod/v4'
|
||||||
|
import { buildTool, type ToolDef } from '../../Tool.js'
|
||||||
|
import { getCwd } from '../../utils/cwd.js'
|
||||||
|
import { lazySchema } from '../../utils/lazySchema.js'
|
||||||
|
import { checkReadPermissionForTool } from '../../utils/permissions/filesystem.js'
|
||||||
|
import type { PermissionDecision } from '../../utils/permissions/PermissionResult.js'
|
||||||
|
import { buildRepoMap } from '../../context/repoMap/index.js'
|
||||||
|
import { REPO_MAP_TOOL_NAME, getDescription } from './prompt.js'
|
||||||
|
import {
|
||||||
|
getToolUseSummary,
|
||||||
|
renderToolResultMessage,
|
||||||
|
renderToolUseErrorMessage,
|
||||||
|
renderToolUseMessage,
|
||||||
|
} from './UI.js'
|
||||||
|
|
||||||
|
const inputSchema = lazySchema(() =>
|
||||||
|
z.strictObject({
|
||||||
|
max_tokens: z
|
||||||
|
.number()
|
||||||
|
.int()
|
||||||
|
.min(256)
|
||||||
|
.max(16384)
|
||||||
|
.optional()
|
||||||
|
.describe(
|
||||||
|
'Maximum token budget for the rendered map. Higher values include more files. Default: 1024.',
|
||||||
|
),
|
||||||
|
focus_files: z
|
||||||
|
.array(z.string())
|
||||||
|
.optional()
|
||||||
|
.describe(
|
||||||
|
'Relative file or directory paths to boost in the ranking (e.g. ["src/tools/", "src/context.ts"]).',
|
||||||
|
),
|
||||||
|
focus_symbols: z
|
||||||
|
.array(z.string())
|
||||||
|
.optional()
|
||||||
|
.describe(
|
||||||
|
'Symbol names to boost — files defining these symbols rank higher (e.g. ["buildTool", "ToolUseContext"]).',
|
||||||
|
),
|
||||||
|
}),
|
||||||
|
)
|
||||||
|
type InputSchema = ReturnType<typeof inputSchema>
|
||||||
|
|
||||||
|
const outputSchema = lazySchema(() =>
|
||||||
|
z.object({
|
||||||
|
rendered: z.string(),
|
||||||
|
token_count: z.number(),
|
||||||
|
file_count: z.number(),
|
||||||
|
total_file_count: z.number(),
|
||||||
|
cache_hit: z.boolean(),
|
||||||
|
build_time_ms: z.number(),
|
||||||
|
}),
|
||||||
|
)
|
||||||
|
type OutputSchema = ReturnType<typeof outputSchema>
|
||||||
|
|
||||||
|
type Output = z.infer<OutputSchema>
|
||||||
|
|
||||||
|
export const RepoMapTool = buildTool({
|
||||||
|
name: REPO_MAP_TOOL_NAME,
|
||||||
|
searchHint: 'structural map of repository files and symbols',
|
||||||
|
maxResultSizeChars: 50_000,
|
||||||
|
async description() {
|
||||||
|
return getDescription()
|
||||||
|
},
|
||||||
|
userFacingName() {
|
||||||
|
return 'Repository map'
|
||||||
|
},
|
||||||
|
getToolUseSummary,
|
||||||
|
getActivityDescription(input) {
|
||||||
|
if (input?.focus_files?.length) {
|
||||||
|
return `Building repository map (focus: ${input.focus_files.join(', ')})`
|
||||||
|
}
|
||||||
|
return 'Building repository map'
|
||||||
|
},
|
||||||
|
get inputSchema(): InputSchema {
|
||||||
|
return inputSchema()
|
||||||
|
},
|
||||||
|
get outputSchema(): OutputSchema {
|
||||||
|
return outputSchema()
|
||||||
|
},
|
||||||
|
isConcurrencySafe() {
|
||||||
|
return true
|
||||||
|
},
|
||||||
|
isReadOnly() {
|
||||||
|
return true
|
||||||
|
},
|
||||||
|
isSearchOrReadCommand() {
|
||||||
|
return { isSearch: false, isRead: true }
|
||||||
|
},
|
||||||
|
toAutoClassifierInput(input) {
|
||||||
|
const parts: string[] = ['repomap']
|
||||||
|
if (input.focus_files?.length) parts.push(`focus: ${input.focus_files.join(',')}`)
|
||||||
|
return parts.join(' ')
|
||||||
|
},
|
||||||
|
async checkPermissions(input, context): Promise<PermissionDecision> {
|
||||||
|
const appState = context.getAppState()
|
||||||
|
return checkReadPermissionForTool(
|
||||||
|
RepoMapTool,
|
||||||
|
input,
|
||||||
|
appState.toolPermissionContext,
|
||||||
|
)
|
||||||
|
},
|
||||||
|
async prompt() {
|
||||||
|
return getDescription()
|
||||||
|
},
|
||||||
|
renderToolUseMessage,
|
||||||
|
renderToolUseErrorMessage,
|
||||||
|
renderToolResultMessage,
|
||||||
|
extractSearchText({ rendered }) {
|
||||||
|
return rendered
|
||||||
|
},
|
||||||
|
mapToolResultToToolResultBlockParam(output, toolUseID) {
|
||||||
|
const summary = [
|
||||||
|
`Repository map: ${output.file_count} files ranked (${output.total_file_count} total), ${output.token_count} tokens`,
|
||||||
|
output.cache_hit ? '(cached)' : `(built in ${output.build_time_ms}ms)`,
|
||||||
|
].join(' ')
|
||||||
|
|
||||||
|
return {
|
||||||
|
tool_use_id: toolUseID,
|
||||||
|
type: 'tool_result',
|
||||||
|
content: `${summary}\n\n${output.rendered}`,
|
||||||
|
}
|
||||||
|
},
|
||||||
|
async call(
|
||||||
|
{ max_tokens = 1024, focus_files, focus_symbols },
|
||||||
|
{ abortController },
|
||||||
|
) {
|
||||||
|
const root = getCwd()
|
||||||
|
|
||||||
|
// Resolve focus_symbols to file paths by searching the tag cache
|
||||||
|
let resolvedFocusFiles = focus_files ?? []
|
||||||
|
if (focus_symbols?.length) {
|
||||||
|
// Import the symbol lookup dynamically to avoid circular deps at module load
|
||||||
|
const { getRepoFiles } = await import('../../context/repoMap/gitFiles.js')
|
||||||
|
const { extractTags } = await import('../../context/repoMap/symbolExtractor.js')
|
||||||
|
const { initParser } = await import('../../context/repoMap/parser.js')
|
||||||
|
|
||||||
|
await initParser()
|
||||||
|
const files = await getRepoFiles(root)
|
||||||
|
const symbolFiles: string[] = []
|
||||||
|
const symbolSet = new Set(focus_symbols)
|
||||||
|
|
||||||
|
// Scan files for matching symbol definitions
|
||||||
|
for (const file of files) {
|
||||||
|
if (abortController.signal.aborted) break
|
||||||
|
const tags = await extractTags(file, root)
|
||||||
|
if (tags) {
|
||||||
|
const hasMatch = tags.tags.some(
|
||||||
|
t => t.kind === 'def' && symbolSet.has(t.name),
|
||||||
|
)
|
||||||
|
if (hasMatch) {
|
||||||
|
symbolFiles.push(file)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
resolvedFocusFiles = [...resolvedFocusFiles, ...symbolFiles]
|
||||||
|
}
|
||||||
|
|
||||||
|
const result = await buildRepoMap({
|
||||||
|
root,
|
||||||
|
maxTokens: max_tokens,
|
||||||
|
focusFiles: resolvedFocusFiles.length > 0 ? resolvedFocusFiles : undefined,
|
||||||
|
})
|
||||||
|
|
||||||
|
const output: Output = {
|
||||||
|
rendered: result.map,
|
||||||
|
token_count: result.tokenCount,
|
||||||
|
file_count: result.fileCount,
|
||||||
|
total_file_count: result.totalFileCount,
|
||||||
|
cache_hit: result.cacheHit,
|
||||||
|
build_time_ms: result.buildTimeMs,
|
||||||
|
}
|
||||||
|
|
||||||
|
return { data: output }
|
||||||
|
},
|
||||||
|
} satisfies ToolDef<InputSchema, Output>)
|
||||||
96
src/tools/RepoMapTool/UI.tsx
Normal file
96
src/tools/RepoMapTool/UI.tsx
Normal file
@@ -0,0 +1,96 @@
|
|||||||
|
import type { ToolResultBlockParam } from '@anthropic-ai/sdk/resources/index.mjs'
|
||||||
|
import React from 'react'
|
||||||
|
import { FallbackToolUseErrorMessage } from '../../components/FallbackToolUseErrorMessage.js'
|
||||||
|
import { MessageResponse } from '../../components/MessageResponse.js'
|
||||||
|
import { TOOL_SUMMARY_MAX_LENGTH } from '../../constants/toolLimits.js'
|
||||||
|
import { Text } from '../../ink.js'
|
||||||
|
import type { ToolProgressData } from '../../Tool.js'
|
||||||
|
import type { ProgressMessage } from '../../types/message.js'
|
||||||
|
import { truncate } from '../../utils/format.js'
|
||||||
|
|
||||||
|
type Output = {
|
||||||
|
rendered: string
|
||||||
|
token_count: number
|
||||||
|
file_count: number
|
||||||
|
total_file_count: number
|
||||||
|
cache_hit: boolean
|
||||||
|
build_time_ms: number
|
||||||
|
}
|
||||||
|
|
||||||
|
export function getToolUseSummary(
|
||||||
|
input:
|
||||||
|
| Partial<{
|
||||||
|
max_tokens?: number
|
||||||
|
focus_files?: string[]
|
||||||
|
focus_symbols?: string[]
|
||||||
|
}>
|
||||||
|
| undefined,
|
||||||
|
): string | null {
|
||||||
|
if (!input) return 'Repository map'
|
||||||
|
const parts: string[] = []
|
||||||
|
if (input.focus_files?.length) {
|
||||||
|
parts.push(input.focus_files.join(', '))
|
||||||
|
}
|
||||||
|
if (input.focus_symbols?.length) {
|
||||||
|
parts.push(input.focus_symbols.join(', '))
|
||||||
|
}
|
||||||
|
if (parts.length > 0) {
|
||||||
|
return truncate(`Repository map (focus: ${parts.join('; ')})`, TOOL_SUMMARY_MAX_LENGTH)
|
||||||
|
}
|
||||||
|
return 'Repository map'
|
||||||
|
}
|
||||||
|
|
||||||
|
export function renderToolUseMessage(
|
||||||
|
input: Partial<{
|
||||||
|
max_tokens?: number
|
||||||
|
focus_files?: string[]
|
||||||
|
focus_symbols?: string[]
|
||||||
|
}>,
|
||||||
|
): React.ReactNode {
|
||||||
|
const parts: string[] = []
|
||||||
|
if (input.max_tokens) {
|
||||||
|
parts.push(`max_tokens: ${input.max_tokens}`)
|
||||||
|
}
|
||||||
|
if (input.focus_files?.length) {
|
||||||
|
parts.push(`focus: ${input.focus_files.join(', ')}`)
|
||||||
|
}
|
||||||
|
if (input.focus_symbols?.length) {
|
||||||
|
parts.push(`symbols: ${input.focus_symbols.join(', ')}`)
|
||||||
|
}
|
||||||
|
return parts.length > 0 ? parts.join(', ') : null
|
||||||
|
}
|
||||||
|
|
||||||
|
export function renderToolResultMessage(
|
||||||
|
output: Output,
|
||||||
|
_progressMessages: ProgressMessage<ToolProgressData>[],
|
||||||
|
{ verbose }: { verbose: boolean },
|
||||||
|
): React.ReactNode {
|
||||||
|
const summary = `${output.file_count} files ranked, ${output.token_count} tokens${output.cache_hit ? ' (cached)' : `, ${output.build_time_ms}ms`}`
|
||||||
|
|
||||||
|
if (verbose) {
|
||||||
|
return (
|
||||||
|
<MessageResponse>
|
||||||
|
<Text>
|
||||||
|
Built repository map: {summary}
|
||||||
|
{'\n'}
|
||||||
|
({output.total_file_count} total files considered)
|
||||||
|
</Text>
|
||||||
|
</MessageResponse>
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
return (
|
||||||
|
<MessageResponse height={1}>
|
||||||
|
<Text>
|
||||||
|
Built repository map: {summary}
|
||||||
|
</Text>
|
||||||
|
</MessageResponse>
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
export function renderToolUseErrorMessage(
|
||||||
|
result: ToolResultBlockParam['content'],
|
||||||
|
{ verbose }: { verbose: boolean },
|
||||||
|
): React.ReactNode {
|
||||||
|
return <FallbackToolUseErrorMessage result={result} verbose={verbose} />
|
||||||
|
}
|
||||||
31
src/tools/RepoMapTool/prompt.ts
Normal file
31
src/tools/RepoMapTool/prompt.ts
Normal file
@@ -0,0 +1,31 @@
|
|||||||
|
export const REPO_MAP_TOOL_NAME = 'RepoMap'
|
||||||
|
|
||||||
|
export function getDescription(): string {
|
||||||
|
return `Build a structural map of the repository showing ranked files and their key signatures (functions, classes, types, interfaces).
|
||||||
|
|
||||||
|
## When to use
|
||||||
|
- At the start of a session on an unfamiliar repository to understand the codebase architecture
|
||||||
|
- Before cross-file refactors to identify which files are structurally connected
|
||||||
|
- When searching for where a concept or feature lives across the codebase
|
||||||
|
- When the user asks "how is this repo organized" or "what are the important files"
|
||||||
|
|
||||||
|
## When NOT to use
|
||||||
|
- To read the contents of a specific file — use Read instead
|
||||||
|
- To search for exact text or patterns — use Grep instead
|
||||||
|
- To find files by name or glob pattern — use Glob instead
|
||||||
|
- When you already know which files to examine
|
||||||
|
|
||||||
|
## How it works
|
||||||
|
The tool parses every supported source file (TypeScript, JavaScript, Python) using tree-sitter, extracts symbol definitions and references, builds a cross-file reference graph weighted by symbol importance (IDF), and ranks files using PageRank. The output is a token-budgeted summary showing the highest-ranked files with their key signatures (function/class/type declarations).
|
||||||
|
|
||||||
|
## Parameters
|
||||||
|
- **max_tokens**: Controls how many files fit in the output. Use 1024 for a quick overview, 4096+ for comprehensive maps. Default: 1024.
|
||||||
|
- **focus_files**: Pass relative paths (e.g. \`["src/tools/"]\`) to boost specific files and their neighbors in the ranking. Use when the user mentions specific directories or files.
|
||||||
|
- **focus_symbols**: Pass symbol names (e.g. \`["buildTool", "ToolUseContext"]\`) to boost files that define those symbols. Use when the user asks about specific functions or types.
|
||||||
|
|
||||||
|
## Important notes
|
||||||
|
- The map shows **signatures only**, not function bodies. Use Read to see implementations.
|
||||||
|
- Results are **auto-cached** on disk — repeat calls with the same parameters return instantly.
|
||||||
|
- Files are ranked by structural importance: files imported by many others rank highest.
|
||||||
|
`
|
||||||
|
}
|
||||||
@@ -9,7 +9,6 @@ import { z } from 'zod/v4'
|
|||||||
import { getFeatureValue_CACHED_MAY_BE_STALE } from '../../services/analytics/growthbook.js'
|
import { getFeatureValue_CACHED_MAY_BE_STALE } from '../../services/analytics/growthbook.js'
|
||||||
import { queryModelWithStreaming } from '../../services/api/claude.js'
|
import { queryModelWithStreaming } from '../../services/api/claude.js'
|
||||||
import { collectCodexCompletedResponse } from '../../services/api/codexShim.js'
|
import { collectCodexCompletedResponse } from '../../services/api/codexShim.js'
|
||||||
import { fetchWithProxyRetry } from '../../services/api/fetchWithProxyRetry.js'
|
|
||||||
import {
|
import {
|
||||||
resolveCodexApiCredentials,
|
resolveCodexApiCredentials,
|
||||||
resolveProviderRequest,
|
resolveProviderRequest,
|
||||||
@@ -315,7 +314,7 @@ async function runCodexWebSearch(
|
|||||||
body.reasoning = request.reasoning
|
body.reasoning = request.reasoning
|
||||||
}
|
}
|
||||||
|
|
||||||
const response = await fetchWithProxyRetry(`${request.baseUrl}/responses`, {
|
const response = await fetch(`${request.baseUrl}/responses`, {
|
||||||
method: 'POST',
|
method: 'POST',
|
||||||
headers: {
|
headers: {
|
||||||
'Content-Type': 'application/json',
|
'Content-Type': 'application/json',
|
||||||
|
|||||||
@@ -148,42 +148,6 @@ type Position = {
|
|||||||
column: number
|
column: number
|
||||||
}
|
}
|
||||||
|
|
||||||
export function maskTextWithVisibleEdges(
|
|
||||||
value: string,
|
|
||||||
mask: string,
|
|
||||||
visiblePrefix = 3,
|
|
||||||
visibleSuffix = 3,
|
|
||||||
): string {
|
|
||||||
if (!mask || !value) return value
|
|
||||||
|
|
||||||
const graphemes = Array.from(getGraphemeSegmenter().segment(value))
|
|
||||||
const secretGraphemeCount = graphemes.filter(
|
|
||||||
({ segment }) => segment !== '\n',
|
|
||||||
).length
|
|
||||||
const visibleCount = visiblePrefix + visibleSuffix
|
|
||||||
|
|
||||||
if (secretGraphemeCount <= visibleCount) {
|
|
||||||
return graphemes
|
|
||||||
.map(({ segment }) => (segment === '\n' ? segment : mask))
|
|
||||||
.join('')
|
|
||||||
}
|
|
||||||
|
|
||||||
let secretIndex = 0
|
|
||||||
return graphemes
|
|
||||||
.map(({ segment }) => {
|
|
||||||
if (segment === '\n') return segment
|
|
||||||
|
|
||||||
const nextSegment =
|
|
||||||
secretIndex < visiblePrefix ||
|
|
||||||
secretIndex >= secretGraphemeCount - visibleSuffix
|
|
||||||
? segment
|
|
||||||
: mask
|
|
||||||
secretIndex += 1
|
|
||||||
return nextSegment
|
|
||||||
})
|
|
||||||
.join('')
|
|
||||||
}
|
|
||||||
|
|
||||||
export class Cursor {
|
export class Cursor {
|
||||||
readonly offset: number
|
readonly offset: number
|
||||||
constructor(
|
constructor(
|
||||||
@@ -244,12 +208,7 @@ export class Cursor {
|
|||||||
maxVisibleLines?: number,
|
maxVisibleLines?: number,
|
||||||
) {
|
) {
|
||||||
const { line, column } = this.getPosition()
|
const { line, column } = this.getPosition()
|
||||||
const allLines = mask
|
const allLines = this.measuredText.getWrappedText()
|
||||||
? new MeasuredText(
|
|
||||||
maskTextWithVisibleEdges(this.text, mask),
|
|
||||||
this.measuredText.columns,
|
|
||||||
).getWrappedText()
|
|
||||||
: this.measuredText.getWrappedText()
|
|
||||||
|
|
||||||
const startLine = this.getViewportStartLine(maxVisibleLines)
|
const startLine = this.getViewportStartLine(maxVisibleLines)
|
||||||
const endLine =
|
const endLine =
|
||||||
@@ -262,6 +221,23 @@ export class Cursor {
|
|||||||
.map((text, i) => {
|
.map((text, i) => {
|
||||||
const currentLine = i + startLine
|
const currentLine = i + startLine
|
||||||
let displayText = text
|
let displayText = text
|
||||||
|
if (mask) {
|
||||||
|
const graphemes = Array.from(getGraphemeSegmenter().segment(text))
|
||||||
|
if (currentLine === allLines.length - 1) {
|
||||||
|
// Last line: mask all but the trailing 6 chars so the user can
|
||||||
|
// confirm they pasted the right thing without exposing the full token
|
||||||
|
const visibleCount = Math.min(6, graphemes.length)
|
||||||
|
const maskCount = graphemes.length - visibleCount
|
||||||
|
const splitOffset =
|
||||||
|
graphemes.length > visibleCount ? graphemes[maskCount]!.index : 0
|
||||||
|
displayText = mask.repeat(maskCount) + text.slice(splitOffset)
|
||||||
|
} else {
|
||||||
|
// Earlier wrapped lines: fully mask. Previously only the last line
|
||||||
|
// was masked, leaking the start of the token on narrow terminals
|
||||||
|
// where the pasted OAuth code wraps across multiple lines.
|
||||||
|
displayText = mask.repeat(graphemes.length)
|
||||||
|
}
|
||||||
|
}
|
||||||
// looking for the line with the cursor
|
// looking for the line with the cursor
|
||||||
if (line !== currentLine) return displayText.trimEnd()
|
if (line !== currentLine) return displayText.trimEnd()
|
||||||
|
|
||||||
|
|||||||
@@ -78,28 +78,3 @@ test('toolToAPISchema keeps skill required for SkillTool', async () => {
|
|||||||
required: ['skill'],
|
required: ['skill'],
|
||||||
})
|
})
|
||||||
})
|
})
|
||||||
|
|
||||||
test('toolToAPISchema removes extra required keys not in properties (MCP schema sanitization)', async () => {
|
|
||||||
const schema = await toolToAPISchema(
|
|
||||||
{
|
|
||||||
name: 'mcp__test__create_object',
|
|
||||||
inputSchema: z.strictObject({}),
|
|
||||||
inputJSONSchema: {
|
|
||||||
type: 'object',
|
|
||||||
properties: {
|
|
||||||
name: { type: 'string' },
|
|
||||||
},
|
|
||||||
required: ['name', 'attributes'],
|
|
||||||
},
|
|
||||||
prompt: async () => 'Create an object',
|
|
||||||
} as unknown as Tool,
|
|
||||||
{
|
|
||||||
getToolPermissionContext: async () => getEmptyToolPermissionContext(),
|
|
||||||
tools: [] as unknown as Tools,
|
|
||||||
agents: [],
|
|
||||||
},
|
|
||||||
)
|
|
||||||
|
|
||||||
const inputSchema = (schema as { input_schema: { required?: string[] } }).input_schema
|
|
||||||
expect(inputSchema.required).toEqual(['name'])
|
|
||||||
})
|
|
||||||
|
|||||||
@@ -111,60 +111,11 @@ function filterSwarmFieldsFromSchema(
|
|||||||
delete filteredProps[field]
|
delete filteredProps[field]
|
||||||
}
|
}
|
||||||
filtered.properties = filteredProps
|
filtered.properties = filteredProps
|
||||||
|
|
||||||
// Keep `required` in sync after removing properties
|
|
||||||
if (Array.isArray(filtered.required)) {
|
|
||||||
filtered.required = filtered.required.filter(
|
|
||||||
(key: string) => key in filteredProps,
|
|
||||||
)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return filtered
|
return filtered
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
|
||||||
* Ensure `required` only lists keys present in `properties`.
|
|
||||||
* MCP servers may emit schemas where these are out of sync, causing
|
|
||||||
* API 400 errors ("Extra required key supplied").
|
|
||||||
* Recurses into nested object schemas.
|
|
||||||
*/
|
|
||||||
function sanitizeSchemaRequired(
|
|
||||||
schema: Anthropic.Tool.InputSchema,
|
|
||||||
): Anthropic.Tool.InputSchema {
|
|
||||||
if (!schema || typeof schema !== 'object') {
|
|
||||||
return schema
|
|
||||||
}
|
|
||||||
|
|
||||||
const result = { ...schema }
|
|
||||||
const props = result.properties as Record<string, unknown> | undefined
|
|
||||||
|
|
||||||
if (props && Array.isArray(result.required)) {
|
|
||||||
result.required = result.required.filter(
|
|
||||||
(key: string) => key in props,
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Recurse into nested object properties
|
|
||||||
if (props) {
|
|
||||||
const sanitizedProps = { ...props }
|
|
||||||
for (const [key, value] of Object.entries(sanitizedProps)) {
|
|
||||||
if (
|
|
||||||
value &&
|
|
||||||
typeof value === 'object' &&
|
|
||||||
(value as Record<string, unknown>).type === 'object'
|
|
||||||
) {
|
|
||||||
sanitizedProps[key] = sanitizeSchemaRequired(
|
|
||||||
value as Anthropic.Tool.InputSchema,
|
|
||||||
)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
result.properties = sanitizedProps
|
|
||||||
}
|
|
||||||
|
|
||||||
return result
|
|
||||||
}
|
|
||||||
|
|
||||||
export async function toolToAPISchema(
|
export async function toolToAPISchema(
|
||||||
tool: Tool,
|
tool: Tool,
|
||||||
options: {
|
options: {
|
||||||
@@ -205,7 +156,7 @@ export async function toolToAPISchema(
|
|||||||
// Use tool's JSON schema directly if provided, otherwise convert Zod schema
|
// Use tool's JSON schema directly if provided, otherwise convert Zod schema
|
||||||
let input_schema = (
|
let input_schema = (
|
||||||
'inputJSONSchema' in tool && tool.inputJSONSchema
|
'inputJSONSchema' in tool && tool.inputJSONSchema
|
||||||
? sanitizeSchemaRequired(tool.inputJSONSchema as Anthropic.Tool.InputSchema)
|
? tool.inputJSONSchema
|
||||||
: zodToJsonSchema(tool.inputSchema)
|
: zodToJsonSchema(tool.inputSchema)
|
||||||
) as Anthropic.Tool.InputSchema
|
) as Anthropic.Tool.InputSchema
|
||||||
|
|
||||||
|
|||||||
@@ -2882,7 +2882,7 @@ async function getDiagnosticAttachments(
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Get new diagnostics from the tracker (IDE diagnostics via MCP)
|
// Get new diagnostics from the tracker (IDE diagnostics via MCP)
|
||||||
const newDiagnostics = await diagnosticTracker.getNewDiagnosticsCompat()
|
const newDiagnostics = await diagnosticTracker.getNewDiagnostics()
|
||||||
if (newDiagnostics.length === 0) {
|
if (newDiagnostics.length === 0) {
|
||||||
return []
|
return []
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -155,7 +155,7 @@ export {
|
|||||||
NOTIFICATION_CHANNELS,
|
NOTIFICATION_CHANNELS,
|
||||||
} from './configConstants.js'
|
} from './configConstants.js'
|
||||||
|
|
||||||
import type { EDITOR_MODES, NOTIFICATION_CHANNELS, PROVIDERS } from './configConstants.js'
|
import type { EDITOR_MODES, NOTIFICATION_CHANNELS } from './configConstants.js'
|
||||||
|
|
||||||
export type NotificationChannel = (typeof NOTIFICATION_CHANNELS)[number]
|
export type NotificationChannel = (typeof NOTIFICATION_CHANNELS)[number]
|
||||||
|
|
||||||
@@ -181,12 +181,10 @@ export type DiffTool = 'terminal' | 'auto'
|
|||||||
|
|
||||||
export type OutputStyle = string
|
export type OutputStyle = string
|
||||||
|
|
||||||
export type Providers = typeof PROVIDERS[number]
|
|
||||||
|
|
||||||
export type ProviderProfile = {
|
export type ProviderProfile = {
|
||||||
id: string
|
id: string
|
||||||
name: string
|
name: string
|
||||||
provider: Providers
|
provider: 'openai' | 'anthropic'
|
||||||
baseUrl: string
|
baseUrl: string
|
||||||
model: string
|
model: string
|
||||||
apiKey?: string
|
apiKey?: string
|
||||||
|
|||||||
@@ -19,5 +19,3 @@ export const EDITOR_MODES = ['normal', 'vim'] as const
|
|||||||
// 'in-process' = in-process teammates running in same process
|
// 'in-process' = in-process teammates running in same process
|
||||||
// 'auto' = automatically choose based on context (default)
|
// 'auto' = automatically choose based on context (default)
|
||||||
export const TEAMMATE_MODES = ['auto', 'tmux', 'in-process'] as const
|
export const TEAMMATE_MODES = ['auto', 'tmux', 'in-process'] as const
|
||||||
|
|
||||||
export const PROVIDERS = ['openai', 'anthropic', 'mistral', 'gemini'] as const
|
|
||||||
|
|||||||
@@ -9,7 +9,6 @@ import {
|
|||||||
const originalEnv = {
|
const originalEnv = {
|
||||||
CLAUDE_CODE_USE_OPENAI: process.env.CLAUDE_CODE_USE_OPENAI,
|
CLAUDE_CODE_USE_OPENAI: process.env.CLAUDE_CODE_USE_OPENAI,
|
||||||
CLAUDE_CODE_MAX_OUTPUT_TOKENS: process.env.CLAUDE_CODE_MAX_OUTPUT_TOKENS,
|
CLAUDE_CODE_MAX_OUTPUT_TOKENS: process.env.CLAUDE_CODE_MAX_OUTPUT_TOKENS,
|
||||||
OPENAI_MODEL: process.env.OPENAI_MODEL,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
afterEach(() => {
|
afterEach(() => {
|
||||||
@@ -24,17 +23,11 @@ afterEach(() => {
|
|||||||
process.env.CLAUDE_CODE_MAX_OUTPUT_TOKENS =
|
process.env.CLAUDE_CODE_MAX_OUTPUT_TOKENS =
|
||||||
originalEnv.CLAUDE_CODE_MAX_OUTPUT_TOKENS
|
originalEnv.CLAUDE_CODE_MAX_OUTPUT_TOKENS
|
||||||
}
|
}
|
||||||
if (originalEnv.OPENAI_MODEL === undefined) {
|
|
||||||
delete process.env.OPENAI_MODEL
|
|
||||||
} else {
|
|
||||||
process.env.OPENAI_MODEL = originalEnv.OPENAI_MODEL
|
|
||||||
}
|
|
||||||
})
|
})
|
||||||
|
|
||||||
test('deepseek-chat uses provider-specific context and output caps', () => {
|
test('deepseek-chat uses provider-specific context and output caps', () => {
|
||||||
process.env.CLAUDE_CODE_USE_OPENAI = '1'
|
process.env.CLAUDE_CODE_USE_OPENAI = '1'
|
||||||
delete process.env.CLAUDE_CODE_MAX_OUTPUT_TOKENS
|
delete process.env.CLAUDE_CODE_MAX_OUTPUT_TOKENS
|
||||||
delete process.env.OPENAI_MODEL
|
|
||||||
|
|
||||||
expect(getContextWindowForModel('deepseek-chat')).toBe(128_000)
|
expect(getContextWindowForModel('deepseek-chat')).toBe(128_000)
|
||||||
expect(getModelMaxOutputTokens('deepseek-chat')).toEqual({
|
expect(getModelMaxOutputTokens('deepseek-chat')).toEqual({
|
||||||
@@ -47,7 +40,6 @@ test('deepseek-chat uses provider-specific context and output caps', () => {
|
|||||||
test('deepseek-chat clamps oversized max output overrides to the provider limit', () => {
|
test('deepseek-chat clamps oversized max output overrides to the provider limit', () => {
|
||||||
process.env.CLAUDE_CODE_USE_OPENAI = '1'
|
process.env.CLAUDE_CODE_USE_OPENAI = '1'
|
||||||
process.env.CLAUDE_CODE_MAX_OUTPUT_TOKENS = '32000'
|
process.env.CLAUDE_CODE_MAX_OUTPUT_TOKENS = '32000'
|
||||||
delete process.env.OPENAI_MODEL
|
|
||||||
|
|
||||||
expect(getMaxOutputTokensForModel('deepseek-chat')).toBe(8_192)
|
expect(getMaxOutputTokensForModel('deepseek-chat')).toBe(8_192)
|
||||||
})
|
})
|
||||||
@@ -55,7 +47,6 @@ test('deepseek-chat clamps oversized max output overrides to the provider limit'
|
|||||||
test('gpt-4o uses provider-specific context and output caps', () => {
|
test('gpt-4o uses provider-specific context and output caps', () => {
|
||||||
process.env.CLAUDE_CODE_USE_OPENAI = '1'
|
process.env.CLAUDE_CODE_USE_OPENAI = '1'
|
||||||
delete process.env.CLAUDE_CODE_MAX_OUTPUT_TOKENS
|
delete process.env.CLAUDE_CODE_MAX_OUTPUT_TOKENS
|
||||||
delete process.env.OPENAI_MODEL
|
|
||||||
|
|
||||||
expect(getContextWindowForModel('gpt-4o')).toBe(128_000)
|
expect(getContextWindowForModel('gpt-4o')).toBe(128_000)
|
||||||
expect(getModelMaxOutputTokens('gpt-4o')).toEqual({
|
expect(getModelMaxOutputTokens('gpt-4o')).toEqual({
|
||||||
@@ -68,7 +59,6 @@ test('gpt-4o uses provider-specific context and output caps', () => {
|
|||||||
test('gpt-4o clamps oversized max output overrides to the provider limit', () => {
|
test('gpt-4o clamps oversized max output overrides to the provider limit', () => {
|
||||||
process.env.CLAUDE_CODE_USE_OPENAI = '1'
|
process.env.CLAUDE_CODE_USE_OPENAI = '1'
|
||||||
process.env.CLAUDE_CODE_MAX_OUTPUT_TOKENS = '32000'
|
process.env.CLAUDE_CODE_MAX_OUTPUT_TOKENS = '32000'
|
||||||
delete process.env.OPENAI_MODEL
|
|
||||||
|
|
||||||
expect(getMaxOutputTokensForModel('gpt-4o')).toBe(16_384)
|
expect(getMaxOutputTokensForModel('gpt-4o')).toBe(16_384)
|
||||||
})
|
})
|
||||||
@@ -76,7 +66,6 @@ test('gpt-4o clamps oversized max output overrides to the provider limit', () =>
|
|||||||
test('gpt-5.4 family uses provider-specific context and output caps', () => {
|
test('gpt-5.4 family uses provider-specific context and output caps', () => {
|
||||||
process.env.CLAUDE_CODE_USE_OPENAI = '1'
|
process.env.CLAUDE_CODE_USE_OPENAI = '1'
|
||||||
delete process.env.CLAUDE_CODE_MAX_OUTPUT_TOKENS
|
delete process.env.CLAUDE_CODE_MAX_OUTPUT_TOKENS
|
||||||
delete process.env.OPENAI_MODEL
|
|
||||||
|
|
||||||
expect(getContextWindowForModel('gpt-5.4')).toBe(1_050_000)
|
expect(getContextWindowForModel('gpt-5.4')).toBe(1_050_000)
|
||||||
expect(getModelMaxOutputTokens('gpt-5.4')).toEqual({
|
expect(getModelMaxOutputTokens('gpt-5.4')).toEqual({
|
||||||
@@ -109,7 +98,6 @@ test('gpt-5.4 family keeps large max output overrides within provider limits', (
|
|||||||
test('MiniMax-M2.7 uses explicit provider-specific context and output caps', () => {
|
test('MiniMax-M2.7 uses explicit provider-specific context and output caps', () => {
|
||||||
process.env.CLAUDE_CODE_USE_OPENAI = '1'
|
process.env.CLAUDE_CODE_USE_OPENAI = '1'
|
||||||
delete process.env.CLAUDE_CODE_MAX_OUTPUT_TOKENS
|
delete process.env.CLAUDE_CODE_MAX_OUTPUT_TOKENS
|
||||||
delete process.env.OPENAI_MODEL
|
|
||||||
|
|
||||||
expect(getContextWindowForModel('MiniMax-M2.7')).toBe(204_800)
|
expect(getContextWindowForModel('MiniMax-M2.7')).toBe(204_800)
|
||||||
expect(getModelMaxOutputTokens('MiniMax-M2.7')).toEqual({
|
expect(getModelMaxOutputTokens('MiniMax-M2.7')).toEqual({
|
||||||
@@ -122,7 +110,6 @@ test('MiniMax-M2.7 uses explicit provider-specific context and output caps', ()
|
|||||||
test('unknown openai-compatible models use the 128k fallback window (not 8k, see #635)', () => {
|
test('unknown openai-compatible models use the 128k fallback window (not 8k, see #635)', () => {
|
||||||
process.env.CLAUDE_CODE_USE_OPENAI = '1'
|
process.env.CLAUDE_CODE_USE_OPENAI = '1'
|
||||||
delete process.env.CLAUDE_CODE_MAX_OUTPUT_TOKENS
|
delete process.env.CLAUDE_CODE_MAX_OUTPUT_TOKENS
|
||||||
delete process.env.OPENAI_MODEL
|
|
||||||
|
|
||||||
expect(getContextWindowForModel('some-unknown-3p-model')).toBe(128_000)
|
expect(getContextWindowForModel('some-unknown-3p-model')).toBe(128_000)
|
||||||
})
|
})
|
||||||
@@ -130,7 +117,6 @@ test('unknown openai-compatible models use the 128k fallback window (not 8k, see
|
|||||||
test('MiniMax-M2.5 and M2.1 use explicit provider-specific context and output caps', () => {
|
test('MiniMax-M2.5 and M2.1 use explicit provider-specific context and output caps', () => {
|
||||||
process.env.CLAUDE_CODE_USE_OPENAI = '1'
|
process.env.CLAUDE_CODE_USE_OPENAI = '1'
|
||||||
delete process.env.CLAUDE_CODE_MAX_OUTPUT_TOKENS
|
delete process.env.CLAUDE_CODE_MAX_OUTPUT_TOKENS
|
||||||
delete process.env.OPENAI_MODEL
|
|
||||||
|
|
||||||
expect(getContextWindowForModel('MiniMax-M2.5')).toBe(204_800)
|
expect(getContextWindowForModel('MiniMax-M2.5')).toBe(204_800)
|
||||||
expect(getContextWindowForModel('MiniMax-M2.5-highspeed')).toBe(204_800)
|
expect(getContextWindowForModel('MiniMax-M2.5-highspeed')).toBe(204_800)
|
||||||
@@ -141,116 +127,3 @@ test('MiniMax-M2.5 and M2.1 use explicit provider-specific context and output ca
|
|||||||
upperLimit: 131_072,
|
upperLimit: 131_072,
|
||||||
})
|
})
|
||||||
})
|
})
|
||||||
|
|
||||||
test('DashScope qwen3.6-plus uses provider-specific context and output caps', () => {
|
|
||||||
process.env.CLAUDE_CODE_USE_OPENAI = '1'
|
|
||||||
delete process.env.CLAUDE_CODE_MAX_OUTPUT_TOKENS
|
|
||||||
|
|
||||||
expect(getContextWindowForModel('qwen3.6-plus')).toBe(1_000_000)
|
|
||||||
expect(getModelMaxOutputTokens('qwen3.6-plus')).toEqual({
|
|
||||||
default: 65_536,
|
|
||||||
upperLimit: 65_536,
|
|
||||||
})
|
|
||||||
expect(getMaxOutputTokensForModel('qwen3.6-plus')).toBe(65_536)
|
|
||||||
})
|
|
||||||
|
|
||||||
test('DashScope qwen3.5-plus uses provider-specific context and output caps', () => {
|
|
||||||
process.env.CLAUDE_CODE_USE_OPENAI = '1'
|
|
||||||
delete process.env.CLAUDE_CODE_MAX_OUTPUT_TOKENS
|
|
||||||
|
|
||||||
expect(getContextWindowForModel('qwen3.5-plus')).toBe(1_000_000)
|
|
||||||
expect(getModelMaxOutputTokens('qwen3.5-plus')).toEqual({
|
|
||||||
default: 65_536,
|
|
||||||
upperLimit: 65_536,
|
|
||||||
})
|
|
||||||
expect(getMaxOutputTokensForModel('qwen3.5-plus')).toBe(65_536)
|
|
||||||
})
|
|
||||||
|
|
||||||
test('DashScope qwen3-coder-plus uses provider-specific context and output caps', () => {
|
|
||||||
process.env.CLAUDE_CODE_USE_OPENAI = '1'
|
|
||||||
delete process.env.CLAUDE_CODE_MAX_OUTPUT_TOKENS
|
|
||||||
|
|
||||||
expect(getContextWindowForModel('qwen3-coder-plus')).toBe(1_000_000)
|
|
||||||
expect(getModelMaxOutputTokens('qwen3-coder-plus')).toEqual({
|
|
||||||
default: 65_536,
|
|
||||||
upperLimit: 65_536,
|
|
||||||
})
|
|
||||||
})
|
|
||||||
|
|
||||||
test('DashScope qwen3-coder-next uses provider-specific context and output caps', () => {
|
|
||||||
process.env.CLAUDE_CODE_USE_OPENAI = '1'
|
|
||||||
delete process.env.CLAUDE_CODE_MAX_OUTPUT_TOKENS
|
|
||||||
|
|
||||||
expect(getContextWindowForModel('qwen3-coder-next')).toBe(262_144)
|
|
||||||
expect(getModelMaxOutputTokens('qwen3-coder-next')).toEqual({
|
|
||||||
default: 65_536,
|
|
||||||
upperLimit: 65_536,
|
|
||||||
})
|
|
||||||
})
|
|
||||||
|
|
||||||
test('DashScope qwen3-max uses provider-specific context and output caps', () => {
|
|
||||||
process.env.CLAUDE_CODE_USE_OPENAI = '1'
|
|
||||||
delete process.env.CLAUDE_CODE_MAX_OUTPUT_TOKENS
|
|
||||||
|
|
||||||
expect(getContextWindowForModel('qwen3-max')).toBe(262_144)
|
|
||||||
expect(getModelMaxOutputTokens('qwen3-max')).toEqual({
|
|
||||||
default: 32_768,
|
|
||||||
upperLimit: 32_768,
|
|
||||||
})
|
|
||||||
})
|
|
||||||
|
|
||||||
test('DashScope qwen3-max dated variant resolves to base entry via prefix match', () => {
|
|
||||||
process.env.CLAUDE_CODE_USE_OPENAI = '1'
|
|
||||||
delete process.env.CLAUDE_CODE_MAX_OUTPUT_TOKENS
|
|
||||||
|
|
||||||
expect(getContextWindowForModel('qwen3-max-2026-01-23')).toBe(262_144)
|
|
||||||
expect(getModelMaxOutputTokens('qwen3-max-2026-01-23')).toEqual({
|
|
||||||
default: 32_768,
|
|
||||||
upperLimit: 32_768,
|
|
||||||
})
|
|
||||||
})
|
|
||||||
|
|
||||||
test('DashScope kimi-k2.5 uses provider-specific context and output caps', () => {
|
|
||||||
process.env.CLAUDE_CODE_USE_OPENAI = '1'
|
|
||||||
delete process.env.CLAUDE_CODE_MAX_OUTPUT_TOKENS
|
|
||||||
|
|
||||||
expect(getContextWindowForModel('kimi-k2.5')).toBe(262_144)
|
|
||||||
expect(getModelMaxOutputTokens('kimi-k2.5')).toEqual({
|
|
||||||
default: 32_768,
|
|
||||||
upperLimit: 32_768,
|
|
||||||
})
|
|
||||||
})
|
|
||||||
|
|
||||||
test('DashScope glm-5 uses provider-specific context and output caps', () => {
|
|
||||||
process.env.CLAUDE_CODE_USE_OPENAI = '1'
|
|
||||||
delete process.env.CLAUDE_CODE_MAX_OUTPUT_TOKENS
|
|
||||||
|
|
||||||
expect(getContextWindowForModel('glm-5')).toBe(202_752)
|
|
||||||
expect(getModelMaxOutputTokens('glm-5')).toEqual({
|
|
||||||
default: 16_384,
|
|
||||||
upperLimit: 16_384,
|
|
||||||
})
|
|
||||||
})
|
|
||||||
|
|
||||||
test('DashScope glm-4.7 uses provider-specific context and output caps', () => {
|
|
||||||
process.env.CLAUDE_CODE_USE_OPENAI = '1'
|
|
||||||
delete process.env.CLAUDE_CODE_MAX_OUTPUT_TOKENS
|
|
||||||
|
|
||||||
expect(getContextWindowForModel('glm-4.7')).toBe(202_752)
|
|
||||||
expect(getModelMaxOutputTokens('glm-4.7')).toEqual({
|
|
||||||
default: 16_384,
|
|
||||||
upperLimit: 16_384,
|
|
||||||
})
|
|
||||||
})
|
|
||||||
|
|
||||||
test('DashScope models clamp oversized max output overrides to the provider limit', () => {
|
|
||||||
process.env.CLAUDE_CODE_USE_OPENAI = '1'
|
|
||||||
process.env.CLAUDE_CODE_MAX_OUTPUT_TOKENS = '100000'
|
|
||||||
|
|
||||||
expect(getMaxOutputTokensForModel('qwen3.6-plus')).toBe(65_536)
|
|
||||||
expect(getMaxOutputTokensForModel('qwen3.5-plus')).toBe(65_536)
|
|
||||||
expect(getMaxOutputTokensForModel('qwen3-coder-next')).toBe(65_536)
|
|
||||||
expect(getMaxOutputTokensForModel('qwen3-max')).toBe(32_768)
|
|
||||||
expect(getMaxOutputTokensForModel('kimi-k2.5')).toBe(32_768)
|
|
||||||
expect(getMaxOutputTokensForModel('glm-5')).toBe(16_384)
|
|
||||||
})
|
|
||||||
|
|||||||
@@ -37,8 +37,6 @@ export const CLAUDE_3_7_SONNET_CONFIG = {
|
|||||||
gemini: 'gemini-2.0-flash',
|
gemini: 'gemini-2.0-flash',
|
||||||
github: 'github:copilot',
|
github: 'github:copilot',
|
||||||
codex: 'gpt-5.4',
|
codex: 'gpt-5.4',
|
||||||
'nvidia-nim': 'nvidia/llama-3.1-nemotron-70b-instruct',
|
|
||||||
minimax: 'MiniMax-M2.5',
|
|
||||||
} as const satisfies ModelConfig
|
} as const satisfies ModelConfig
|
||||||
|
|
||||||
export const CLAUDE_3_5_V2_SONNET_CONFIG = {
|
export const CLAUDE_3_5_V2_SONNET_CONFIG = {
|
||||||
@@ -50,8 +48,6 @@ export const CLAUDE_3_5_V2_SONNET_CONFIG = {
|
|||||||
gemini: 'gemini-2.0-flash',
|
gemini: 'gemini-2.0-flash',
|
||||||
github: 'github:copilot',
|
github: 'github:copilot',
|
||||||
codex: 'gpt-5.4',
|
codex: 'gpt-5.4',
|
||||||
'nvidia-nim': 'nvidia/llama-3.1-nemotron-70b-instruct',
|
|
||||||
minimax: 'MiniMax-M2.5',
|
|
||||||
} as const satisfies ModelConfig
|
} as const satisfies ModelConfig
|
||||||
|
|
||||||
export const CLAUDE_3_5_HAIKU_CONFIG = {
|
export const CLAUDE_3_5_HAIKU_CONFIG = {
|
||||||
@@ -63,8 +59,6 @@ export const CLAUDE_3_5_HAIKU_CONFIG = {
|
|||||||
gemini: 'gemini-2.0-flash-lite',
|
gemini: 'gemini-2.0-flash-lite',
|
||||||
github: 'github:copilot',
|
github: 'github:copilot',
|
||||||
codex: 'gpt-5.4',
|
codex: 'gpt-5.4',
|
||||||
'nvidia-nim': 'nvidia/llama-3.1-nemotron-70b-instruct',
|
|
||||||
minimax: 'MiniMax-M2.5',
|
|
||||||
} as const satisfies ModelConfig
|
} as const satisfies ModelConfig
|
||||||
|
|
||||||
export const CLAUDE_HAIKU_4_5_CONFIG = {
|
export const CLAUDE_HAIKU_4_5_CONFIG = {
|
||||||
@@ -76,8 +70,6 @@ export const CLAUDE_HAIKU_4_5_CONFIG = {
|
|||||||
gemini: 'gemini-2.0-flash-lite',
|
gemini: 'gemini-2.0-flash-lite',
|
||||||
github: 'github:copilot',
|
github: 'github:copilot',
|
||||||
codex: 'gpt-5.4',
|
codex: 'gpt-5.4',
|
||||||
'nvidia-nim': 'nvidia/llama-3.1-nemotron-70b-instruct',
|
|
||||||
minimax: 'MiniMax-M2.5',
|
|
||||||
} as const satisfies ModelConfig
|
} as const satisfies ModelConfig
|
||||||
|
|
||||||
export const CLAUDE_SONNET_4_CONFIG = {
|
export const CLAUDE_SONNET_4_CONFIG = {
|
||||||
@@ -89,8 +81,6 @@ export const CLAUDE_SONNET_4_CONFIG = {
|
|||||||
gemini: 'gemini-2.0-flash',
|
gemini: 'gemini-2.0-flash',
|
||||||
github: 'github:copilot',
|
github: 'github:copilot',
|
||||||
codex: 'gpt-5.4',
|
codex: 'gpt-5.4',
|
||||||
'nvidia-nim': 'nvidia/llama-3.1-nemotron-70b-instruct',
|
|
||||||
minimax: 'MiniMax-M2.5',
|
|
||||||
} as const satisfies ModelConfig
|
} as const satisfies ModelConfig
|
||||||
|
|
||||||
export const CLAUDE_SONNET_4_5_CONFIG = {
|
export const CLAUDE_SONNET_4_5_CONFIG = {
|
||||||
@@ -102,8 +92,6 @@ export const CLAUDE_SONNET_4_5_CONFIG = {
|
|||||||
gemini: 'gemini-2.0-flash',
|
gemini: 'gemini-2.0-flash',
|
||||||
github: 'github:copilot',
|
github: 'github:copilot',
|
||||||
codex: 'gpt-5.4',
|
codex: 'gpt-5.4',
|
||||||
'nvidia-nim': 'nvidia/llama-3.1-nemotron-70b-instruct',
|
|
||||||
minimax: 'MiniMax-M2.5',
|
|
||||||
} as const satisfies ModelConfig
|
} as const satisfies ModelConfig
|
||||||
|
|
||||||
export const CLAUDE_OPUS_4_CONFIG = {
|
export const CLAUDE_OPUS_4_CONFIG = {
|
||||||
@@ -115,8 +103,6 @@ export const CLAUDE_OPUS_4_CONFIG = {
|
|||||||
gemini: 'gemini-2.5-pro-preview-03-25',
|
gemini: 'gemini-2.5-pro-preview-03-25',
|
||||||
github: 'github:copilot',
|
github: 'github:copilot',
|
||||||
codex: 'gpt-5.4',
|
codex: 'gpt-5.4',
|
||||||
'nvidia-nim': 'nvidia/llama-3.1-nemotron-70b-instruct',
|
|
||||||
minimax: 'MiniMax-M2.5',
|
|
||||||
} as const satisfies ModelConfig
|
} as const satisfies ModelConfig
|
||||||
|
|
||||||
export const CLAUDE_OPUS_4_1_CONFIG = {
|
export const CLAUDE_OPUS_4_1_CONFIG = {
|
||||||
@@ -128,8 +114,6 @@ export const CLAUDE_OPUS_4_1_CONFIG = {
|
|||||||
gemini: 'gemini-2.5-pro-preview-03-25',
|
gemini: 'gemini-2.5-pro-preview-03-25',
|
||||||
github: 'github:copilot',
|
github: 'github:copilot',
|
||||||
codex: 'gpt-5.4',
|
codex: 'gpt-5.4',
|
||||||
'nvidia-nim': 'nvidia/llama-3.1-nemotron-70b-instruct',
|
|
||||||
minimax: 'MiniMax-M2.5',
|
|
||||||
} as const satisfies ModelConfig
|
} as const satisfies ModelConfig
|
||||||
|
|
||||||
export const CLAUDE_OPUS_4_5_CONFIG = {
|
export const CLAUDE_OPUS_4_5_CONFIG = {
|
||||||
@@ -141,8 +125,6 @@ export const CLAUDE_OPUS_4_5_CONFIG = {
|
|||||||
gemini: 'gemini-2.5-pro-preview-03-25',
|
gemini: 'gemini-2.5-pro-preview-03-25',
|
||||||
github: 'github:copilot',
|
github: 'github:copilot',
|
||||||
codex: 'gpt-5.4',
|
codex: 'gpt-5.4',
|
||||||
'nvidia-nim': 'nvidia/llama-3.1-nemotron-70b-instruct',
|
|
||||||
minimax: 'MiniMax-M2.5',
|
|
||||||
} as const satisfies ModelConfig
|
} as const satisfies ModelConfig
|
||||||
|
|
||||||
export const CLAUDE_OPUS_4_6_CONFIG = {
|
export const CLAUDE_OPUS_4_6_CONFIG = {
|
||||||
@@ -154,8 +136,6 @@ export const CLAUDE_OPUS_4_6_CONFIG = {
|
|||||||
gemini: 'gemini-2.5-pro-preview-03-25',
|
gemini: 'gemini-2.5-pro-preview-03-25',
|
||||||
github: 'github:copilot',
|
github: 'github:copilot',
|
||||||
codex: 'gpt-5.4',
|
codex: 'gpt-5.4',
|
||||||
'nvidia-nim': 'nvidia/llama-3.1-nemotron-70b-instruct',
|
|
||||||
minimax: 'MiniMax-M2.5',
|
|
||||||
} as const satisfies ModelConfig
|
} as const satisfies ModelConfig
|
||||||
|
|
||||||
export const CLAUDE_SONNET_4_6_CONFIG = {
|
export const CLAUDE_SONNET_4_6_CONFIG = {
|
||||||
@@ -167,8 +147,6 @@ export const CLAUDE_SONNET_4_6_CONFIG = {
|
|||||||
gemini: 'gemini-2.0-flash',
|
gemini: 'gemini-2.0-flash',
|
||||||
github: 'github:copilot',
|
github: 'github:copilot',
|
||||||
codex: 'gpt-5.4',
|
codex: 'gpt-5.4',
|
||||||
'nvidia-nim': 'nvidia/llama-3.1-nemotron-70b-instruct',
|
|
||||||
minimax: 'MiniMax-M2.5',
|
|
||||||
} as const satisfies ModelConfig
|
} as const satisfies ModelConfig
|
||||||
|
|
||||||
// @[MODEL LAUNCH]: Register the new config here.
|
// @[MODEL LAUNCH]: Register the new config here.
|
||||||
|
|||||||
@@ -1,46 +0,0 @@
|
|||||||
/**
|
|
||||||
* MiniMax model list for the /model picker.
|
|
||||||
* Full model catalog from MiniMax API.
|
|
||||||
*/
|
|
||||||
|
|
||||||
import type { ModelOption } from './modelOptions.js'
|
|
||||||
import { getAPIProvider } from './providers.js'
|
|
||||||
import { isEnvTruthy } from '../envUtils.js'
|
|
||||||
|
|
||||||
export function isMiniMaxProvider(): boolean {
|
|
||||||
if (isEnvTruthy(process.env.MINIMAX_API_KEY)) {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
const baseUrl = process.env.OPENAI_BASE_URL ?? ''
|
|
||||||
if (baseUrl.includes('minimax')) {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
return getAPIProvider() === 'minimax'
|
|
||||||
}
|
|
||||||
|
|
||||||
function getMiniMaxModels(): ModelOption[] {
|
|
||||||
return [
|
|
||||||
// Latest Generation Models - use correct MiniMax naming with M prefix
|
|
||||||
{ value: 'MiniMax-M2', label: 'MiniMax M2', description: 'MoE model - 131K context - Chat/Code/Reasoning' },
|
|
||||||
{ value: 'MiniMax-M2.1', label: 'MiniMax M2.1', description: 'Enhanced - 200K context - Vision' },
|
|
||||||
{ value: 'MiniMax-M2.5', label: 'MiniMax M2.5', description: 'Flagship - 256K context - Vision/Function-calling' },
|
|
||||||
{ value: 'MiniMax-Text-01', label: 'MiniMax Text 01', description: 'Text-focused - 512K context - FREE' },
|
|
||||||
{ value: 'MiniMax-Text-01-Preview', label: 'MiniMax Text 01 Preview', description: 'Preview - 256K context - FREE' },
|
|
||||||
{ value: 'MiniMax-Vision-01', label: 'MiniMax Vision 01', description: 'Vision model - 32K context' },
|
|
||||||
{ value: 'MiniMax-Vision-01-Fast', label: 'MiniMax Vision 01 Fast', description: 'Fast vision - 16K context - FREE' },
|
|
||||||
// Legacy free tier models
|
|
||||||
{ value: 'abab6.5s-chat', label: 'ABAB 6.5S Chat', description: 'Legacy free - 16K context' },
|
|
||||||
{ value: 'abab6.5-chat', label: 'ABAB 6.5 Chat', description: 'Legacy free - 32K context' },
|
|
||||||
{ value: 'abab6.5g-chat', label: 'ABAB 6.5G Chat', description: 'Generation 6.5 - 32K context' },
|
|
||||||
{ value: 'abab6-chat', label: 'ABAB 6 Chat', description: 'Legacy - 8K context' },
|
|
||||||
]
|
|
||||||
}
|
|
||||||
|
|
||||||
let cachedMiniMaxOptions: ModelOption[] | null = null
|
|
||||||
|
|
||||||
export function getCachedMiniMaxModelOptions(): ModelOption[] {
|
|
||||||
if (!cachedMiniMaxOptions) {
|
|
||||||
cachedMiniMaxOptions = getMiniMaxModels()
|
|
||||||
}
|
|
||||||
return cachedMiniMaxOptions
|
|
||||||
}
|
|
||||||
@@ -1,57 +0,0 @@
|
|||||||
import { afterEach, beforeEach, expect, test } from 'bun:test'
|
|
||||||
|
|
||||||
import { saveGlobalConfig } from '../config.js'
|
|
||||||
import { getDefaultMainLoopModelSetting, getUserSpecifiedModelSetting } from './model.js'
|
|
||||||
|
|
||||||
const env = {
|
|
||||||
CLAUDE_CODE_USE_GITHUB: process.env.CLAUDE_CODE_USE_GITHUB,
|
|
||||||
CLAUDE_CODE_USE_OPENAI: process.env.CLAUDE_CODE_USE_OPENAI,
|
|
||||||
CLAUDE_CODE_USE_GEMINI: process.env.CLAUDE_CODE_USE_GEMINI,
|
|
||||||
CLAUDE_CODE_USE_BEDROCK: process.env.CLAUDE_CODE_USE_BEDROCK,
|
|
||||||
CLAUDE_CODE_USE_VERTEX: process.env.CLAUDE_CODE_USE_VERTEX,
|
|
||||||
CLAUDE_CODE_USE_FOUNDRY: process.env.CLAUDE_CODE_USE_FOUNDRY,
|
|
||||||
OPENAI_MODEL: process.env.OPENAI_MODEL,
|
|
||||||
}
|
|
||||||
|
|
||||||
beforeEach(() => {
|
|
||||||
process.env.CLAUDE_CODE_USE_GITHUB = '1'
|
|
||||||
delete process.env.CLAUDE_CODE_USE_OPENAI
|
|
||||||
delete process.env.CLAUDE_CODE_USE_GEMINI
|
|
||||||
delete process.env.CLAUDE_CODE_USE_BEDROCK
|
|
||||||
delete process.env.CLAUDE_CODE_USE_VERTEX
|
|
||||||
delete process.env.CLAUDE_CODE_USE_FOUNDRY
|
|
||||||
delete process.env.OPENAI_MODEL
|
|
||||||
saveGlobalConfig(current => ({
|
|
||||||
...current,
|
|
||||||
model: ({ bad: true } as unknown) as string,
|
|
||||||
}))
|
|
||||||
})
|
|
||||||
|
|
||||||
afterEach(() => {
|
|
||||||
process.env.CLAUDE_CODE_USE_GITHUB = env.CLAUDE_CODE_USE_GITHUB
|
|
||||||
process.env.CLAUDE_CODE_USE_OPENAI = env.CLAUDE_CODE_USE_OPENAI
|
|
||||||
process.env.CLAUDE_CODE_USE_GEMINI = env.CLAUDE_CODE_USE_GEMINI
|
|
||||||
process.env.CLAUDE_CODE_USE_BEDROCK = env.CLAUDE_CODE_USE_BEDROCK
|
|
||||||
process.env.CLAUDE_CODE_USE_VERTEX = env.CLAUDE_CODE_USE_VERTEX
|
|
||||||
process.env.CLAUDE_CODE_USE_FOUNDRY = env.CLAUDE_CODE_USE_FOUNDRY
|
|
||||||
process.env.OPENAI_MODEL = env.OPENAI_MODEL
|
|
||||||
saveGlobalConfig(current => ({
|
|
||||||
...current,
|
|
||||||
model: undefined,
|
|
||||||
}))
|
|
||||||
})
|
|
||||||
|
|
||||||
test('github default model setting ignores non-string saved model', () => {
|
|
||||||
const model = getDefaultMainLoopModelSetting()
|
|
||||||
expect(typeof model).toBe('string')
|
|
||||||
expect(model).not.toBe('[object Object]')
|
|
||||||
expect(model.length).toBeGreaterThan(0)
|
|
||||||
})
|
|
||||||
|
|
||||||
test('user specified model ignores non-string saved model', () => {
|
|
||||||
const model = getUserSpecifiedModelSetting()
|
|
||||||
if (model !== undefined && model !== null) {
|
|
||||||
expect(typeof model).toBe('string')
|
|
||||||
expect(model).not.toBe('[object Object]')
|
|
||||||
}
|
|
||||||
})
|
|
||||||
@@ -33,12 +33,6 @@ export type ModelShortName = string
|
|||||||
export type ModelName = string
|
export type ModelName = string
|
||||||
export type ModelSetting = ModelName | ModelAlias | null
|
export type ModelSetting = ModelName | ModelAlias | null
|
||||||
|
|
||||||
function normalizeModelSetting(value: unknown): ModelName | ModelAlias | undefined {
|
|
||||||
if (typeof value !== 'string') return undefined
|
|
||||||
const trimmed = value.trim()
|
|
||||||
return trimmed.length > 0 ? trimmed : undefined
|
|
||||||
}
|
|
||||||
|
|
||||||
export function getSmallFastModel(): ModelName {
|
export function getSmallFastModel(): ModelName {
|
||||||
if (process.env.ANTHROPIC_SMALL_FAST_MODEL) return process.env.ANTHROPIC_SMALL_FAST_MODEL
|
if (process.env.ANTHROPIC_SMALL_FAST_MODEL) return process.env.ANTHROPIC_SMALL_FAST_MODEL
|
||||||
// For Gemini provider, use a fast model
|
// For Gemini provider, use a fast model
|
||||||
@@ -88,7 +82,6 @@ export function getUserSpecifiedModelSetting(): ModelSetting | undefined {
|
|||||||
specifiedModel = modelOverride
|
specifiedModel = modelOverride
|
||||||
} else {
|
} else {
|
||||||
const settings = getSettings_DEPRECATED() || {}
|
const settings = getSettings_DEPRECATED() || {}
|
||||||
const setting = normalizeModelSetting(settings.model)
|
|
||||||
// Read the model env var that matches the active provider to prevent
|
// Read the model env var that matches the active provider to prevent
|
||||||
// cross-provider leaks (e.g. ANTHROPIC_MODEL sent to the OpenAI API).
|
// cross-provider leaks (e.g. ANTHROPIC_MODEL sent to the OpenAI API).
|
||||||
const provider = getAPIProvider()
|
const provider = getAPIProvider()
|
||||||
@@ -97,7 +90,7 @@ export function getUserSpecifiedModelSetting(): ModelSetting | undefined {
|
|||||||
(provider === 'mistral' ? process.env.MISTRAL_MODEL : undefined) ||
|
(provider === 'mistral' ? process.env.MISTRAL_MODEL : undefined) ||
|
||||||
(provider === 'openai' || provider === 'gemini' || provider === 'mistral' || provider === 'github' ? process.env.OPENAI_MODEL : undefined) ||
|
(provider === 'openai' || provider === 'gemini' || provider === 'mistral' || provider === 'github' ? process.env.OPENAI_MODEL : undefined) ||
|
||||||
(provider === 'firstParty' ? process.env.ANTHROPIC_MODEL : undefined) ||
|
(provider === 'firstParty' ? process.env.ANTHROPIC_MODEL : undefined) ||
|
||||||
setting ||
|
settings.model ||
|
||||||
undefined
|
undefined
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -271,11 +264,7 @@ export function getDefaultMainLoopModelSetting(): ModelName | ModelAlias {
|
|||||||
// GitHub Copilot provider: check settings.model first, then env, then default
|
// GitHub Copilot provider: check settings.model first, then env, then default
|
||||||
if (getAPIProvider() === 'github') {
|
if (getAPIProvider() === 'github') {
|
||||||
const settings = getSettings_DEPRECATED() || {}
|
const settings = getSettings_DEPRECATED() || {}
|
||||||
return (
|
return settings.model || process.env.OPENAI_MODEL || 'github:copilot'
|
||||||
normalizeModelSetting(settings.model) ||
|
|
||||||
normalizeModelSetting(process.env.OPENAI_MODEL) ||
|
|
||||||
'github:copilot'
|
|
||||||
)
|
|
||||||
}
|
}
|
||||||
// Gemini provider: always use the configured Gemini model
|
// Gemini provider: always use the configured Gemini model
|
||||||
if (getAPIProvider() === 'gemini') {
|
if (getAPIProvider() === 'gemini') {
|
||||||
@@ -606,10 +595,7 @@ export function getPublicModelName(model: ModelName): string {
|
|||||||
export function parseUserSpecifiedModel(
|
export function parseUserSpecifiedModel(
|
||||||
modelInput: ModelName | ModelAlias,
|
modelInput: ModelName | ModelAlias,
|
||||||
): ModelName {
|
): ModelName {
|
||||||
const modelInputTrimmed = normalizeModelSetting(modelInput)
|
const modelInputTrimmed = modelInput.trim()
|
||||||
if (!modelInputTrimmed) {
|
|
||||||
return getDefaultSonnetModel()
|
|
||||||
}
|
|
||||||
const normalizedModel = modelInputTrimmed.toLowerCase()
|
const normalizedModel = modelInputTrimmed.toLowerCase()
|
||||||
|
|
||||||
const has1mTag = has1mContext(normalizedModel)
|
const has1mTag = has1mContext(normalizedModel)
|
||||||
|
|||||||
@@ -33,14 +33,8 @@ import {
|
|||||||
} from './model.js'
|
} from './model.js'
|
||||||
import { has1mContext } from '../context.js'
|
import { has1mContext } from '../context.js'
|
||||||
import { getGlobalConfig } from '../config.js'
|
import { getGlobalConfig } from '../config.js'
|
||||||
import {
|
import { getActiveOpenAIModelOptionsCache } from '../providerProfiles.js'
|
||||||
getActiveOpenAIModelOptionsCache,
|
|
||||||
getActiveProviderProfile,
|
|
||||||
getProfileModelOptions,
|
|
||||||
} from '../providerProfiles.js'
|
|
||||||
import { getCachedOllamaModelOptions, isOllamaProvider } from './ollamaModels.js'
|
import { getCachedOllamaModelOptions, isOllamaProvider } from './ollamaModels.js'
|
||||||
import { getCachedNvidiaNimModelOptions, isNvidiaNimProvider } from './nvidiaNimModels.js'
|
|
||||||
import { getCachedMiniMaxModelOptions, isMiniMaxProvider } from './minimaxModels.js'
|
|
||||||
import { getAntModels } from './antModels.js'
|
import { getAntModels } from './antModels.js'
|
||||||
|
|
||||||
// @[MODEL LAUNCH]: Update all the available and default model option strings below.
|
// @[MODEL LAUNCH]: Update all the available and default model option strings below.
|
||||||
@@ -396,26 +390,6 @@ function getModelOptionsBase(fastMode = false): ModelOption[] {
|
|||||||
return [defaultOption]
|
return [defaultOption]
|
||||||
}
|
}
|
||||||
|
|
||||||
// When using NVIDIA NIM, show models from the NVIDIA catalog
|
|
||||||
if (isNvidiaNimProvider()) {
|
|
||||||
const defaultOption = getDefaultOptionForUser(fastMode)
|
|
||||||
const nvidiaModels = getCachedNvidiaNimModelOptions()
|
|
||||||
if (nvidiaModels.length > 0) {
|
|
||||||
return [defaultOption, ...nvidiaModels]
|
|
||||||
}
|
|
||||||
return [defaultOption]
|
|
||||||
}
|
|
||||||
|
|
||||||
// When using MiniMax, show models from the MiniMax catalog
|
|
||||||
if (isMiniMaxProvider()) {
|
|
||||||
const defaultOption = getDefaultOptionForUser(fastMode)
|
|
||||||
const minimaxModels = getCachedMiniMaxModelOptions()
|
|
||||||
if (minimaxModels.length > 0) {
|
|
||||||
return [defaultOption, ...minimaxModels]
|
|
||||||
}
|
|
||||||
return [defaultOption]
|
|
||||||
}
|
|
||||||
|
|
||||||
if (process.env.USER_TYPE === 'ant') {
|
if (process.env.USER_TYPE === 'ant') {
|
||||||
// Build options from antModels config
|
// Build options from antModels config
|
||||||
const antModelOptions: ModelOption[] = getAntModels().map(m => ({
|
const antModelOptions: ModelOption[] = getAntModels().map(m => ({
|
||||||
@@ -480,20 +454,6 @@ function getModelOptionsBase(fastMode = false): ModelOption[] {
|
|||||||
]
|
]
|
||||||
}
|
}
|
||||||
|
|
||||||
// When a provider profile's env is applied, collect its models so they
|
|
||||||
// can be appended to the standard picker options below.
|
|
||||||
// We check PROFILE_ENV_APPLIED to avoid the ?? profiles[0] fallback in
|
|
||||||
// getActiveProviderProfile which would affect users with inactive profiles.
|
|
||||||
const profileEnvApplied = process.env.CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED === '1'
|
|
||||||
const profileModelOptions: ModelOption[] = []
|
|
||||||
if (profileEnvApplied) {
|
|
||||||
const activeProfile = getActiveProviderProfile()
|
|
||||||
if (activeProfile) {
|
|
||||||
const models = getProfileModelOptions(activeProfile)
|
|
||||||
profileModelOptions.push(...models)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// PAYG 1P API: Default (Sonnet) + Sonnet 1M + Opus 4.6 + Opus 1M + Haiku
|
// PAYG 1P API: Default (Sonnet) + Sonnet 1M + Opus 4.6 + Opus 1M + Haiku
|
||||||
if (getAPIProvider() === 'firstParty') {
|
if (getAPIProvider() === 'firstParty') {
|
||||||
const payg1POptions = [getDefaultOptionForUser(fastMode)]
|
const payg1POptions = [getDefaultOptionForUser(fastMode)]
|
||||||
@@ -509,7 +469,6 @@ function getModelOptionsBase(fastMode = false): ModelOption[] {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
payg1POptions.push(getHaiku45Option())
|
payg1POptions.push(getHaiku45Option())
|
||||||
payg1POptions.push(...profileModelOptions)
|
|
||||||
return payg1POptions
|
return payg1POptions
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -549,7 +508,6 @@ function getModelOptionsBase(fastMode = false): ModelOption[] {
|
|||||||
} else {
|
} else {
|
||||||
payg3pOptions.push(getHaikuOption())
|
payg3pOptions.push(getHaikuOption())
|
||||||
}
|
}
|
||||||
payg3pOptions.push(...profileModelOptions)
|
|
||||||
return payg3pOptions
|
return payg3pOptions
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -1,161 +0,0 @@
|
|||||||
/**
|
|
||||||
* NVIDIA NIM model list for the /model picker.
|
|
||||||
* Filtered to chat/instruct models only - embedding, reward, safety, vision, etc. excluded.
|
|
||||||
*/
|
|
||||||
|
|
||||||
import type { ModelOption } from './modelOptions.js'
|
|
||||||
import { getAPIProvider } from './providers.js'
|
|
||||||
import { isEnvTruthy } from '../envUtils.js'
|
|
||||||
|
|
||||||
export function isNvidiaNimProvider(): boolean {
|
|
||||||
// Check if explicitly set via NVIDIA_NIM or via provider flag
|
|
||||||
if (isEnvTruthy(process.env.NVIDIA_NIM)) {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
// Also check if using NVIDIA NIM endpoint
|
|
||||||
const baseUrl = process.env.OPENAI_BASE_URL ?? ''
|
|
||||||
if (baseUrl.includes('nvidia') || baseUrl.includes('integrate.api.nvidia')) {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
return getAPIProvider() === 'nvidia-nim'
|
|
||||||
}
|
|
||||||
|
|
||||||
function getNvidiaNimModels(): ModelOption[] {
|
|
||||||
return [
|
|
||||||
// AGENTIC REASONING MODELS
|
|
||||||
{ value: 'nvidia/cosmos-reason2-8b', label: 'Cosmos Reason 2 8B', description: 'Reasoning' },
|
|
||||||
{ value: 'microsoft/phi-4-mini-flash-reasoning', label: 'Phi 4 Mini Flash Reasoning', description: 'Reasoning' },
|
|
||||||
{ value: 'qwen/qwen3-next-80b-a3b-thinking', label: 'Qwen 3 Next 80B Thinking', description: 'Reasoning' },
|
|
||||||
{ value: 'deepseek-ai/deepseek-r1-distill-qwen-32b', label: 'DeepSeek R1 Qwen 32B', description: 'Reasoning' },
|
|
||||||
{ value: 'deepseek-ai/deepseek-r1-distill-qwen-14b', label: 'DeepSeek R1 Qwen 14B', description: 'Reasoning' },
|
|
||||||
{ value: 'deepseek-ai/deepseek-r1-distill-qwen-7b', label: 'DeepSeek R1 Qwen 7B', description: 'Reasoning' },
|
|
||||||
{ value: 'deepseek-ai/deepseek-r1-distill-llama-8b', label: 'DeepSeek R1 Llama 8B', description: 'Reasoning' },
|
|
||||||
{ value: 'qwen/qwq-32b', label: 'QwQ 32B Reasoning', description: 'Reasoning' },
|
|
||||||
// CODE MODELS
|
|
||||||
{ value: 'meta/codellama-70b', label: 'CodeLlama 70B', description: 'Code' },
|
|
||||||
{ value: 'bigcode/starcoder2-15b', label: 'StarCoder2 15B', description: 'Code' },
|
|
||||||
{ value: 'bigcode/starcoder2-7b', label: 'StarCoder2 7B', description: 'Code' },
|
|
||||||
{ value: 'mistralai/codestral-22b-instruct-v0.1', label: 'Codestral 22B', description: 'Code' },
|
|
||||||
{ value: 'mistralai/mamba-codestral-7b-v0.1', label: 'Mamba Codestral 7B', description: 'Code' },
|
|
||||||
{ value: 'deepseek-ai/deepseek-coder-6.7b-instruct', label: 'DeepSeek Coder 6.7B', description: 'Code' },
|
|
||||||
{ value: 'google/codegemma-7b', label: 'CodeGemma 7B', description: 'Code' },
|
|
||||||
{ value: 'google/codegemma-1.1-7b', label: 'CodeGemma 1.1 7B', description: 'Code' },
|
|
||||||
{ value: 'qwen/qwen2.5-coder-32b-instruct', label: 'Qwen 2.5 Coder 32B', description: 'Code' },
|
|
||||||
{ value: 'qwen/qwen2.5-coder-7b-instruct', label: 'Qwen 2.5 Coder 7B', description: 'Code' },
|
|
||||||
{ value: 'qwen/qwen3-coder-480b-a35b-instruct', label: 'Qwen 3 Coder 480B', description: 'Code' },
|
|
||||||
{ value: 'ibm/granite-34b-code-instruct', label: 'Granite 34B Code', description: 'Code' },
|
|
||||||
{ value: 'ibm/granite-8b-code-instruct', label: 'Granite 8B Code', description: 'Code' },
|
|
||||||
// NEMOTRON MODELS - NVIDIA Flagship
|
|
||||||
{ value: 'nvidia/llama-3.1-nemotron-70b-instruct', label: 'Nemotron 70B Instruct', description: 'NVIDIA Flagship' },
|
|
||||||
{ value: 'nvidia/llama-3.1-nemotron-51b-instruct', label: 'Nemotron 51B Instruct', description: 'NVIDIA Flagship' },
|
|
||||||
{ value: 'nvidia/llama-3.1-nemotron-ultra-253b-v1', label: 'Nemotron Ultra 253B', description: 'NVIDIA Flagship' },
|
|
||||||
{ value: 'nvidia/llama-3.3-nemotron-super-49b-v1', label: 'Nemotron Super 49B v1', description: 'NVIDIA Flagship' },
|
|
||||||
{ value: 'nvidia/llama-3.3-nemotron-super-49b-v1.5', label: 'Nemotron Super 49B v1.5', description: 'NVIDIA Flagship' },
|
|
||||||
{ value: 'nvidia/nemotron-4-340b-instruct', label: 'Nemotron 4 340B', description: 'NVIDIA Flagship' },
|
|
||||||
{ value: 'nvidia/nemotron-3-super-120b-a12b', label: 'Nemotron 3 Super 120B', description: 'NVIDIA Flagship' },
|
|
||||||
{ value: 'nvidia/nemotron-3-nano-30b-a3b', label: 'Nemotron 3 Nano 30B', description: 'NVIDIA Flagship' },
|
|
||||||
{ value: 'nvidia/nemotron-mini-4b-instruct', label: 'Nemotron Mini 4B', description: 'NVIDIA Flagship' },
|
|
||||||
{ value: 'nvidia/llama-3.1-nemotron-nano-8b-v1', label: 'Nemotron Nano 8B', description: 'NVIDIA Flagship' },
|
|
||||||
{ value: 'nvidia/llama-3.1-nemotron-nano-4b-v1.1', label: 'Nemotron Nano 4B v1.1', description: 'NVIDIA Flagship' },
|
|
||||||
// CHATQA MODELS
|
|
||||||
{ value: 'nvidia/llama3-chatqa-1.5-70b', label: 'Llama3 ChatQA 1.5 70B', description: 'Chat' },
|
|
||||||
{ value: 'nvidia/llama3-chatqa-1.5-8b', label: 'Llama3 ChatQA 1.5 8B', description: 'Chat' },
|
|
||||||
// META LLAMA MODELS
|
|
||||||
{ value: 'meta/llama-3.1-405b-instruct', label: 'Llama 3.1 405B', description: 'Meta Llama' },
|
|
||||||
{ value: 'meta/llama-3.1-70b-instruct', label: 'Llama 3.1 70B', description: 'Meta Llama' },
|
|
||||||
{ value: 'meta/llama-3.1-8b-instruct', label: 'Llama 3.1 8B', description: 'Meta Llama' },
|
|
||||||
{ value: 'meta/llama-3.2-90b-vision-instruct', label: 'Llama 3.2 90B Vision', description: 'Meta Llama' },
|
|
||||||
{ value: 'meta/llama-3.2-11b-vision-instruct', label: 'Llama 3.2 11B Vision', description: 'Meta Llama' },
|
|
||||||
{ value: 'meta/llama-3.2-3b-instruct', label: 'Llama 3.2 3B', description: 'Meta Llama' },
|
|
||||||
{ value: 'meta/llama-3.2-1b-instruct', label: 'Llama 3.2 1B', description: 'Meta Llama' },
|
|
||||||
{ value: 'meta/llama-3.3-70b-instruct', label: 'Llama 3.3 70B', description: 'Meta Llama' },
|
|
||||||
{ value: 'meta/llama-4-maverick-17b-128e-instruct', label: 'Llama 4 Maverick 17B', description: 'Meta Llama' },
|
|
||||||
{ value: 'meta/llama-4-scout-17b-16e-instruct', label: 'Llama 4 Scout 17B', description: 'Meta Llama' },
|
|
||||||
// GOOGLE GEMMA MODELS (text only - no vision)
|
|
||||||
{ value: 'google/gemma-4-31b-it', label: 'Gemma 4 31B', description: 'Google Gemma' },
|
|
||||||
{ value: 'google/gemma-3-27b-it', label: 'Gemma 3 27B', description: 'Google Gemma' },
|
|
||||||
{ value: 'google/gemma-3-12b-it', label: 'Gemma 3 12B', description: 'Google Gemma' },
|
|
||||||
{ value: 'google/gemma-3-4b-it', label: 'Gemma 3 4B', description: 'Google Gemma' },
|
|
||||||
{ value: 'google/gemma-3-1b-it', label: 'Gemma 3 1B', description: 'Google Gemma' },
|
|
||||||
{ value: 'google/gemma-3n-e4b-it', label: 'Gemma 3N E4B', description: 'Google Gemma' },
|
|
||||||
{ value: 'google/gemma-3n-e2b-it', label: 'Gemma 3N E2B', description: 'Google Gemma' },
|
|
||||||
{ value: 'google/gemma-2-27b-it', label: 'Gemma 2 27B', description: 'Google Gemma' },
|
|
||||||
{ value: 'google/gemma-2-9b-it', label: 'Gemma 2 9B', description: 'Google Gemma' },
|
|
||||||
{ value: 'google/gemma-2-2b-it', label: 'Gemma 2 2B', description: 'Google Gemma' },
|
|
||||||
// MISTRAL MODELS
|
|
||||||
{ value: 'mistralai/mistral-large-3-675b-instruct-2512', label: 'Mistral Large 3 675B', description: 'Mistral' },
|
|
||||||
{ value: 'mistralai/mistral-large-2-instruct', label: 'Mistral Large 2', description: 'Mistral' },
|
|
||||||
{ value: 'mistralai/mistral-large', label: 'Mistral Large', description: 'Mistral' },
|
|
||||||
{ value: 'mistralai/mistral-medium-3-instruct', label: 'Mistral Medium 3', description: 'Mistral' },
|
|
||||||
{ value: 'mistralai/mistral-small-4-119b-2603', label: 'Mistral Small 4 119B', description: 'Mistral' },
|
|
||||||
{ value: 'mistralai/mistral-small-3.1-24b-instruct-2503', label: 'Mistral Small 3.1 24B', description: 'Mistral' },
|
|
||||||
{ value: 'mistralai/mistral-small-24b-instruct', label: 'Mistral Small 24B', description: 'Mistral' },
|
|
||||||
{ value: 'mistralai/mistral-7b-instruct-v0.3', label: 'Mistral 7B v0.3', description: 'Mistral' },
|
|
||||||
{ value: 'mistralai/mistral-7b-instruct-v0.2', label: 'Mistral 7B v0.2', description: 'Mistral' },
|
|
||||||
{ value: 'mistralai/mixtral-8x22b-instruct-v0.1', label: 'Mixtral 8x22B', description: 'Mistral' },
|
|
||||||
{ value: 'mistralai/mixtral-8x22b-instruct-v0.1', label: 'Mixtral 8x22B Instruct', description: 'Mistral' },
|
|
||||||
{ value: 'mistralai/mixtral-8x7b-instruct-v0.1', label: 'Mixtral 8x7B', description: 'Mistral' },
|
|
||||||
{ value: 'mistralai/mistral-nemotron', label: 'Mistral Nemotron', description: 'Mistral' },
|
|
||||||
{ value: 'mistralai/mathstral-7b-v0.1', label: 'Mathstral 7B', description: 'Math' },
|
|
||||||
{ value: 'mistralai/ministral-14b-instruct-2512', label: 'Ministral 14B', description: 'Mistral' },
|
|
||||||
{ value: 'mistralai/devstral-2-123b-instruct-2512', label: 'Devstral 2 123B', description: 'Code' },
|
|
||||||
{ value: 'mistralai/magistral-small-2506', label: 'Magistral Small', description: 'Mistral' },
|
|
||||||
// MICROSOFT PHI MODELS (text only - no vision)
|
|
||||||
{ value: 'microsoft/phi-4-multimodal-instruct', label: 'Phi 4 Multimodal', description: 'Multimodal' },
|
|
||||||
{ value: 'microsoft/phi-4-mini-instruct', label: 'Phi 4 Mini', description: 'Phi' },
|
|
||||||
{ value: 'microsoft/phi-3.5-mini-instruct', label: 'Phi 3.5 Mini', description: 'Phi' },
|
|
||||||
{ value: 'microsoft/phi-3-small-128k-instruct', label: 'Phi 3 Small 128K', description: 'Phi' },
|
|
||||||
{ value: 'microsoft/phi-3-small-8k-instruct', label: 'Phi 3 Small 8K', description: 'Phi' },
|
|
||||||
{ value: 'microsoft/phi-3-medium-128k-instruct', label: 'Phi 3 Medium 128K', description: 'Phi' },
|
|
||||||
{ value: 'microsoft/phi-3-medium-4k-instruct', label: 'Phi 3 Medium 4K', description: 'Phi' },
|
|
||||||
{ value: 'microsoft/phi-3-mini-128k-instruct', label: 'Phi 3 Mini 128K', description: 'Phi' },
|
|
||||||
{ value: 'microsoft/phi-3-mini-4k-instruct', label: 'Phi 3 Mini 4K', description: 'Phi' },
|
|
||||||
// QWEN MODELS
|
|
||||||
{ value: 'qwen/qwen3.5-397b-a17b', label: 'Qwen 3.5 397B', description: 'Qwen' },
|
|
||||||
{ value: 'qwen/qwen3.5-122b-a10b', label: 'Qwen 3.5 122B', description: 'Qwen' },
|
|
||||||
{ value: 'qwen/qwen3-next-80b-a3b-instruct', label: 'Qwen 3 Next 80B', description: 'Qwen' },
|
|
||||||
{ value: 'qwen/qwen2.5-7b-instruct', label: 'Qwen 2.5 7B', description: 'Qwen' },
|
|
||||||
{ value: 'qwen/qwen2-7b-instruct', label: 'Qwen 2 7B', description: 'Qwen' },
|
|
||||||
{ value: 'qwen/qwen3-32b', label: 'Qwen 3 32B', description: 'Qwen' },
|
|
||||||
{ value: 'qwen/qwen3-8b', label: 'Qwen 3 8B', description: 'Qwen' },
|
|
||||||
// DEEPSEEK MODELS
|
|
||||||
{ value: 'deepseek-ai/deepseek-r1', label: 'DeepSeek R1', description: 'DeepSeek' },
|
|
||||||
{ value: 'deepseek-ai/deepseek-v3', label: 'DeepSeek V3', description: 'DeepSeek' },
|
|
||||||
{ value: 'deepseek-ai/deepseek-v3.2', label: 'DeepSeek V3.2', description: 'DeepSeek' },
|
|
||||||
{ value: 'deepseek-ai/deepseek-v3.1-terminus', label: 'DeepSeek V3.1 Terminus', description: 'DeepSeek' },
|
|
||||||
{ value: 'deepseek-ai/deepseek-v3.1', label: 'DeepSeek V3.1', description: 'DeepSeek' },
|
|
||||||
// IBM GRANITE MODELS
|
|
||||||
{ value: 'ibm/granite-3.3-8b-instruct', label: 'Granite 3.3 8B', description: 'IBM Granite' },
|
|
||||||
{ value: 'ibm/granite-3.0-8b-instruct', label: 'Granite 3.0 8B', description: 'IBM Granite' },
|
|
||||||
{ value: 'ibm/granite-3.0-3b-a800m-instruct', label: 'Granite 3.0 3B', description: 'IBM Granite' },
|
|
||||||
// OTHER MODELS
|
|
||||||
{ value: 'databricks/dbrx-instruct', label: 'DBRX Instruct', description: 'Other' },
|
|
||||||
{ value: '01-ai/yi-large', label: 'Yi Large', description: 'Other' },
|
|
||||||
{ value: 'ai21labs/jamba-1.5-large-instruct', label: 'Jamba 1.5 Large', description: 'Other' },
|
|
||||||
{ value: 'ai21labs/jamba-1.5-mini-instruct', label: 'Jamba 1.5 Mini', description: 'Other' },
|
|
||||||
{ value: 'writer/palmyra-creative-122b', label: 'Palmyra Creative 122B', description: 'Other' },
|
|
||||||
{ value: 'writer/palmyra-fin-70b-32k', label: 'Palmyra Fin 70B 32K', description: 'Other' },
|
|
||||||
{ value: 'writer/palmyra-med-70b', label: 'Palmyra Med 70B', description: 'Other' },
|
|
||||||
{ value: 'writer/palmyra-med-70b-32k', label: 'Palmyra Med 70B 32K', description: 'Other' },
|
|
||||||
// Z-AI GLM MODELS
|
|
||||||
{ value: 'z-ai/glm5', label: 'GLM-5', description: 'Z-AI' },
|
|
||||||
{ value: 'z-ai/glm4.7', label: 'GLM-4.7', description: 'Z-AI' },
|
|
||||||
// MINIMAX MODELS
|
|
||||||
{ value: 'minimaxai/minimax-m2.5', label: 'MiniMax M2.5', description: 'MiniMax' },
|
|
||||||
// MOONSHOT KIMI MODELS
|
|
||||||
{ value: 'moonshotai/kimi-k2.5', label: 'Kimi K2.5', description: 'Moonshot' },
|
|
||||||
{ value: 'moonshotai/kimi-k2-instruct', label: 'Kimi K2 Instruct', description: 'Moonshot' },
|
|
||||||
{ value: 'moonshotai/kimi-k2-thinking', label: 'Kimi K2 Thinking', description: 'Moonshot' },
|
|
||||||
{ value: 'moonshotai/kimi-k2.5-thinking', label: 'Kimi K2.5 Thinking', description: 'Moonshot' },
|
|
||||||
{ value: 'moonshotai/kimi-k2-instruct-0905', label: 'Kimi K2 Instruct 0905', description: 'Moonshot' },
|
|
||||||
]
|
|
||||||
}
|
|
||||||
|
|
||||||
let cachedNvidiaNimOptions: ModelOption[] | null = null
|
|
||||||
|
|
||||||
export function getCachedNvidiaNimModelOptions(): ModelOption[] {
|
|
||||||
if (!cachedNvidiaNimOptions) {
|
|
||||||
cachedNvidiaNimOptions = getNvidiaNimModels()
|
|
||||||
}
|
|
||||||
return cachedNvidiaNimOptions
|
|
||||||
}
|
|
||||||
@@ -104,57 +104,6 @@ const OPENAI_CONTEXT_WINDOWS: Record<string, number> = {
|
|||||||
'devstral-latest': 256_000,
|
'devstral-latest': 256_000,
|
||||||
'ministral-3b-latest': 256_000,
|
'ministral-3b-latest': 256_000,
|
||||||
|
|
||||||
// NVIDIA NIM - popular models
|
|
||||||
'nvidia/llama-3.1-nemotron-70b-instruct': 128_000,
|
|
||||||
'nvidia/llama-3.1-nemotron-ultra-253b-v1': 128_000,
|
|
||||||
'nvidia/nemotron-mini-4b-instruct': 32_768,
|
|
||||||
'meta/llama-3.1-405b-instruct': 128_000,
|
|
||||||
'meta/llama-3.1-70b-instruct': 128_000,
|
|
||||||
'meta/llama-3.1-8b-instruct': 128_000,
|
|
||||||
'meta/llama-3.2-90b-instruct': 128_000,
|
|
||||||
'meta/llama-3.2-1b-instruct': 128_000,
|
|
||||||
'meta/llama-3.2-3b-instruct': 128_000,
|
|
||||||
'meta/llama-3.3-70b-instruct': 128_000,
|
|
||||||
// Google Gemma via NVIDIA NIM
|
|
||||||
'google/gemma-2-27b-it': 8_192,
|
|
||||||
'google/gemma-2-9b-it': 8_192,
|
|
||||||
'google/gemma-3-27b-it': 131_072,
|
|
||||||
'google/gemma-3-12b-it': 131_072,
|
|
||||||
'google/gemma-3-4b-it': 131_072,
|
|
||||||
// DeepSeek via NVIDIA NIM
|
|
||||||
'deepseek-ai/deepseek-r1': 128_000,
|
|
||||||
'deepseek-ai/deepseek-v3': 128_000,
|
|
||||||
'deepseek-ai/deepseek-v3.2': 128_000,
|
|
||||||
// Qwen via NVIDIA NIM
|
|
||||||
'qwen/qwen3-32b': 128_000,
|
|
||||||
'qwen/qwen3-8b': 128_000,
|
|
||||||
'qwen/qwen2.5-7b-instruct': 32_768,
|
|
||||||
// Mistral via NVIDIA NIM
|
|
||||||
'mistralai/mistral-large-3-675b-instruct-2512': 256_000,
|
|
||||||
'mistralai/mistral-large-2-instruct': 256_000,
|
|
||||||
'mistralai/mistral-small-3.1-24b-instruct-2503': 32_768,
|
|
||||||
'mistralai/mixtral-8x7b-instruct-v0.1': 32_768,
|
|
||||||
// Microsoft Phi via NVIDIA NIM
|
|
||||||
'microsoft/phi-4-mini-instruct': 16_384,
|
|
||||||
'microsoft/phi-3.5-mini-instruct': 16_384,
|
|
||||||
'microsoft/phi-3-mini-128k-instruct': 128_000,
|
|
||||||
// IBM Granite via NVIDIA NIM
|
|
||||||
'ibm/granite-3.3-8b-instruct': 8_192,
|
|
||||||
'ibm/granite-8b-code-instruct': 8_192,
|
|
||||||
// GLM models via NVIDIA NIM
|
|
||||||
'z-ai/glm5': 200_000,
|
|
||||||
'z-ai/glm4.7': 128_000,
|
|
||||||
// Kimi models via NVIDIA NIM
|
|
||||||
'moonshotai/kimi-k2.5': 200_000,
|
|
||||||
'moonshotai/kimi-k2-instruct': 128_000,
|
|
||||||
// DBRX via NVIDIA NIM
|
|
||||||
'databricks/dbrx-instruct': 131_072,
|
|
||||||
// Jamba via NVIDIA NIM
|
|
||||||
'ai21labs/jamba-1.5-large-instruct': 256_000,
|
|
||||||
'ai21labs/jamba-1.5-mini-instruct': 256_000,
|
|
||||||
// Yi via NVIDIA NIM
|
|
||||||
'01-ai/yi-large': 32_768,
|
|
||||||
|
|
||||||
// MiniMax (all M2.x variants share 204,800 context, 131,072 max output)
|
// MiniMax (all M2.x variants share 204,800 context, 131,072 max output)
|
||||||
'MiniMax-M2.7': 204_800,
|
'MiniMax-M2.7': 204_800,
|
||||||
'MiniMax-M2.7-highspeed': 204_800,
|
'MiniMax-M2.7-highspeed': 204_800,
|
||||||
@@ -169,23 +118,14 @@ const OPENAI_CONTEXT_WINDOWS: Record<string, number> = {
|
|||||||
'minimax-m2.1': 204_800,
|
'minimax-m2.1': 204_800,
|
||||||
'minimax-m2.1-highspeed': 204_800,
|
'minimax-m2.1-highspeed': 204_800,
|
||||||
|
|
||||||
// MiniMax new models
|
|
||||||
'MiniMax-Text-01': 524_288,
|
|
||||||
'MiniMax-Text-01-Preview': 262_144,
|
|
||||||
'MiniMax-Vision-01': 32_768,
|
|
||||||
'MiniMax-Vision-01-Fast': 16_384,
|
|
||||||
'MiniMax-M2': 204_800,
|
|
||||||
|
|
||||||
// Google (via OpenRouter)
|
// Google (via OpenRouter)
|
||||||
'google/gemini-2.0-flash':1_048_576,
|
'google/gemini-2.0-flash':1_048_576,
|
||||||
'google/gemini-2.5-pro': 1_048_576,
|
'google/gemini-2.5-pro': 1_048_576,
|
||||||
|
|
||||||
// Google (native via CLAUDE_CODE_USE_GEMINI)
|
// Google (native via CLAUDE_CODE_USE_GEMINI)
|
||||||
'gemini-2.0-flash': 1_048_576,
|
'gemini-2.0-flash': 1_048_576,
|
||||||
'gemini-2.5-pro': 1_048_576,
|
'gemini-2.5-pro': 1_048_576,
|
||||||
'gemini-2.5-flash': 1_048_576,
|
'gemini-2.5-flash': 1_048_576,
|
||||||
'gemini-3.1-pro': 1_048_576,
|
|
||||||
'gemini-3.1-flash-lite-preview': 1_048_576,
|
|
||||||
|
|
||||||
// Ollama local models
|
// Ollama local models
|
||||||
// Llama 3.1+ models support 128k context natively (Meta official specs).
|
// Llama 3.1+ models support 128k context natively (Meta official specs).
|
||||||
@@ -204,21 +144,6 @@ const OPENAI_CONTEXT_WINDOWS: Record<string, number> = {
|
|||||||
'llama3.2:1b': 128_000,
|
'llama3.2:1b': 128_000,
|
||||||
'qwen3:8b': 128_000,
|
'qwen3:8b': 128_000,
|
||||||
'codestral': 32_768,
|
'codestral': 32_768,
|
||||||
|
|
||||||
// Alibaba DashScope (Coding Plan)
|
|
||||||
// Model context windows from DashScope API /models endpoint (April 2026).
|
|
||||||
// Values sourced from: qwen3.5-plus/qwen3-coder-plus (1M), qwen3-coder-next/max (256K),
|
|
||||||
// kimi-k2.5 (256K), glm-5/glm-4.7 (198K).
|
|
||||||
// Max output tokens: Qwen variants (64K/32K), GLM (16K).
|
|
||||||
'qwen3.6-plus': 1_000_000,
|
|
||||||
'qwen3.5-plus': 1_000_000,
|
|
||||||
'qwen3-coder-plus': 1_000_000,
|
|
||||||
'qwen3-coder-next': 262_144,
|
|
||||||
'qwen3-max': 262_144,
|
|
||||||
'qwen3-max-2026-01-23': 262_144,
|
|
||||||
'kimi-k2.5': 262_144,
|
|
||||||
'glm-5': 202_752,
|
|
||||||
'glm-4.7': 202_752,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
@@ -321,23 +246,15 @@ const OPENAI_MAX_OUTPUT_TOKENS: Record<string, number> = {
|
|||||||
'minimax-m2.5-highspeed': 131_072,
|
'minimax-m2.5-highspeed': 131_072,
|
||||||
'minimax-m2.1': 131_072,
|
'minimax-m2.1': 131_072,
|
||||||
'minimax-m2.1-highspeed': 131_072,
|
'minimax-m2.1-highspeed': 131_072,
|
||||||
// New MiniMax models
|
|
||||||
'MiniMax-M2': 131_072,
|
|
||||||
'MiniMax-Text-01': 65_536,
|
|
||||||
'MiniMax-Text-01-Preview': 65_536,
|
|
||||||
'MiniMax-Vision-01': 16_384,
|
|
||||||
'MiniMax-Vision-01-Fast': 16_384,
|
|
||||||
|
|
||||||
// Google (via OpenRouter)
|
// Google (via OpenRouter)
|
||||||
'google/gemini-2.0-flash': 8_192,
|
'google/gemini-2.0-flash': 8_192,
|
||||||
'google/gemini-2.5-pro': 65_536,
|
'google/gemini-2.5-pro': 65_536,
|
||||||
|
|
||||||
// Google (native via CLAUDE_CODE_USE_GEMINI)
|
// Google (native via CLAUDE_CODE_USE_GEMINI)
|
||||||
'gemini-2.0-flash': 8_192,
|
'gemini-2.0-flash': 8_192,
|
||||||
'gemini-2.5-pro': 65_536,
|
'gemini-2.5-pro': 65_536,
|
||||||
'gemini-2.5-flash': 65_536,
|
'gemini-2.5-flash': 65_536,
|
||||||
'gemini-3.1-pro': 65_536,
|
|
||||||
'gemini-3.1-flash-lite-preview': 65_536,
|
|
||||||
|
|
||||||
// Ollama local models (conservative safe defaults)
|
// Ollama local models (conservative safe defaults)
|
||||||
'llama3.3:70b': 4_096,
|
'llama3.3:70b': 4_096,
|
||||||
@@ -354,43 +271,6 @@ const OPENAI_MAX_OUTPUT_TOKENS: Record<string, number> = {
|
|||||||
'llama3.2:1b': 4_096,
|
'llama3.2:1b': 4_096,
|
||||||
'qwen3:8b': 8_192,
|
'qwen3:8b': 8_192,
|
||||||
'codestral': 8_192,
|
'codestral': 8_192,
|
||||||
|
|
||||||
// NVIDIA NIM models
|
|
||||||
'nvidia/llama-3.1-nemotron-70b-instruct': 32_768,
|
|
||||||
'nvidia/nemotron-mini-4b-instruct': 8_192,
|
|
||||||
'meta/llama-3.1-405b-instruct': 32_768,
|
|
||||||
'meta/llama-3.1-70b-instruct': 32_768,
|
|
||||||
'meta/llama-3.2-90b-instruct': 32_768,
|
|
||||||
'meta/llama-3.3-70b-instruct': 32_768,
|
|
||||||
'google/gemma-2-27b-it': 4_096,
|
|
||||||
'google/gemma-3-27b-it': 16_384,
|
|
||||||
'google/gemma-3-12b-it': 16_384,
|
|
||||||
'deepseek-ai/deepseek-r1': 32_768,
|
|
||||||
'deepseek-ai/deepseek-v3': 32_768,
|
|
||||||
'deepseek-ai/deepseek-v3.2': 32_768,
|
|
||||||
'qwen/qwen3-32b': 32_768,
|
|
||||||
'qwen/qwen2.5-7b-instruct': 8_192,
|
|
||||||
'mistralai/mistral-large-3-675b-instruct-2512': 32_768,
|
|
||||||
'mistralai/mixtral-8x7b-instruct-v0.1': 8_192,
|
|
||||||
'microsoft/phi-4-mini-instruct': 4_096,
|
|
||||||
'microsoft/phi-3.5-mini-instruct': 4_096,
|
|
||||||
'ibm/granite-3.3-8b-instruct': 4_096,
|
|
||||||
'z-ai/glm5': 32_768,
|
|
||||||
'moonshotai/kimi-k2.5': 32_768,
|
|
||||||
'databricks/dbrx-instruct': 32_768,
|
|
||||||
'ai21labs/jamba-1.5-large-instruct': 32_768,
|
|
||||||
'01-ai/yi-large': 8_192,
|
|
||||||
|
|
||||||
// Alibaba DashScope (Coding Plan)
|
|
||||||
'qwen3.6-plus': 65_536,
|
|
||||||
'qwen3.5-plus': 65_536,
|
|
||||||
'qwen3-coder-plus': 65_536,
|
|
||||||
'qwen3-coder-next': 65_536,
|
|
||||||
'qwen3-max': 32_768,
|
|
||||||
'qwen3-max-2026-01-23': 32_768,
|
|
||||||
'kimi-k2.5': 32_768,
|
|
||||||
'glm-5': 16_384,
|
|
||||||
'glm-4.7': 16_384,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
function lookupByModel<T>(table: Record<string, T>, model: string): T | undefined {
|
function lookupByModel<T>(table: Record<string, T>, model: string): T | undefined {
|
||||||
|
|||||||
@@ -11,17 +11,9 @@ export type APIProvider =
|
|||||||
| 'gemini'
|
| 'gemini'
|
||||||
| 'github'
|
| 'github'
|
||||||
| 'codex'
|
| 'codex'
|
||||||
| 'nvidia-nim'
|
|
||||||
| 'minimax'
|
|
||||||
| 'mistral'
|
| 'mistral'
|
||||||
|
|
||||||
export function getAPIProvider(): APIProvider {
|
export function getAPIProvider(): APIProvider {
|
||||||
if (isEnvTruthy(process.env.NVIDIA_NIM)) {
|
|
||||||
return 'nvidia-nim'
|
|
||||||
}
|
|
||||||
if (isEnvTruthy(process.env.MINIMAX_API_KEY)) {
|
|
||||||
return 'minimax'
|
|
||||||
}
|
|
||||||
return isEnvTruthy(process.env.CLAUDE_CODE_USE_GEMINI)
|
return isEnvTruthy(process.env.CLAUDE_CODE_USE_GEMINI)
|
||||||
? 'gemini'
|
? 'gemini'
|
||||||
:
|
:
|
||||||
|
|||||||
@@ -11,8 +11,6 @@ import {
|
|||||||
} from '@anthropic-ai/sdk'
|
} from '@anthropic-ai/sdk'
|
||||||
import { getModelStrings } from './modelStrings.js'
|
import { getModelStrings } from './modelStrings.js'
|
||||||
import { getCachedOllamaModelOptions, isOllamaProvider } from './ollamaModels.js'
|
import { getCachedOllamaModelOptions, isOllamaProvider } from './ollamaModels.js'
|
||||||
import { getCachedNvidiaNimModelOptions, isNvidiaNimProvider } from './nvidiaNimModels.js'
|
|
||||||
import { getCachedMiniMaxModelOptions, isMiniMaxProvider } from './minimaxModels.js'
|
|
||||||
|
|
||||||
// Cache valid models to avoid repeated API calls
|
// Cache valid models to avoid repeated API calls
|
||||||
const validModelCache = new Map<string, boolean>()
|
const validModelCache = new Map<string, boolean>()
|
||||||
@@ -49,40 +47,6 @@ export async function validateModel(
|
|||||||
// If cache is empty, fall through to API validation
|
// If cache is empty, fall through to API validation
|
||||||
}
|
}
|
||||||
|
|
||||||
// For NVIDIA NIM provider, validate against cached model list
|
|
||||||
if (isNvidiaNimProvider()) {
|
|
||||||
const nvidiaModels = getCachedNvidiaNimModelOptions()
|
|
||||||
const found = nvidiaModels.some(m => m.value === normalizedModel)
|
|
||||||
if (found) {
|
|
||||||
validModelCache.set(normalizedModel, true)
|
|
||||||
return { valid: true }
|
|
||||||
}
|
|
||||||
if (nvidiaModels.length > 0) {
|
|
||||||
const MAX_SHOWN = 5
|
|
||||||
const names = nvidiaModels.map(m => m.value)
|
|
||||||
const shown = names.slice(0, MAX_SHOWN).join(', ')
|
|
||||||
const suffix = names.length > MAX_SHOWN ? ` and ${names.length - MAX_SHOWN} more` : ''
|
|
||||||
return { valid: false, error: `Model '${normalizedModel}' not found in NVIDIA NIM catalog. Available: ${shown}${suffix}` }
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// For MiniMax provider, validate against cached model list
|
|
||||||
if (isMiniMaxProvider()) {
|
|
||||||
const minimaxModels = getCachedMiniMaxModelOptions()
|
|
||||||
const found = minimaxModels.some(m => m.value === normalizedModel)
|
|
||||||
if (found) {
|
|
||||||
validModelCache.set(normalizedModel, true)
|
|
||||||
return { valid: true }
|
|
||||||
}
|
|
||||||
if (minimaxModels.length > 0) {
|
|
||||||
const MAX_SHOWN = 5
|
|
||||||
const names = minimaxModels.map(m => m.value)
|
|
||||||
const shown = names.slice(0, MAX_SHOWN).join(', ')
|
|
||||||
const suffix = names.length > MAX_SHOWN ? ` and ${names.length - MAX_SHOWN} more` : ''
|
|
||||||
return { valid: false, error: `Model '${normalizedModel}' not found in MiniMax catalog. Available: ${shown}${suffix}` }
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check against availableModels allowlist before any API call
|
// Check against availableModels allowlist before any API call
|
||||||
if (!isModelAllowed(normalizedModel)) {
|
if (!isModelAllowed(normalizedModel)) {
|
||||||
return {
|
return {
|
||||||
|
|||||||
@@ -76,9 +76,7 @@ describe('OpenClaude paths', () => {
|
|||||||
})
|
})
|
||||||
|
|
||||||
test('local installer uses openclaude wrapper path', async () => {
|
test('local installer uses openclaude wrapper path', async () => {
|
||||||
// Force .openclaude config home so the test doesn't fall back to
|
delete process.env.CLAUDE_CONFIG_DIR
|
||||||
// ~/.claude when ~/.openclaude doesn't exist on this machine.
|
|
||||||
process.env.CLAUDE_CONFIG_DIR = join(homedir(), '.openclaude')
|
|
||||||
const { getLocalClaudePath } = await importFreshLocalInstaller()
|
const { getLocalClaudePath } = await importFreshLocalInstaller()
|
||||||
|
|
||||||
expect(getLocalClaudePath()).toBe(
|
expect(getLocalClaudePath()).toBe(
|
||||||
|
|||||||
@@ -105,14 +105,6 @@ export function getLocalOpenAICompatibleProviderLabel(baseUrl?: string): string
|
|||||||
) {
|
) {
|
||||||
return 'text-generation-webui'
|
return 'text-generation-webui'
|
||||||
}
|
}
|
||||||
// Check for NVIDIA NIM
|
|
||||||
if (host.includes('nvidia') || haystack.includes('nvidia') || host.includes('integrate.api.nvidia')) {
|
|
||||||
return 'NVIDIA NIM'
|
|
||||||
}
|
|
||||||
// Check for MiniMax (both api.minimax.io and api.minimax.chat)
|
|
||||||
if (host.includes('minimax') || haystack.includes('minimax')) {
|
|
||||||
return 'MiniMax'
|
|
||||||
}
|
|
||||||
} catch {
|
} catch {
|
||||||
// Fall back to the generic label when the base URL is malformed.
|
// Fall back to the generic label when the base URL is malformed.
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -21,8 +21,6 @@ export const VALID_PROVIDERS = [
|
|||||||
'bedrock',
|
'bedrock',
|
||||||
'vertex',
|
'vertex',
|
||||||
'ollama',
|
'ollama',
|
||||||
'nvidia-nim',
|
|
||||||
'minimax',
|
|
||||||
] as const
|
] as const
|
||||||
|
|
||||||
export type ProviderFlagName = (typeof VALID_PROVIDERS)[number]
|
export type ProviderFlagName = (typeof VALID_PROVIDERS)[number]
|
||||||
@@ -133,21 +131,6 @@ export function applyProviderFlag(
|
|||||||
}
|
}
|
||||||
if (model) process.env.OPENAI_MODEL = model
|
if (model) process.env.OPENAI_MODEL = model
|
||||||
break
|
break
|
||||||
|
|
||||||
case 'nvidia-nim':
|
|
||||||
process.env.CLAUDE_CODE_USE_OPENAI = '1'
|
|
||||||
process.env.OPENAI_BASE_URL ??= 'https://integrate.api.nvidia.com/v1'
|
|
||||||
process.env.NVIDIA_NIM = '1'
|
|
||||||
process.env.OPENAI_MODEL ??= 'nvidia/llama-3.1-nemotron-70b-instruct'
|
|
||||||
if (model) process.env.OPENAI_MODEL = model
|
|
||||||
break
|
|
||||||
|
|
||||||
case 'minimax':
|
|
||||||
process.env.CLAUDE_CODE_USE_OPENAI = '1'
|
|
||||||
process.env.OPENAI_BASE_URL ??= 'https://api.minimax.io/v1'
|
|
||||||
process.env.OPENAI_MODEL ??= 'MiniMax-M2.5'
|
|
||||||
if (model) process.env.OPENAI_MODEL = model
|
|
||||||
break
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return {}
|
return {}
|
||||||
|
|||||||
@@ -1,108 +0,0 @@
|
|||||||
import { describe, expect, test } from 'bun:test'
|
|
||||||
|
|
||||||
import {
|
|
||||||
getPrimaryModel,
|
|
||||||
hasMultipleModels,
|
|
||||||
parseModelList,
|
|
||||||
} from './providerModels.ts'
|
|
||||||
|
|
||||||
// ── parseModelList ────────────────────────────────────────────────────────────
|
|
||||||
|
|
||||||
describe('parseModelList', () => {
|
|
||||||
test('splits comma-separated models', () => {
|
|
||||||
expect(parseModelList('glm-4.7, glm-4.7-flash')).toEqual([
|
|
||||||
'glm-4.7',
|
|
||||||
'glm-4.7-flash',
|
|
||||||
])
|
|
||||||
})
|
|
||||||
|
|
||||||
test('returns single model in an array', () => {
|
|
||||||
expect(parseModelList('llama3.1:8b')).toEqual(['llama3.1:8b'])
|
|
||||||
})
|
|
||||||
|
|
||||||
test('trims whitespace around each model', () => {
|
|
||||||
expect(parseModelList(' gpt-4o , gpt-4o-mini , o3-mini ')).toEqual([
|
|
||||||
'gpt-4o',
|
|
||||||
'gpt-4o-mini',
|
|
||||||
'o3-mini',
|
|
||||||
])
|
|
||||||
})
|
|
||||||
|
|
||||||
test('filters out empty entries from trailing commas', () => {
|
|
||||||
expect(parseModelList('gpt-4o,,gpt-4o-mini,')).toEqual([
|
|
||||||
'gpt-4o',
|
|
||||||
'gpt-4o-mini',
|
|
||||||
])
|
|
||||||
})
|
|
||||||
|
|
||||||
test('returns empty array for empty string', () => {
|
|
||||||
expect(parseModelList('')).toEqual([])
|
|
||||||
})
|
|
||||||
|
|
||||||
test('returns empty array for whitespace-only string', () => {
|
|
||||||
expect(parseModelList(' ')).toEqual([])
|
|
||||||
})
|
|
||||||
|
|
||||||
test('returns empty array for comma-only string', () => {
|
|
||||||
expect(parseModelList(',,,')).toEqual([])
|
|
||||||
})
|
|
||||||
|
|
||||||
test('handles models with colons', () => {
|
|
||||||
expect(parseModelList('qwen2.5-coder:7b, llama3.1:8b')).toEqual([
|
|
||||||
'qwen2.5-coder:7b',
|
|
||||||
'llama3.1:8b',
|
|
||||||
])
|
|
||||||
})
|
|
||||||
})
|
|
||||||
|
|
||||||
// ── getPrimaryModel ───────────────────────────────────────────────────────────
|
|
||||||
|
|
||||||
describe('getPrimaryModel', () => {
|
|
||||||
test('returns first model from comma-separated list', () => {
|
|
||||||
expect(getPrimaryModel('glm-4.7, glm-4.7-flash')).toBe('glm-4.7')
|
|
||||||
})
|
|
||||||
|
|
||||||
test('returns the only model when single model is provided', () => {
|
|
||||||
expect(getPrimaryModel('llama3.1:8b')).toBe('llama3.1:8b')
|
|
||||||
})
|
|
||||||
|
|
||||||
test('returns the original string when input is empty', () => {
|
|
||||||
expect(getPrimaryModel('')).toBe('')
|
|
||||||
})
|
|
||||||
|
|
||||||
test('returns first model after trimming', () => {
|
|
||||||
expect(getPrimaryModel(' gpt-4o , gpt-4o-mini')).toBe('gpt-4o')
|
|
||||||
})
|
|
||||||
|
|
||||||
test('returns first model when others are empty from trailing commas', () => {
|
|
||||||
expect(getPrimaryModel('claude-sonnet-4-6,,')).toBe('claude-sonnet-4-6')
|
|
||||||
})
|
|
||||||
})
|
|
||||||
|
|
||||||
// ── hasMultipleModels ─────────────────────────────────────────────────────────
|
|
||||||
|
|
||||||
describe('hasMultipleModels', () => {
|
|
||||||
test('returns true when multiple models are present', () => {
|
|
||||||
expect(hasMultipleModels('glm-4.7, glm-4.7-flash')).toBe(true)
|
|
||||||
})
|
|
||||||
|
|
||||||
test('returns false for a single model', () => {
|
|
||||||
expect(hasMultipleModels('llama3.1:8b')).toBe(false)
|
|
||||||
})
|
|
||||||
|
|
||||||
test('returns false for empty string', () => {
|
|
||||||
expect(hasMultipleModels('')).toBe(false)
|
|
||||||
})
|
|
||||||
|
|
||||||
test('returns false for whitespace-only string', () => {
|
|
||||||
expect(hasMultipleModels(' ')).toBe(false)
|
|
||||||
})
|
|
||||||
|
|
||||||
test('returns false when extra commas produce no extra models', () => {
|
|
||||||
expect(hasMultipleModels('gpt-4o,,')).toBe(false)
|
|
||||||
})
|
|
||||||
|
|
||||||
test('returns true for three models', () => {
|
|
||||||
expect(hasMultipleModels('a, b, c')).toBe(true)
|
|
||||||
})
|
|
||||||
})
|
|
||||||
@@ -1,33 +0,0 @@
|
|||||||
/**
|
|
||||||
* Utility functions for parsing comma-separated model names in provider profiles.
|
|
||||||
*
|
|
||||||
* Example: "glm-4.7, glm-4.7-flash" -> ["glm-4.7", "glm-4.7-flash"]
|
|
||||||
* Single model: "llama3.1:8b" -> ["llama3.1:8b"]
|
|
||||||
*/
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Splits a comma-separated model field into an array of trimmed model names,
|
|
||||||
* filtering out any empty entries.
|
|
||||||
*/
|
|
||||||
export function parseModelList(modelField: string): string[] {
|
|
||||||
return modelField
|
|
||||||
.split(',')
|
|
||||||
.map((part) => part.trim())
|
|
||||||
.filter((part) => part.length > 0)
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Returns the first (primary) model from a comma-separated model field.
|
|
||||||
* Falls back to the original string if parsing yields no results.
|
|
||||||
*/
|
|
||||||
export function getPrimaryModel(modelField: string): string {
|
|
||||||
const models = parseModelList(modelField)
|
|
||||||
return models.length > 0 ? models[0] : modelField
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Returns true if the model field contains more than one model.
|
|
||||||
*/
|
|
||||||
export function hasMultipleModels(modelField: string): boolean {
|
|
||||||
return parseModelList(modelField).length > 1
|
|
||||||
}
|
|
||||||
@@ -166,7 +166,7 @@ test('matching persisted gemini env is reused for gemini launch', async () => {
|
|||||||
assert.equal(env.GEMINI_BASE_URL, 'https://example.test/v1beta/openai')
|
assert.equal(env.GEMINI_BASE_URL, 'https://example.test/v1beta/openai')
|
||||||
})
|
})
|
||||||
|
|
||||||
test('openai env variables take precedence over gemini', async () => {
|
test('gemini launch ignores mismatched persisted openai env and strips other provider secrets', async () => {
|
||||||
const env = await buildLaunchEnv({
|
const env = await buildLaunchEnv({
|
||||||
profile: 'gemini',
|
profile: 'gemini',
|
||||||
persisted: profile('openai', {
|
persisted: profile('openai', {
|
||||||
@@ -187,16 +187,16 @@ test('openai env variables take precedence over gemini', async () => {
|
|||||||
},
|
},
|
||||||
})
|
})
|
||||||
|
|
||||||
assert.equal(env.CLAUDE_CODE_USE_GEMINI, undefined)
|
assert.equal(env.CLAUDE_CODE_USE_GEMINI, '1')
|
||||||
assert.equal(env.CLAUDE_CODE_USE_OPENAI, '1')
|
assert.equal(env.CLAUDE_CODE_USE_OPENAI, undefined)
|
||||||
assert.equal(env.GEMINI_MODEL, undefined)
|
assert.equal(env.GEMINI_MODEL, 'gemini-2.0-flash')
|
||||||
assert.equal(env.GEMINI_API_KEY, undefined)
|
assert.equal(env.GEMINI_API_KEY, 'gem-live')
|
||||||
assert.equal(
|
assert.equal(
|
||||||
env.GEMINI_BASE_URL,
|
env.GEMINI_BASE_URL,
|
||||||
undefined,
|
'https://generativelanguage.googleapis.com/v1beta/openai',
|
||||||
)
|
)
|
||||||
assert.equal(env.GOOGLE_API_KEY, undefined)
|
assert.equal(env.GOOGLE_API_KEY, undefined)
|
||||||
assert.equal(env.OPENAI_API_KEY, 'sk-live')
|
assert.equal(env.OPENAI_API_KEY, undefined)
|
||||||
assert.equal(env.CODEX_API_KEY, undefined)
|
assert.equal(env.CODEX_API_KEY, undefined)
|
||||||
assert.equal(env.CHATGPT_ACCOUNT_ID, undefined)
|
assert.equal(env.CHATGPT_ACCOUNT_ID, undefined)
|
||||||
})
|
})
|
||||||
@@ -562,13 +562,8 @@ test('buildStartupEnvFromProfile leaves explicit provider selections untouched',
|
|||||||
processEnv,
|
processEnv,
|
||||||
})
|
})
|
||||||
|
|
||||||
// Remove the strict object equality check: assert.equal(env, processEnv)
|
assert.equal(env, processEnv)
|
||||||
assert.equal(env.CLAUDE_CODE_USE_GEMINI, '1')
|
assert.equal(env.CLAUDE_CODE_USE_GEMINI, '1')
|
||||||
assert.equal(env.GEMINI_API_KEY, 'gem-live')
|
|
||||||
assert.equal(env.GEMINI_MODEL, 'gemini-2.0-flash')
|
|
||||||
// Add the new default fields injected by the function
|
|
||||||
assert.equal(env.GEMINI_BASE_URL, 'https://generativelanguage.googleapis.com/v1beta/openai')
|
|
||||||
assert.equal(env.GEMINI_AUTH_MODE, 'api-key')
|
|
||||||
assert.equal(env.OPENAI_API_KEY, undefined)
|
assert.equal(env.OPENAI_API_KEY, undefined)
|
||||||
})
|
})
|
||||||
|
|
||||||
@@ -612,17 +607,14 @@ test('buildStartupEnvFromProfile treats explicit falsey provider flags as user i
|
|||||||
processEnv,
|
processEnv,
|
||||||
})
|
})
|
||||||
|
|
||||||
assert.equal(env.CLAUDE_CODE_USE_OPENAI, undefined)
|
assert.equal(env, processEnv)
|
||||||
assert.equal(env.CLAUDE_CODE_USE_GEMINI, '1')
|
assert.equal(env.CLAUDE_CODE_USE_OPENAI, '0')
|
||||||
assert.equal(env.GEMINI_API_KEY, 'gem-persisted')
|
assert.equal(env.GEMINI_API_KEY, undefined)
|
||||||
assert.equal(env.GEMINI_MODEL, 'gemini-2.5-flash')
|
|
||||||
assert.equal(env.GEMINI_BASE_URL, 'https://generativelanguage.googleapis.com/v1beta/openai')
|
|
||||||
assert.equal(env.GEMINI_AUTH_MODE, 'api-key')
|
|
||||||
})
|
})
|
||||||
|
|
||||||
test('maskSecretForDisplay preserves only a short prefix and suffix', () => {
|
test('maskSecretForDisplay preserves only a short prefix and suffix', () => {
|
||||||
assert.equal(maskSecretForDisplay('sk-secret-12345678'), 'sk-...678')
|
assert.equal(maskSecretForDisplay('sk-secret-12345678'), 'sk-...5678')
|
||||||
assert.equal(maskSecretForDisplay('AIzaSecret12345678'), 'AIz...678')
|
assert.equal(maskSecretForDisplay('AIzaSecret12345678'), 'AIza...5678')
|
||||||
})
|
})
|
||||||
|
|
||||||
test('redactSecretValueForDisplay masks poisoned display fields that equal configured secrets', () => {
|
test('redactSecretValueForDisplay masks poisoned display fields that equal configured secrets', () => {
|
||||||
@@ -630,7 +622,7 @@ test('redactSecretValueForDisplay masks poisoned display fields that equal confi
|
|||||||
|
|
||||||
assert.equal(
|
assert.equal(
|
||||||
redactSecretValueForDisplay(apiKey, { OPENAI_API_KEY: apiKey }),
|
redactSecretValueForDisplay(apiKey, { OPENAI_API_KEY: apiKey }),
|
||||||
'sk-...678',
|
'sk-...5678',
|
||||||
)
|
)
|
||||||
assert.equal(
|
assert.equal(
|
||||||
redactSecretValueForDisplay('gpt-4o', { OPENAI_API_KEY: apiKey }),
|
redactSecretValueForDisplay('gpt-4o', { OPENAI_API_KEY: apiKey }),
|
||||||
|
|||||||
@@ -6,32 +6,29 @@ import {
|
|||||||
isCodexBaseUrl,
|
isCodexBaseUrl,
|
||||||
resolveCodexApiCredentials,
|
resolveCodexApiCredentials,
|
||||||
resolveProviderRequest,
|
resolveProviderRequest,
|
||||||
} from '../services/api/providerConfig.js'
|
} from '../services/api/providerConfig.ts'
|
||||||
import { parseChatgptAccountId } from '../services/api/codexOAuthShared.js'
|
import { parseChatgptAccountId } from '../services/api/codexOAuthShared.js'
|
||||||
import {
|
import {
|
||||||
getGoalDefaultOpenAIModel,
|
getGoalDefaultOpenAIModel,
|
||||||
normalizeRecommendationGoal,
|
normalizeRecommendationGoal,
|
||||||
type RecommendationGoal,
|
type RecommendationGoal,
|
||||||
} from './providerRecommendation.js'
|
} from './providerRecommendation.ts'
|
||||||
import { readGeminiAccessToken } from './geminiCredentials.js'
|
import { readGeminiAccessToken } from './geminiCredentials.ts'
|
||||||
import { getOllamaChatBaseUrl } from './providerDiscovery.js'
|
import { getOllamaChatBaseUrl } from './providerDiscovery.ts'
|
||||||
import { getProviderValidationError } from './providerValidation.js'
|
import { getProviderValidationError } from './providerValidation.ts'
|
||||||
import {
|
import {
|
||||||
maskSecretForDisplay,
|
maskSecretForDisplay,
|
||||||
redactSecretValueForDisplay,
|
redactSecretValueForDisplay,
|
||||||
sanitizeApiKey,
|
sanitizeApiKey,
|
||||||
sanitizeProviderConfigValue,
|
sanitizeProviderConfigValue,
|
||||||
} from './providerSecrets.js'
|
} from './providerSecrets.ts'
|
||||||
|
|
||||||
export {
|
export {
|
||||||
maskSecretForDisplay,
|
maskSecretForDisplay,
|
||||||
redactSecretValueForDisplay,
|
redactSecretValueForDisplay,
|
||||||
sanitizeApiKey,
|
sanitizeApiKey,
|
||||||
sanitizeProviderConfigValue,
|
sanitizeProviderConfigValue,
|
||||||
} from './providerSecrets.js'
|
} from './providerSecrets.ts'
|
||||||
import { isEnvTruthy } from './envUtils.ts'
|
|
||||||
|
|
||||||
import { PROVIDERS } from './configConstants.js'
|
|
||||||
|
|
||||||
export const PROFILE_FILE_NAME = '.openclaude-profile.json'
|
export const PROFILE_FILE_NAME = '.openclaude-profile.json'
|
||||||
export const DEFAULT_GEMINI_BASE_URL =
|
export const DEFAULT_GEMINI_BASE_URL =
|
||||||
@@ -60,28 +57,18 @@ const PROFILE_ENV_KEYS = [
|
|||||||
'GEMINI_MODEL',
|
'GEMINI_MODEL',
|
||||||
'GEMINI_BASE_URL',
|
'GEMINI_BASE_URL',
|
||||||
'GOOGLE_API_KEY',
|
'GOOGLE_API_KEY',
|
||||||
'NVIDIA_NIM',
|
|
||||||
'NVIDIA_API_KEY',
|
|
||||||
'NVIDIA_MODEL',
|
|
||||||
'MINIMAX_API_KEY',
|
|
||||||
'MINIMAX_BASE_URL',
|
|
||||||
'MINIMAX_MODEL',
|
|
||||||
'MISTRAL_BASE_URL',
|
'MISTRAL_BASE_URL',
|
||||||
'MISTRAL_API_KEY',
|
'MISTRAL_API_KEY',
|
||||||
'MISTRAL_MODEL',
|
'MISTRAL_MODEL',
|
||||||
] as const
|
] as const
|
||||||
|
|
||||||
const SECRET_ENV_KEYS = [
|
export type ProviderProfile =
|
||||||
'OPENAI_API_KEY',
|
| 'openai'
|
||||||
'CODEX_API_KEY',
|
| 'ollama'
|
||||||
'GEMINI_API_KEY',
|
| 'codex'
|
||||||
'GOOGLE_API_KEY',
|
| 'gemini'
|
||||||
'NVIDIA_API_KEY',
|
| 'atomic-chat'
|
||||||
'MINIMAX_API_KEY',
|
| 'mistral'
|
||||||
'MISTRAL_API_KEY',
|
|
||||||
] as const
|
|
||||||
|
|
||||||
export type ProviderProfile = 'openai' | 'ollama' | 'codex' | 'gemini' | 'atomic-chat' | 'nvidia-nim' | 'minimax' | 'mistral'
|
|
||||||
|
|
||||||
export type ProfileEnv = {
|
export type ProfileEnv = {
|
||||||
OPENAI_BASE_URL?: string
|
OPENAI_BASE_URL?: string
|
||||||
@@ -95,12 +82,6 @@ export type ProfileEnv = {
|
|||||||
GEMINI_AUTH_MODE?: 'api-key' | 'access-token' | 'adc'
|
GEMINI_AUTH_MODE?: 'api-key' | 'access-token' | 'adc'
|
||||||
GEMINI_MODEL?: string
|
GEMINI_MODEL?: string
|
||||||
GEMINI_BASE_URL?: string
|
GEMINI_BASE_URL?: string
|
||||||
GOOGLE_API_KEY?: string
|
|
||||||
NVIDIA_NIM?: string
|
|
||||||
NVIDIA_API_KEY?: string
|
|
||||||
MINIMAX_API_KEY?: string
|
|
||||||
MINIMAX_BASE_URL?: string
|
|
||||||
MINIMAX_MODEL?: string
|
|
||||||
MISTRAL_BASE_URL?: string
|
MISTRAL_BASE_URL?: string
|
||||||
MISTRAL_API_KEY?: string
|
MISTRAL_API_KEY?: string
|
||||||
MISTRAL_MODEL?: string
|
MISTRAL_MODEL?: string
|
||||||
@@ -112,19 +93,6 @@ export type ProfileFile = {
|
|||||||
createdAt: string
|
createdAt: string
|
||||||
}
|
}
|
||||||
|
|
||||||
type SecretValueSource = Partial<
|
|
||||||
Record<
|
|
||||||
| 'OPENAI_API_KEY'
|
|
||||||
| 'CODEX_API_KEY'
|
|
||||||
| 'GEMINI_API_KEY'
|
|
||||||
| 'GOOGLE_API_KEY'
|
|
||||||
| 'NVIDIA_API_KEY'
|
|
||||||
| 'MINIMAX_API_KEY'
|
|
||||||
| 'MISTRAL_API_KEY',
|
|
||||||
string | undefined
|
|
||||||
>
|
|
||||||
>
|
|
||||||
|
|
||||||
type ProfileFileLocation = {
|
type ProfileFileLocation = {
|
||||||
cwd?: string
|
cwd?: string
|
||||||
filePath?: string
|
filePath?: string
|
||||||
@@ -145,8 +113,6 @@ export function isProviderProfile(value: unknown): value is ProviderProfile {
|
|||||||
value === 'codex' ||
|
value === 'codex' ||
|
||||||
value === 'gemini' ||
|
value === 'gemini' ||
|
||||||
value === 'atomic-chat' ||
|
value === 'atomic-chat' ||
|
||||||
value === 'nvidia-nim' ||
|
|
||||||
value === 'minimax' ||
|
|
||||||
value === 'mistral'
|
value === 'mistral'
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
@@ -177,67 +143,6 @@ export function buildAtomicChatProfileEnv(
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
export function buildNvidiaNimProfileEnv(options: {
|
|
||||||
model?: string | null
|
|
||||||
baseUrl?: string | null
|
|
||||||
apiKey?: string | null
|
|
||||||
processEnv?: NodeJS.ProcessEnv
|
|
||||||
}): ProfileEnv | null {
|
|
||||||
const processEnv = options.processEnv ?? process.env
|
|
||||||
const key = sanitizeApiKey(options.apiKey ?? processEnv.NVIDIA_API_KEY)
|
|
||||||
if (!key) {
|
|
||||||
return null
|
|
||||||
}
|
|
||||||
|
|
||||||
const defaultBaseUrl = 'https://integrate.api.nvidia.com/v1'
|
|
||||||
const secretSource: SecretValueSource = { OPENAI_API_KEY: key }
|
|
||||||
|
|
||||||
return {
|
|
||||||
OPENAI_BASE_URL:
|
|
||||||
sanitizeProviderConfigValue(options.baseUrl, secretSource) ||
|
|
||||||
sanitizeProviderConfigValue(processEnv.OPENAI_BASE_URL, secretSource) ||
|
|
||||||
defaultBaseUrl,
|
|
||||||
OPENAI_MODEL:
|
|
||||||
sanitizeProviderConfigValue(options.model, secretSource) ||
|
|
||||||
sanitizeProviderConfigValue(processEnv.OPENAI_MODEL, secretSource) ||
|
|
||||||
'nvidia/llama-3.1-nemotron-70b-instruct',
|
|
||||||
OPENAI_API_KEY: key,
|
|
||||||
NVIDIA_NIM: '1',
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
export function buildMiniMaxProfileEnv(options: {
|
|
||||||
model?: string | null
|
|
||||||
baseUrl?: string | null
|
|
||||||
apiKey?: string | null
|
|
||||||
processEnv?: NodeJS.ProcessEnv
|
|
||||||
}): ProfileEnv | null {
|
|
||||||
const processEnv = options.processEnv ?? process.env
|
|
||||||
const key = sanitizeApiKey(options.apiKey ?? processEnv.MINIMAX_API_KEY)
|
|
||||||
if (!key) {
|
|
||||||
return null
|
|
||||||
}
|
|
||||||
|
|
||||||
const defaultBaseUrl = 'https://api.minimax.io/v1'
|
|
||||||
const defaultModel = 'MiniMax-M2.5'
|
|
||||||
const secretSource: SecretValueSource = { OPENAI_API_KEY: key }
|
|
||||||
|
|
||||||
return {
|
|
||||||
OPENAI_BASE_URL:
|
|
||||||
sanitizeProviderConfigValue(options.baseUrl, secretSource) ||
|
|
||||||
sanitizeProviderConfigValue(processEnv.OPENAI_BASE_URL, secretSource) ||
|
|
||||||
defaultBaseUrl,
|
|
||||||
OPENAI_MODEL:
|
|
||||||
sanitizeProviderConfigValue(options.model, secretSource) ||
|
|
||||||
sanitizeProviderConfigValue(processEnv.OPENAI_MODEL, secretSource) ||
|
|
||||||
defaultModel,
|
|
||||||
OPENAI_API_KEY: key,
|
|
||||||
MINIMAX_API_KEY: key,
|
|
||||||
MINIMAX_BASE_URL: defaultBaseUrl,
|
|
||||||
MINIMAX_MODEL: defaultModel,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
export function buildGeminiProfileEnv(options: {
|
export function buildGeminiProfileEnv(options: {
|
||||||
model?: string | null
|
model?: string | null
|
||||||
baseUrl?: string | null
|
baseUrl?: string | null
|
||||||
@@ -256,13 +161,15 @@ export function buildGeminiProfileEnv(options: {
|
|||||||
return null
|
return null
|
||||||
}
|
}
|
||||||
|
|
||||||
const secretSource: SecretValueSource = key ? { GEMINI_API_KEY: key } : {}
|
|
||||||
|
|
||||||
const env: ProfileEnv = {
|
const env: ProfileEnv = {
|
||||||
GEMINI_AUTH_MODE: authMode,
|
GEMINI_AUTH_MODE: authMode,
|
||||||
GEMINI_MODEL:
|
GEMINI_MODEL:
|
||||||
sanitizeProviderConfigValue(options.model, secretSource) ||
|
sanitizeProviderConfigValue(options.model, { GEMINI_API_KEY: key }, processEnv) ||
|
||||||
sanitizeProviderConfigValue(processEnv.GEMINI_MODEL, secretSource) ||
|
sanitizeProviderConfigValue(
|
||||||
|
processEnv.GEMINI_MODEL,
|
||||||
|
{ GEMINI_API_KEY: key },
|
||||||
|
processEnv,
|
||||||
|
) ||
|
||||||
DEFAULT_GEMINI_MODEL,
|
DEFAULT_GEMINI_MODEL,
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -271,8 +178,12 @@ export function buildGeminiProfileEnv(options: {
|
|||||||
}
|
}
|
||||||
|
|
||||||
const baseUrl =
|
const baseUrl =
|
||||||
sanitizeProviderConfigValue(options.baseUrl, secretSource) ||
|
sanitizeProviderConfigValue(options.baseUrl, { GEMINI_API_KEY: key }, processEnv) ||
|
||||||
sanitizeProviderConfigValue(processEnv.GEMINI_BASE_URL, secretSource)
|
sanitizeProviderConfigValue(
|
||||||
|
processEnv.GEMINI_BASE_URL,
|
||||||
|
{ GEMINI_API_KEY: key },
|
||||||
|
processEnv,
|
||||||
|
)
|
||||||
if (baseUrl) {
|
if (baseUrl) {
|
||||||
env.GEMINI_BASE_URL = baseUrl
|
env.GEMINI_BASE_URL = baseUrl
|
||||||
}
|
}
|
||||||
@@ -294,14 +205,15 @@ export function buildOpenAIProfileEnv(options: {
|
|||||||
}
|
}
|
||||||
|
|
||||||
const defaultModel = getGoalDefaultOpenAIModel(options.goal)
|
const defaultModel = getGoalDefaultOpenAIModel(options.goal)
|
||||||
const secretSource: SecretValueSource = { OPENAI_API_KEY: key }
|
|
||||||
const shellOpenAIModel = sanitizeProviderConfigValue(
|
const shellOpenAIModel = sanitizeProviderConfigValue(
|
||||||
processEnv.OPENAI_MODEL,
|
processEnv.OPENAI_MODEL,
|
||||||
secretSource,
|
{ OPENAI_API_KEY: key },
|
||||||
|
processEnv,
|
||||||
)
|
)
|
||||||
const shellOpenAIBaseUrl = sanitizeProviderConfigValue(
|
const shellOpenAIBaseUrl = sanitizeProviderConfigValue(
|
||||||
processEnv.OPENAI_BASE_URL,
|
processEnv.OPENAI_BASE_URL,
|
||||||
secretSource,
|
{ OPENAI_API_KEY: key },
|
||||||
|
processEnv,
|
||||||
)
|
)
|
||||||
const shellOpenAIRequest = resolveProviderRequest({
|
const shellOpenAIRequest = resolveProviderRequest({
|
||||||
model: shellOpenAIModel,
|
model: shellOpenAIModel,
|
||||||
@@ -312,11 +224,19 @@ export function buildOpenAIProfileEnv(options: {
|
|||||||
|
|
||||||
return {
|
return {
|
||||||
OPENAI_BASE_URL:
|
OPENAI_BASE_URL:
|
||||||
sanitizeProviderConfigValue(options.baseUrl, secretSource) ||
|
sanitizeProviderConfigValue(
|
||||||
|
options.baseUrl,
|
||||||
|
{ OPENAI_API_KEY: key },
|
||||||
|
processEnv,
|
||||||
|
) ||
|
||||||
(useShellOpenAIConfig ? shellOpenAIBaseUrl : undefined) ||
|
(useShellOpenAIConfig ? shellOpenAIBaseUrl : undefined) ||
|
||||||
DEFAULT_OPENAI_BASE_URL,
|
DEFAULT_OPENAI_BASE_URL,
|
||||||
OPENAI_MODEL:
|
OPENAI_MODEL:
|
||||||
sanitizeProviderConfigValue(options.model, secretSource) ||
|
sanitizeProviderConfigValue(
|
||||||
|
options.model,
|
||||||
|
{ OPENAI_API_KEY: key },
|
||||||
|
processEnv,
|
||||||
|
) ||
|
||||||
(useShellOpenAIConfig ? shellOpenAIModel : undefined) ||
|
(useShellOpenAIConfig ? shellOpenAIModel : undefined) ||
|
||||||
defaultModel,
|
defaultModel,
|
||||||
OPENAI_API_KEY: key,
|
OPENAI_API_KEY: key,
|
||||||
@@ -373,19 +293,21 @@ export function buildMistralProfileEnv(options: {
|
|||||||
const env: ProfileEnv = {
|
const env: ProfileEnv = {
|
||||||
MISTRAL_API_KEY: key,
|
MISTRAL_API_KEY: key,
|
||||||
MISTRAL_MODEL:
|
MISTRAL_MODEL:
|
||||||
sanitizeProviderConfigValue(options.model, { MISTRAL_API_KEY: key }) ||
|
sanitizeProviderConfigValue(options.model, { MISTRAL_API_KEY: key }, processEnv) ||
|
||||||
sanitizeProviderConfigValue(
|
sanitizeProviderConfigValue(
|
||||||
processEnv.MISTRAL_MODEL,
|
processEnv.MISTRAL_MODEL,
|
||||||
{ MISTRAL_API_KEY: key },
|
{ MISTRAL_API_KEY: key },
|
||||||
|
processEnv,
|
||||||
) ||
|
) ||
|
||||||
DEFAULT_MISTRAL_MODEL,
|
DEFAULT_MISTRAL_MODEL,
|
||||||
}
|
}
|
||||||
|
|
||||||
const baseUrl =
|
const baseUrl =
|
||||||
sanitizeProviderConfigValue(options.baseUrl, { MISTRAL_API_KEY: key }) ||
|
sanitizeProviderConfigValue(options.baseUrl, { MISTRAL_API_KEY: key }, processEnv) ||
|
||||||
sanitizeProviderConfigValue(
|
sanitizeProviderConfigValue(
|
||||||
processEnv.MISTRAL_BASE_URL,
|
processEnv.MISTRAL_BASE_URL,
|
||||||
{ MISTRAL_API_KEY: key },
|
{ MISTRAL_API_KEY: key },
|
||||||
|
processEnv,
|
||||||
)
|
)
|
||||||
if (baseUrl) {
|
if (baseUrl) {
|
||||||
env.MISTRAL_BASE_URL = baseUrl
|
env.MISTRAL_BASE_URL = baseUrl
|
||||||
@@ -501,13 +423,13 @@ export function hasExplicitProviderSelection(
|
|||||||
}
|
}
|
||||||
|
|
||||||
return (
|
return (
|
||||||
isEnvTruthy(processEnv.CLAUDE_CODE_USE_OPENAI) ||
|
processEnv.CLAUDE_CODE_USE_OPENAI !== undefined ||
|
||||||
isEnvTruthy(processEnv.CLAUDE_CODE_USE_GITHUB) ||
|
processEnv.CLAUDE_CODE_USE_GITHUB !== undefined ||
|
||||||
isEnvTruthy(processEnv.CLAUDE_CODE_USE_GEMINI) ||
|
processEnv.CLAUDE_CODE_USE_GEMINI !== undefined ||
|
||||||
isEnvTruthy(processEnv.CLAUDE_CODE_USE_MISTRAL) ||
|
processEnv.CLAUDE_CODE_USE_MISTRAL !== undefined ||
|
||||||
isEnvTruthy(processEnv.CLAUDE_CODE_USE_BEDROCK) ||
|
processEnv.CLAUDE_CODE_USE_BEDROCK !== undefined ||
|
||||||
isEnvTruthy(processEnv.CLAUDE_CODE_USE_VERTEX) ||
|
processEnv.CLAUDE_CODE_USE_VERTEX !== undefined ||
|
||||||
isEnvTruthy(processEnv.CLAUDE_CODE_USE_FOUNDRY)
|
processEnv.CLAUDE_CODE_USE_FOUNDRY !== undefined
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -543,11 +465,11 @@ export async function buildLaunchEnv(options: {
|
|||||||
)
|
)
|
||||||
const shellOpenAIModel = sanitizeProviderConfigValue(
|
const shellOpenAIModel = sanitizeProviderConfigValue(
|
||||||
processEnv.OPENAI_MODEL,
|
processEnv.OPENAI_MODEL,
|
||||||
processEnv as SecretValueSource,
|
processEnv,
|
||||||
)
|
)
|
||||||
const shellOpenAIBaseUrl = sanitizeProviderConfigValue(
|
const shellOpenAIBaseUrl = sanitizeProviderConfigValue(
|
||||||
processEnv.OPENAI_BASE_URL,
|
processEnv.OPENAI_BASE_URL,
|
||||||
processEnv as SecretValueSource,
|
processEnv,
|
||||||
)
|
)
|
||||||
const persistedGeminiModel = sanitizeProviderConfigValue(
|
const persistedGeminiModel = sanitizeProviderConfigValue(
|
||||||
persistedEnv.GEMINI_MODEL,
|
persistedEnv.GEMINI_MODEL,
|
||||||
@@ -559,11 +481,11 @@ export async function buildLaunchEnv(options: {
|
|||||||
)
|
)
|
||||||
const shellGeminiModel = sanitizeProviderConfigValue(
|
const shellGeminiModel = sanitizeProviderConfigValue(
|
||||||
processEnv.GEMINI_MODEL,
|
processEnv.GEMINI_MODEL,
|
||||||
processEnv as SecretValueSource,
|
processEnv,
|
||||||
)
|
)
|
||||||
const shellGeminiBaseUrl = sanitizeProviderConfigValue(
|
const shellGeminiBaseUrl = sanitizeProviderConfigValue(
|
||||||
processEnv.GEMINI_BASE_URL,
|
processEnv.GEMINI_BASE_URL,
|
||||||
processEnv as SecretValueSource,
|
processEnv,
|
||||||
)
|
)
|
||||||
const shellGeminiAccessToken =
|
const shellGeminiAccessToken =
|
||||||
processEnv.GEMINI_ACCESS_TOKEN?.trim() || undefined
|
processEnv.GEMINI_ACCESS_TOKEN?.trim() || undefined
|
||||||
@@ -576,20 +498,6 @@ export async function buildLaunchEnv(options: {
|
|||||||
const persistedGeminiKey = sanitizeApiKey(persistedEnv.GEMINI_API_KEY)
|
const persistedGeminiKey = sanitizeApiKey(persistedEnv.GEMINI_API_KEY)
|
||||||
const persistedGeminiAuthMode = persistedEnv.GEMINI_AUTH_MODE
|
const persistedGeminiAuthMode = persistedEnv.GEMINI_AUTH_MODE
|
||||||
|
|
||||||
if (hasExplicitProviderSelection(processEnv)) {
|
|
||||||
for (let provider of PROVIDERS) {
|
|
||||||
if (provider === "anthropic") {
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
|
|
||||||
const env_key_name = `CLAUDE_CODE_USE_${provider.toUpperCase()}`
|
|
||||||
|
|
||||||
if (env_key_name in processEnv && isEnvTruthy(processEnv[env_key_name])) {
|
|
||||||
options.profile = provider;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if (options.profile === 'gemini') {
|
if (options.profile === 'gemini') {
|
||||||
const env: NodeJS.ProcessEnv = {
|
const env: NodeJS.ProcessEnv = {
|
||||||
...processEnv,
|
...processEnv,
|
||||||
@@ -659,15 +567,19 @@ export async function buildLaunchEnv(options: {
|
|||||||
|
|
||||||
const shellMistralModel = sanitizeProviderConfigValue(
|
const shellMistralModel = sanitizeProviderConfigValue(
|
||||||
processEnv.MISTRAL_MODEL,
|
processEnv.MISTRAL_MODEL,
|
||||||
|
processEnv,
|
||||||
)
|
)
|
||||||
const persistedMistralModel = sanitizeProviderConfigValue(
|
const persistedMistralModel = sanitizeProviderConfigValue(
|
||||||
persistedEnv.MISTRAL_MODEL,
|
persistedEnv.MISTRAL_MODEL,
|
||||||
|
persistedEnv,
|
||||||
)
|
)
|
||||||
const shellMistralBaseUrl = sanitizeProviderConfigValue(
|
const shellMistralBaseUrl = sanitizeProviderConfigValue(
|
||||||
processEnv.MISTRAL_BASE_URL,
|
processEnv.MISTRAL_BASE_URL,
|
||||||
|
processEnv,
|
||||||
)
|
)
|
||||||
const persistedMistralBaseUrl = sanitizeProviderConfigValue(
|
const persistedMistralBaseUrl = sanitizeProviderConfigValue(
|
||||||
persistedEnv.MISTRAL_BASE_URL,
|
persistedEnv.MISTRAL_BASE_URL,
|
||||||
|
persistedEnv,
|
||||||
)
|
)
|
||||||
|
|
||||||
env.MISTRAL_MODEL =
|
env.MISTRAL_MODEL =
|
||||||
@@ -842,18 +754,12 @@ export async function buildStartupEnvFromProfile(options?: {
|
|||||||
const persisted = options?.persisted ?? loadProfileFile()
|
const persisted = options?.persisted ?? loadProfileFile()
|
||||||
|
|
||||||
// Saved /provider profiles should still win over provider-manager env that was
|
// Saved /provider profiles should still win over provider-manager env that was
|
||||||
// auto-applied during startup. Only an explicit shell/flag provider selection
|
// auto-applied during startup. Only explicit shell/flag provider selection
|
||||||
// should bypass the persisted startup profile.
|
// should bypass the persisted startup profile.
|
||||||
//
|
|
||||||
const profileManagedEnv = processEnv.CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED === '1'
|
const profileManagedEnv = processEnv.CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED === '1'
|
||||||
|
if (hasExplicitProviderSelection(processEnv) && !profileManagedEnv) {
|
||||||
// If the user explicitly selected a provider via env, allow it to bypass
|
return processEnv
|
||||||
// the persisted profile only when we can prove it was managed by the
|
}
|
||||||
// persisted profile env itself.
|
|
||||||
//
|
|
||||||
// Practically: on initial startup, provider routing env vars can already
|
|
||||||
// be present due to earlier auto-application steps. We should still apply
|
|
||||||
// the persisted profile rather than returning early.
|
|
||||||
|
|
||||||
if (!persisted) {
|
if (!persisted) {
|
||||||
return processEnv
|
return processEnv
|
||||||
|
|||||||
@@ -13,7 +13,6 @@ const RESTORED_KEYS = [
|
|||||||
'CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED_ID',
|
'CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED_ID',
|
||||||
'CLAUDE_CODE_USE_OPENAI',
|
'CLAUDE_CODE_USE_OPENAI',
|
||||||
'CLAUDE_CODE_USE_GEMINI',
|
'CLAUDE_CODE_USE_GEMINI',
|
||||||
'CLAUDE_CODE_USE_MISTRAL',
|
|
||||||
'CLAUDE_CODE_USE_GITHUB',
|
'CLAUDE_CODE_USE_GITHUB',
|
||||||
'CLAUDE_CODE_USE_BEDROCK',
|
'CLAUDE_CODE_USE_BEDROCK',
|
||||||
'CLAUDE_CODE_USE_VERTEX',
|
'CLAUDE_CODE_USE_VERTEX',
|
||||||
@@ -25,15 +24,6 @@ const RESTORED_KEYS = [
|
|||||||
'ANTHROPIC_BASE_URL',
|
'ANTHROPIC_BASE_URL',
|
||||||
'ANTHROPIC_MODEL',
|
'ANTHROPIC_MODEL',
|
||||||
'ANTHROPIC_API_KEY',
|
'ANTHROPIC_API_KEY',
|
||||||
'GEMINI_BASE_URL',
|
|
||||||
'GEMINI_MODEL',
|
|
||||||
'GEMINI_API_KEY',
|
|
||||||
'GEMINI_AUTH_MODE',
|
|
||||||
'GEMINI_ACCESS_TOKEN',
|
|
||||||
'GOOGLE_API_KEY',
|
|
||||||
'MISTRAL_BASE_URL',
|
|
||||||
'MISTRAL_MODEL',
|
|
||||||
'MISTRAL_API_KEY',
|
|
||||||
] as const
|
] as const
|
||||||
|
|
||||||
type MockConfigState = {
|
type MockConfigState = {
|
||||||
@@ -108,24 +98,6 @@ function buildProfile(overrides: Partial<ProviderProfile> = {}): ProviderProfile
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
function buildMistralProfile(overrides: Partial<ProviderProfile> = {}): ProviderProfile {
|
|
||||||
return buildProfile({
|
|
||||||
provider: 'mistral',
|
|
||||||
baseUrl: 'https://api.mistral.ai/v1',
|
|
||||||
model: 'devstral-latest',
|
|
||||||
...overrides,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
function buildGeminiProfile(overrides: Partial<ProviderProfile> = {}): ProviderProfile {
|
|
||||||
return buildProfile({
|
|
||||||
provider: 'gemini',
|
|
||||||
baseUrl: 'https://generativelanguage.googleapis.com/v1beta/openai',
|
|
||||||
model: 'gemini-3-flash-preview',
|
|
||||||
...overrides,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
describe('applyProviderProfileToProcessEnv', () => {
|
describe('applyProviderProfileToProcessEnv', () => {
|
||||||
test('openai profile clears competing gemini/github flags', async () => {
|
test('openai profile clears competing gemini/github flags', async () => {
|
||||||
const { applyProviderProfileToProcessEnv } =
|
const { applyProviderProfileToProcessEnv } =
|
||||||
@@ -146,36 +118,6 @@ describe('applyProviderProfileToProcessEnv', () => {
|
|||||||
expect(getFreshAPIProvider()).toBe('openai')
|
expect(getFreshAPIProvider()).toBe('openai')
|
||||||
})
|
})
|
||||||
|
|
||||||
test('mistral profile sets CLAUDE_CODE_USE_MISTRAL and clears openai flags', async () => {
|
|
||||||
const { applyProviderProfileToProcessEnv } =
|
|
||||||
await importFreshProviderProfileModules()
|
|
||||||
process.env.CLAUDE_CODE_USE_OPENAI = '1'
|
|
||||||
|
|
||||||
applyProviderProfileToProcessEnv(buildMistralProfile())
|
|
||||||
const { getAPIProvider: getFreshAPIProvider } =
|
|
||||||
await importFreshProvidersModule()
|
|
||||||
|
|
||||||
expect(process.env.CLAUDE_CODE_USE_MISTRAL).toBe('1')
|
|
||||||
expect(process.env.CLAUDE_CODE_USE_OPENAI).toBeUndefined()
|
|
||||||
expect(process.env.MISTRAL_MODEL).toBe('devstral-latest')
|
|
||||||
expect(getFreshAPIProvider()).toBe('mistral')
|
|
||||||
})
|
|
||||||
|
|
||||||
test('gemini profile sets CLAUDE_CODE_USE_GEMINI and clears openai flags', async () => {
|
|
||||||
const { applyProviderProfileToProcessEnv } =
|
|
||||||
await importFreshProviderProfileModules()
|
|
||||||
process.env.CLAUDE_CODE_USE_OPENAI = '1'
|
|
||||||
|
|
||||||
applyProviderProfileToProcessEnv(buildGeminiProfile())
|
|
||||||
const { getAPIProvider: getFreshAPIProvider } =
|
|
||||||
await importFreshProvidersModule()
|
|
||||||
|
|
||||||
expect(process.env.CLAUDE_CODE_USE_GEMINI).toBe('1')
|
|
||||||
expect(process.env.CLAUDE_CODE_USE_OPENAI).toBeUndefined()
|
|
||||||
expect(process.env.GEMINI_MODEL).toBe('gemini-3-flash-preview')
|
|
||||||
expect(getFreshAPIProvider()).toBe('gemini')
|
|
||||||
})
|
|
||||||
|
|
||||||
test('anthropic profile clears competing gemini/github flags', async () => {
|
test('anthropic profile clears competing gemini/github flags', async () => {
|
||||||
const { applyProviderProfileToProcessEnv } =
|
const { applyProviderProfileToProcessEnv } =
|
||||||
await importFreshProviderProfileModules()
|
await importFreshProviderProfileModules()
|
||||||
@@ -197,39 +139,6 @@ describe('applyProviderProfileToProcessEnv', () => {
|
|||||||
expect(process.env.CLAUDE_CODE_USE_OPENAI).toBeUndefined()
|
expect(process.env.CLAUDE_CODE_USE_OPENAI).toBeUndefined()
|
||||||
expect(getFreshAPIProvider()).toBe('firstParty')
|
expect(getFreshAPIProvider()).toBe('firstParty')
|
||||||
})
|
})
|
||||||
|
|
||||||
test('openai profile with multi-model string sets only first model in OPENAI_MODEL', async () => {
|
|
||||||
const { applyProviderProfileToProcessEnv } =
|
|
||||||
await importFreshProviderProfileModules()
|
|
||||||
|
|
||||||
applyProviderProfileToProcessEnv(
|
|
||||||
buildProfile({
|
|
||||||
provider: 'openai',
|
|
||||||
baseUrl: 'https://api.openai.com/v1',
|
|
||||||
model: 'glm-4.7, glm-4.7-flash, glm-4.7-plus',
|
|
||||||
}),
|
|
||||||
)
|
|
||||||
|
|
||||||
expect(process.env.OPENAI_MODEL).toBe('glm-4.7')
|
|
||||||
expect(String(process.env.CLAUDE_CODE_USE_OPENAI)).toBe('1')
|
|
||||||
expect(process.env.OPENAI_BASE_URL).toBe('https://api.openai.com/v1')
|
|
||||||
})
|
|
||||||
|
|
||||||
test('anthropic profile with multi-model string sets only first model in ANTHROPIC_MODEL', async () => {
|
|
||||||
const { applyProviderProfileToProcessEnv } =
|
|
||||||
await importFreshProviderProfileModules()
|
|
||||||
|
|
||||||
applyProviderProfileToProcessEnv(
|
|
||||||
buildProfile({
|
|
||||||
provider: 'anthropic',
|
|
||||||
baseUrl: 'https://api.anthropic.com',
|
|
||||||
model: 'claude-sonnet-4-6, claude-opus-4-6',
|
|
||||||
}),
|
|
||||||
)
|
|
||||||
|
|
||||||
expect(process.env.ANTHROPIC_MODEL).toBe('claude-sonnet-4-6')
|
|
||||||
expect(process.env.ANTHROPIC_BASE_URL).toBe('https://api.anthropic.com')
|
|
||||||
})
|
|
||||||
})
|
})
|
||||||
|
|
||||||
describe('applyActiveProviderProfileFromConfig', () => {
|
describe('applyActiveProviderProfileFromConfig', () => {
|
||||||
@@ -452,169 +361,6 @@ describe('getProviderPresetDefaults', () => {
|
|||||||
})
|
})
|
||||||
})
|
})
|
||||||
|
|
||||||
describe('setActiveProviderProfile', () => {
|
|
||||||
test('sets OPENAI_MODEL env var when switching to an openai-type provider', async () => {
|
|
||||||
const { setActiveProviderProfile } =
|
|
||||||
await importFreshProviderProfileModules()
|
|
||||||
const openaiProfile = buildProfile({
|
|
||||||
id: 'openai_prof',
|
|
||||||
name: 'OpenAI Provider',
|
|
||||||
provider: 'openai',
|
|
||||||
baseUrl: 'https://api.openai.com/v1',
|
|
||||||
model: 'gpt-4o',
|
|
||||||
})
|
|
||||||
|
|
||||||
saveMockGlobalConfig(current => ({
|
|
||||||
...current,
|
|
||||||
providerProfiles: [openaiProfile],
|
|
||||||
}))
|
|
||||||
|
|
||||||
const result = setActiveProviderProfile('openai_prof')
|
|
||||||
|
|
||||||
expect(result?.id).toBe('openai_prof')
|
|
||||||
expect(String(process.env.CLAUDE_CODE_USE_OPENAI)).toBe('1')
|
|
||||||
expect(process.env.OPENAI_MODEL).toBe('gpt-4o')
|
|
||||||
expect(process.env.OPENAI_BASE_URL).toBe('https://api.openai.com/v1')
|
|
||||||
expect(process.env.CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED_ID).toBe(
|
|
||||||
'openai_prof',
|
|
||||||
)
|
|
||||||
})
|
|
||||||
|
|
||||||
test('sets ANTHROPIC_MODEL env var when switching to an anthropic-type provider', async () => {
|
|
||||||
const { setActiveProviderProfile } =
|
|
||||||
await importFreshProviderProfileModules()
|
|
||||||
const anthropicProfile = buildProfile({
|
|
||||||
id: 'anthro_prof',
|
|
||||||
name: 'Anthropic Provider',
|
|
||||||
provider: 'anthropic',
|
|
||||||
baseUrl: 'https://api.anthropic.com',
|
|
||||||
model: 'claude-sonnet-4-6',
|
|
||||||
})
|
|
||||||
|
|
||||||
saveMockGlobalConfig(current => ({
|
|
||||||
...current,
|
|
||||||
providerProfiles: [anthropicProfile],
|
|
||||||
}))
|
|
||||||
|
|
||||||
const result = setActiveProviderProfile('anthro_prof')
|
|
||||||
|
|
||||||
expect(result?.id).toBe('anthro_prof')
|
|
||||||
expect(process.env.ANTHROPIC_MODEL).toBe('claude-sonnet-4-6')
|
|
||||||
expect(process.env.ANTHROPIC_BASE_URL).toBe('https://api.anthropic.com')
|
|
||||||
expect(process.env.CLAUDE_CODE_USE_OPENAI).toBeUndefined()
|
|
||||||
expect(process.env.OPENAI_MODEL).toBeUndefined()
|
|
||||||
expect(process.env.CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED_ID).toBe(
|
|
||||||
'anthro_prof',
|
|
||||||
)
|
|
||||||
})
|
|
||||||
|
|
||||||
test('clears openai model env and sets anthropic model env when switching from openai to anthropic provider', async () => {
|
|
||||||
const { setActiveProviderProfile } =
|
|
||||||
await importFreshProviderProfileModules()
|
|
||||||
const openaiProfile = buildProfile({
|
|
||||||
id: 'openai_prof',
|
|
||||||
name: 'OpenAI Provider',
|
|
||||||
provider: 'openai',
|
|
||||||
baseUrl: 'https://api.openai.com/v1',
|
|
||||||
model: 'gpt-4o',
|
|
||||||
apiKey: 'sk-openai-key',
|
|
||||||
})
|
|
||||||
const anthropicProfile = buildProfile({
|
|
||||||
id: 'anthro_prof',
|
|
||||||
name: 'Anthropic Provider',
|
|
||||||
provider: 'anthropic',
|
|
||||||
baseUrl: 'https://api.anthropic.com',
|
|
||||||
model: 'claude-sonnet-4-6',
|
|
||||||
apiKey: 'sk-ant-key',
|
|
||||||
})
|
|
||||||
|
|
||||||
saveMockGlobalConfig(current => ({
|
|
||||||
...current,
|
|
||||||
providerProfiles: [openaiProfile, anthropicProfile],
|
|
||||||
}))
|
|
||||||
|
|
||||||
// First activate the openai profile
|
|
||||||
setActiveProviderProfile('openai_prof')
|
|
||||||
expect(process.env.OPENAI_MODEL).toBe('gpt-4o')
|
|
||||||
expect(String(process.env.CLAUDE_CODE_USE_OPENAI)).toBe('1')
|
|
||||||
|
|
||||||
// Now switch to the anthropic profile
|
|
||||||
const result = setActiveProviderProfile('anthro_prof')
|
|
||||||
|
|
||||||
expect(result?.id).toBe('anthro_prof')
|
|
||||||
expect(process.env.ANTHROPIC_MODEL).toBe('claude-sonnet-4-6')
|
|
||||||
expect(process.env.ANTHROPIC_BASE_URL).toBe('https://api.anthropic.com')
|
|
||||||
expect(process.env.CLAUDE_CODE_USE_OPENAI).toBeUndefined()
|
|
||||||
expect(process.env.OPENAI_MODEL).toBeUndefined()
|
|
||||||
expect(process.env.OPENAI_BASE_URL).toBeUndefined()
|
|
||||||
expect(process.env.OPENAI_API_KEY).toBeUndefined()
|
|
||||||
expect(process.env.CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED_ID).toBe(
|
|
||||||
'anthro_prof',
|
|
||||||
)
|
|
||||||
})
|
|
||||||
|
|
||||||
test('clears anthropic model env and sets openai model env when switching from anthropic to openai provider', async () => {
|
|
||||||
const { setActiveProviderProfile } =
|
|
||||||
await importFreshProviderProfileModules()
|
|
||||||
const anthropicProfile = buildProfile({
|
|
||||||
id: 'anthro_prof',
|
|
||||||
name: 'Anthropic Provider',
|
|
||||||
provider: 'anthropic',
|
|
||||||
baseUrl: 'https://api.anthropic.com',
|
|
||||||
model: 'claude-sonnet-4-6',
|
|
||||||
apiKey: 'sk-ant-key',
|
|
||||||
})
|
|
||||||
const openaiProfile = buildProfile({
|
|
||||||
id: 'openai_prof',
|
|
||||||
name: 'OpenAI Provider',
|
|
||||||
provider: 'openai',
|
|
||||||
baseUrl: 'https://api.openai.com/v1',
|
|
||||||
model: 'gpt-4o',
|
|
||||||
apiKey: 'sk-openai-key',
|
|
||||||
})
|
|
||||||
|
|
||||||
saveMockGlobalConfig(current => ({
|
|
||||||
...current,
|
|
||||||
providerProfiles: [anthropicProfile, openaiProfile],
|
|
||||||
}))
|
|
||||||
|
|
||||||
// First activate the anthropic profile
|
|
||||||
setActiveProviderProfile('anthro_prof')
|
|
||||||
expect(process.env.ANTHROPIC_MODEL).toBe('claude-sonnet-4-6')
|
|
||||||
expect(process.env.ANTHROPIC_BASE_URL).toBe('https://api.anthropic.com')
|
|
||||||
|
|
||||||
// Now switch to the openai profile
|
|
||||||
const result = setActiveProviderProfile('openai_prof')
|
|
||||||
|
|
||||||
expect(result?.id).toBe('openai_prof')
|
|
||||||
expect(String(process.env.CLAUDE_CODE_USE_OPENAI)).toBe('1')
|
|
||||||
expect(process.env.OPENAI_MODEL).toBe('gpt-4o')
|
|
||||||
expect(process.env.OPENAI_BASE_URL).toBe('https://api.openai.com/v1')
|
|
||||||
// ANTHROPIC_MODEL is set to the profile model for all provider types
|
|
||||||
expect(process.env.ANTHROPIC_MODEL).toBe('gpt-4o')
|
|
||||||
expect(process.env.ANTHROPIC_BASE_URL).toBeUndefined()
|
|
||||||
expect(process.env.ANTHROPIC_API_KEY).toBeUndefined()
|
|
||||||
expect(process.env.CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED_ID).toBe(
|
|
||||||
'openai_prof',
|
|
||||||
)
|
|
||||||
})
|
|
||||||
|
|
||||||
test('returns null for non-existent profile id', async () => {
|
|
||||||
const { setActiveProviderProfile } =
|
|
||||||
await importFreshProviderProfileModules()
|
|
||||||
const openaiProfile = buildProfile({ id: 'existing_prof' })
|
|
||||||
|
|
||||||
saveMockGlobalConfig(current => ({
|
|
||||||
...current,
|
|
||||||
providerProfiles: [openaiProfile],
|
|
||||||
}))
|
|
||||||
|
|
||||||
const result = setActiveProviderProfile('nonexistent_prof')
|
|
||||||
|
|
||||||
expect(result).toBeNull()
|
|
||||||
})
|
|
||||||
})
|
|
||||||
|
|
||||||
describe('deleteProviderProfile', () => {
|
describe('deleteProviderProfile', () => {
|
||||||
test('deleting final profile clears provider env when active profile applied it', async () => {
|
test('deleting final profile clears provider env when active profile applied it', async () => {
|
||||||
const {
|
const {
|
||||||
@@ -683,82 +429,3 @@ describe('deleteProviderProfile', () => {
|
|||||||
expect(process.env.OPENAI_MODEL).toBe('qwen2.5:3b')
|
expect(process.env.OPENAI_MODEL).toBe('qwen2.5:3b')
|
||||||
})
|
})
|
||||||
})
|
})
|
||||||
|
|
||||||
describe('getProfileModelOptions', () => {
|
|
||||||
test('generates options for multi-model profile', async () => {
|
|
||||||
const { getProfileModelOptions } =
|
|
||||||
await importFreshProviderProfileModules()
|
|
||||||
|
|
||||||
const options = getProfileModelOptions(
|
|
||||||
buildProfile({
|
|
||||||
name: 'Test Provider',
|
|
||||||
model: 'glm-4.7, glm-4.7-flash, glm-4.7-plus',
|
|
||||||
}),
|
|
||||||
)
|
|
||||||
|
|
||||||
expect(options).toEqual([
|
|
||||||
{ value: 'glm-4.7', label: 'glm-4.7', description: 'Provider: Test Provider' },
|
|
||||||
{ value: 'glm-4.7-flash', label: 'glm-4.7-flash', description: 'Provider: Test Provider' },
|
|
||||||
{ value: 'glm-4.7-plus', label: 'glm-4.7-plus', description: 'Provider: Test Provider' },
|
|
||||||
])
|
|
||||||
})
|
|
||||||
|
|
||||||
test('returns single option for single-model profile', async () => {
|
|
||||||
const { getProfileModelOptions } =
|
|
||||||
await importFreshProviderProfileModules()
|
|
||||||
|
|
||||||
const options = getProfileModelOptions(
|
|
||||||
buildProfile({
|
|
||||||
name: 'Single Model',
|
|
||||||
model: 'llama3.1:8b',
|
|
||||||
}),
|
|
||||||
)
|
|
||||||
|
|
||||||
expect(options).toEqual([
|
|
||||||
{ value: 'llama3.1:8b', label: 'llama3.1:8b', description: 'Provider: Single Model' },
|
|
||||||
])
|
|
||||||
})
|
|
||||||
|
|
||||||
test('returns empty array for empty model field', async () => {
|
|
||||||
const { getProfileModelOptions } =
|
|
||||||
await importFreshProviderProfileModules()
|
|
||||||
|
|
||||||
const options = getProfileModelOptions(
|
|
||||||
buildProfile({
|
|
||||||
name: 'Empty',
|
|
||||||
model: '',
|
|
||||||
}),
|
|
||||||
)
|
|
||||||
|
|
||||||
expect(options).toEqual([])
|
|
||||||
})
|
|
||||||
})
|
|
||||||
|
|
||||||
describe('setActiveProviderProfile model cache', () => {
|
|
||||||
test('populates model cache with all models from multi-model profile on activation', async () => {
|
|
||||||
const {
|
|
||||||
setActiveProviderProfile,
|
|
||||||
getActiveOpenAIModelOptionsCache,
|
|
||||||
} = await importFreshProviderProfileModules()
|
|
||||||
|
|
||||||
mockConfigState = {
|
|
||||||
...createMockConfigState(),
|
|
||||||
providerProfiles: [
|
|
||||||
buildProfile({
|
|
||||||
id: 'multi_provider',
|
|
||||||
name: 'Multi Provider',
|
|
||||||
model: 'glm-4.7, glm-4.7-flash, glm-4.7-plus',
|
|
||||||
baseUrl: 'https://api.example.com/v1',
|
|
||||||
}),
|
|
||||||
],
|
|
||||||
}
|
|
||||||
|
|
||||||
setActiveProviderProfile('multi_provider')
|
|
||||||
|
|
||||||
const cache = getActiveOpenAIModelOptionsCache()
|
|
||||||
const cacheValues = cache.map(opt => opt.value)
|
|
||||||
expect(cacheValues).toContain('glm-4.7')
|
|
||||||
expect(cacheValues).toContain('glm-4.7-flash')
|
|
||||||
expect(cacheValues).toContain('glm-4.7-plus')
|
|
||||||
})
|
|
||||||
})
|
|
||||||
|
|||||||
@@ -5,15 +5,6 @@ import {
|
|||||||
type ProviderProfile,
|
type ProviderProfile,
|
||||||
} from './config.js'
|
} from './config.js'
|
||||||
import type { ModelOption } from './model/modelOptions.js'
|
import type { ModelOption } from './model/modelOptions.js'
|
||||||
import { getPrimaryModel, parseModelList } from './providerModels.js'
|
|
||||||
import {
|
|
||||||
createProfileFile,
|
|
||||||
saveProfileFile,
|
|
||||||
buildGeminiProfileEnv,
|
|
||||||
buildMistralProfileEnv,
|
|
||||||
buildOpenAIProfileEnv,
|
|
||||||
type ProviderProfile as ProviderProfileStartup,
|
|
||||||
} from './providerProfile.js'
|
|
||||||
|
|
||||||
export type ProviderPreset =
|
export type ProviderPreset =
|
||||||
| 'anthropic'
|
| 'anthropic'
|
||||||
@@ -28,11 +19,7 @@ export type ProviderPreset =
|
|||||||
| 'azure-openai'
|
| 'azure-openai'
|
||||||
| 'openrouter'
|
| 'openrouter'
|
||||||
| 'lmstudio'
|
| 'lmstudio'
|
||||||
| 'dashscope-cn'
|
|
||||||
| 'dashscope-intl'
|
|
||||||
| 'custom'
|
| 'custom'
|
||||||
| 'nvidia-nim'
|
|
||||||
| 'minimax'
|
|
||||||
|
|
||||||
export type ProviderProfileInput = {
|
export type ProviderProfileInput = {
|
||||||
provider?: ProviderProfile['provider']
|
provider?: ProviderProfile['provider']
|
||||||
@@ -68,14 +55,7 @@ function normalizeBaseUrl(value: string): string {
|
|||||||
function sanitizeProfile(profile: ProviderProfile): ProviderProfile | null {
|
function sanitizeProfile(profile: ProviderProfile): ProviderProfile | null {
|
||||||
const id = trimValue(profile.id)
|
const id = trimValue(profile.id)
|
||||||
const name = trimValue(profile.name)
|
const name = trimValue(profile.name)
|
||||||
const provider =
|
const provider = profile.provider === 'anthropic' ? 'anthropic' : 'openai'
|
||||||
profile.provider === 'anthropic'
|
|
||||||
? 'anthropic'
|
|
||||||
: profile.provider === 'mistral'
|
|
||||||
? 'mistral'
|
|
||||||
: profile.provider === 'gemini'
|
|
||||||
? 'gemini'
|
|
||||||
: 'openai'
|
|
||||||
const baseUrl = normalizeBaseUrl(profile.baseUrl)
|
const baseUrl = normalizeBaseUrl(profile.baseUrl)
|
||||||
const model = trimValue(profile.model)
|
const model = trimValue(profile.model)
|
||||||
|
|
||||||
@@ -176,7 +156,7 @@ export function getProviderPresetDefaults(
|
|||||||
}
|
}
|
||||||
case 'gemini':
|
case 'gemini':
|
||||||
return {
|
return {
|
||||||
provider: 'gemini',
|
provider: 'openai',
|
||||||
name: 'Google Gemini',
|
name: 'Google Gemini',
|
||||||
baseUrl: 'https://generativelanguage.googleapis.com/v1beta/openai',
|
baseUrl: 'https://generativelanguage.googleapis.com/v1beta/openai',
|
||||||
model: 'gemini-3-flash-preview',
|
model: 'gemini-3-flash-preview',
|
||||||
@@ -185,7 +165,7 @@ export function getProviderPresetDefaults(
|
|||||||
}
|
}
|
||||||
case 'mistral':
|
case 'mistral':
|
||||||
return {
|
return {
|
||||||
provider: 'mistral',
|
provider: 'openai',
|
||||||
name: 'Mistral',
|
name: 'Mistral',
|
||||||
baseUrl: 'https://api.mistral.ai/v1',
|
baseUrl: 'https://api.mistral.ai/v1',
|
||||||
model: 'devstral-latest',
|
model: 'devstral-latest',
|
||||||
@@ -237,24 +217,6 @@ export function getProviderPresetDefaults(
|
|||||||
apiKey: '',
|
apiKey: '',
|
||||||
requiresApiKey: false,
|
requiresApiKey: false,
|
||||||
}
|
}
|
||||||
case 'dashscope-cn':
|
|
||||||
return {
|
|
||||||
provider: 'openai',
|
|
||||||
name: 'Alibaba Coding Plan (China)',
|
|
||||||
baseUrl: 'https://coding.dashscope.aliyuncs.com/v1',
|
|
||||||
model: 'qwen3.6-plus',
|
|
||||||
apiKey: process.env.DASHSCOPE_API_KEY ?? '',
|
|
||||||
requiresApiKey: true,
|
|
||||||
}
|
|
||||||
case 'dashscope-intl':
|
|
||||||
return {
|
|
||||||
provider: 'openai',
|
|
||||||
name: 'Alibaba Coding Plan',
|
|
||||||
baseUrl: 'https://coding-intl.dashscope.aliyuncs.com/v1',
|
|
||||||
model: 'qwen3.6-plus',
|
|
||||||
apiKey: process.env.DASHSCOPE_API_KEY ?? '',
|
|
||||||
requiresApiKey: true,
|
|
||||||
}
|
|
||||||
case 'custom':
|
case 'custom':
|
||||||
return {
|
return {
|
||||||
provider: 'openai',
|
provider: 'openai',
|
||||||
@@ -267,24 +229,6 @@ export function getProviderPresetDefaults(
|
|||||||
apiKey: process.env.OPENAI_API_KEY ?? '',
|
apiKey: process.env.OPENAI_API_KEY ?? '',
|
||||||
requiresApiKey: false,
|
requiresApiKey: false,
|
||||||
}
|
}
|
||||||
case 'nvidia-nim':
|
|
||||||
return {
|
|
||||||
provider: 'openai',
|
|
||||||
name: 'NVIDIA NIM',
|
|
||||||
baseUrl: 'https://integrate.api.nvidia.com/v1',
|
|
||||||
model: 'nvidia/llama-3.1-nemotron-70b-instruct',
|
|
||||||
apiKey: process.env.NVIDIA_API_KEY ?? '',
|
|
||||||
requiresApiKey: true,
|
|
||||||
}
|
|
||||||
case 'minimax':
|
|
||||||
return {
|
|
||||||
provider: 'openai',
|
|
||||||
name: 'MiniMax',
|
|
||||||
baseUrl: 'https://api.minimax.io/v1',
|
|
||||||
model: 'MiniMax-M2.5',
|
|
||||||
apiKey: process.env.MINIMAX_API_KEY ?? '',
|
|
||||||
requiresApiKey: true,
|
|
||||||
}
|
|
||||||
case 'ollama':
|
case 'ollama':
|
||||||
default:
|
default:
|
||||||
return {
|
return {
|
||||||
@@ -332,7 +276,6 @@ function hasConflictingProviderFlagsForProfile(
|
|||||||
|
|
||||||
return (
|
return (
|
||||||
processEnv.CLAUDE_CODE_USE_GEMINI !== undefined ||
|
processEnv.CLAUDE_CODE_USE_GEMINI !== undefined ||
|
||||||
processEnv.CLAUDE_CODE_USE_MISTRAL !== undefined ||
|
|
||||||
processEnv.CLAUDE_CODE_USE_GITHUB !== undefined ||
|
processEnv.CLAUDE_CODE_USE_GITHUB !== undefined ||
|
||||||
processEnv.CLAUDE_CODE_USE_BEDROCK !== undefined ||
|
processEnv.CLAUDE_CODE_USE_BEDROCK !== undefined ||
|
||||||
processEnv.CLAUDE_CODE_USE_VERTEX !== undefined ||
|
processEnv.CLAUDE_CODE_USE_VERTEX !== undefined ||
|
||||||
@@ -368,44 +311,12 @@ function isProcessEnvAlignedWithProfile(
|
|||||||
return (
|
return (
|
||||||
!hasProviderSelectionFlags(processEnv) &&
|
!hasProviderSelectionFlags(processEnv) &&
|
||||||
sameOptionalEnvValue(processEnv.ANTHROPIC_BASE_URL, profile.baseUrl) &&
|
sameOptionalEnvValue(processEnv.ANTHROPIC_BASE_URL, profile.baseUrl) &&
|
||||||
sameOptionalEnvValue(processEnv.ANTHROPIC_MODEL, getPrimaryModel(profile.model)) &&
|
sameOptionalEnvValue(processEnv.ANTHROPIC_MODEL, profile.model) &&
|
||||||
(!includeApiKey ||
|
(!includeApiKey ||
|
||||||
sameOptionalEnvValue(processEnv.ANTHROPIC_API_KEY, profile.apiKey))
|
sameOptionalEnvValue(processEnv.ANTHROPIC_API_KEY, profile.apiKey))
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
if (profile.provider === 'mistral') {
|
|
||||||
return (
|
|
||||||
processEnv.CLAUDE_CODE_USE_MISTRAL !== undefined &&
|
|
||||||
processEnv.CLAUDE_CODE_USE_GEMINI === undefined &&
|
|
||||||
processEnv.CLAUDE_CODE_USE_OPENAI === undefined &&
|
|
||||||
processEnv.CLAUDE_CODE_USE_GITHUB === undefined &&
|
|
||||||
processEnv.CLAUDE_CODE_USE_BEDROCK === undefined &&
|
|
||||||
processEnv.CLAUDE_CODE_USE_VERTEX === undefined &&
|
|
||||||
processEnv.CLAUDE_CODE_USE_FOUNDRY === undefined &&
|
|
||||||
sameOptionalEnvValue(processEnv.MISTRAL_BASE_URL, profile.baseUrl) &&
|
|
||||||
sameOptionalEnvValue(processEnv.MISTRAL_MODEL, profile.model) &&
|
|
||||||
(!includeApiKey ||
|
|
||||||
sameOptionalEnvValue(processEnv.MISTRAL_API_KEY, profile.apiKey))
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
if (profile.provider === 'gemini') {
|
|
||||||
return (
|
|
||||||
processEnv.CLAUDE_CODE_USE_GEMINI !== undefined &&
|
|
||||||
processEnv.CLAUDE_CODE_USE_MISTRAL === undefined &&
|
|
||||||
processEnv.CLAUDE_CODE_USE_OPENAI === undefined &&
|
|
||||||
processEnv.CLAUDE_CODE_USE_GITHUB === undefined &&
|
|
||||||
processEnv.CLAUDE_CODE_USE_BEDROCK === undefined &&
|
|
||||||
processEnv.CLAUDE_CODE_USE_VERTEX === undefined &&
|
|
||||||
processEnv.CLAUDE_CODE_USE_FOUNDRY === undefined &&
|
|
||||||
sameOptionalEnvValue(processEnv.GEMINI_BASE_URL, profile.baseUrl) &&
|
|
||||||
sameOptionalEnvValue(processEnv.GEMINI_MODEL, profile.model) &&
|
|
||||||
(!includeApiKey ||
|
|
||||||
sameOptionalEnvValue(processEnv.GEMINI_API_KEY, profile.apiKey))
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
return (
|
return (
|
||||||
processEnv.CLAUDE_CODE_USE_OPENAI !== undefined &&
|
processEnv.CLAUDE_CODE_USE_OPENAI !== undefined &&
|
||||||
processEnv.CLAUDE_CODE_USE_GEMINI === undefined &&
|
processEnv.CLAUDE_CODE_USE_GEMINI === undefined &&
|
||||||
@@ -415,7 +326,7 @@ function isProcessEnvAlignedWithProfile(
|
|||||||
processEnv.CLAUDE_CODE_USE_VERTEX === undefined &&
|
processEnv.CLAUDE_CODE_USE_VERTEX === undefined &&
|
||||||
processEnv.CLAUDE_CODE_USE_FOUNDRY === undefined &&
|
processEnv.CLAUDE_CODE_USE_FOUNDRY === undefined &&
|
||||||
sameOptionalEnvValue(processEnv.OPENAI_BASE_URL, profile.baseUrl) &&
|
sameOptionalEnvValue(processEnv.OPENAI_BASE_URL, profile.baseUrl) &&
|
||||||
sameOptionalEnvValue(processEnv.OPENAI_MODEL, getPrimaryModel(profile.model)) &&
|
sameOptionalEnvValue(processEnv.OPENAI_MODEL, profile.model) &&
|
||||||
(!includeApiKey ||
|
(!includeApiKey ||
|
||||||
sameOptionalEnvValue(processEnv.OPENAI_API_KEY, profile.apiKey))
|
sameOptionalEnvValue(processEnv.OPENAI_API_KEY, profile.apiKey))
|
||||||
)
|
)
|
||||||
@@ -454,22 +365,6 @@ export function clearProviderProfileEnvFromProcessEnv(
|
|||||||
delete processEnv.ANTHROPIC_API_KEY
|
delete processEnv.ANTHROPIC_API_KEY
|
||||||
delete processEnv[PROFILE_ENV_APPLIED_FLAG]
|
delete processEnv[PROFILE_ENV_APPLIED_FLAG]
|
||||||
delete processEnv[PROFILE_ENV_APPLIED_ID]
|
delete processEnv[PROFILE_ENV_APPLIED_ID]
|
||||||
|
|
||||||
delete processEnv.GEMINI_MODEL
|
|
||||||
delete processEnv.GEMINI_BASE_URL
|
|
||||||
delete processEnv.GEMINI_API_KEY
|
|
||||||
delete processEnv.GEMINI_AUTH_MODE
|
|
||||||
delete processEnv.GEMINI_ACCESS_TOKEN
|
|
||||||
delete processEnv.GOOGLE_API_KEY
|
|
||||||
|
|
||||||
delete processEnv.MISTRAL_MODEL
|
|
||||||
delete processEnv.MISTRAL_BASE_URL
|
|
||||||
delete processEnv.MISTRAL_API_KEY
|
|
||||||
|
|
||||||
// Clear provider-specific API keys
|
|
||||||
delete processEnv.MINIMAX_API_KEY
|
|
||||||
delete processEnv.NVIDIA_API_KEY
|
|
||||||
delete processEnv.NVIDIA_NIM
|
|
||||||
}
|
}
|
||||||
|
|
||||||
export function applyProviderProfileToProcessEnv(profile: ProviderProfile): void {
|
export function applyProviderProfileToProcessEnv(profile: ProviderProfile): void {
|
||||||
@@ -477,7 +372,7 @@ export function applyProviderProfileToProcessEnv(profile: ProviderProfile): void
|
|||||||
process.env[PROFILE_ENV_APPLIED_FLAG] = '1'
|
process.env[PROFILE_ENV_APPLIED_FLAG] = '1'
|
||||||
process.env[PROFILE_ENV_APPLIED_ID] = profile.id
|
process.env[PROFILE_ENV_APPLIED_ID] = profile.id
|
||||||
|
|
||||||
process.env.ANTHROPIC_MODEL = getPrimaryModel(profile.model)
|
process.env.ANTHROPIC_MODEL = profile.model
|
||||||
if (profile.provider === 'anthropic') {
|
if (profile.provider === 'anthropic') {
|
||||||
process.env.ANTHROPIC_BASE_URL = profile.baseUrl
|
process.env.ANTHROPIC_BASE_URL = profile.baseUrl
|
||||||
|
|
||||||
@@ -494,54 +389,12 @@ export function applyProviderProfileToProcessEnv(profile: ProviderProfile): void
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if (profile.provider === 'mistral') {
|
|
||||||
process.env.CLAUDE_CODE_USE_MISTRAL = '1'
|
|
||||||
process.env.MISTRAL_BASE_URL = profile.baseUrl
|
|
||||||
process.env.MISTRAL_MODEL = profile.model
|
|
||||||
|
|
||||||
if (profile.apiKey) {
|
|
||||||
process.env.MISTRAL_API_KEY = profile.apiKey
|
|
||||||
} else {
|
|
||||||
delete process.env.MISTRAL_API_KEY
|
|
||||||
}
|
|
||||||
|
|
||||||
delete process.env.OPENAI_BASE_URL
|
|
||||||
delete process.env.OPENAI_API_KEY
|
|
||||||
delete process.env.OPENAI_MODEL
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if (profile.provider === 'gemini') {
|
|
||||||
process.env.CLAUDE_CODE_USE_GEMINI = '1'
|
|
||||||
process.env.GEMINI_BASE_URL = profile.baseUrl
|
|
||||||
process.env.GEMINI_MODEL = profile.model
|
|
||||||
|
|
||||||
if (profile.apiKey) {
|
|
||||||
process.env.GEMINI_API_KEY = profile.apiKey
|
|
||||||
} else {
|
|
||||||
delete process.env.GEMINI_API_KEY
|
|
||||||
}
|
|
||||||
|
|
||||||
delete process.env.OPENAI_BASE_URL
|
|
||||||
delete process.env.OPENAI_API_KEY
|
|
||||||
delete process.env.OPENAI_MODEL
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
process.env.CLAUDE_CODE_USE_OPENAI = '1'
|
process.env.CLAUDE_CODE_USE_OPENAI = '1'
|
||||||
process.env.OPENAI_BASE_URL = profile.baseUrl
|
process.env.OPENAI_BASE_URL = profile.baseUrl
|
||||||
process.env.OPENAI_MODEL = getPrimaryModel(profile.model)
|
process.env.OPENAI_MODEL = profile.model
|
||||||
|
|
||||||
if (profile.apiKey) {
|
if (profile.apiKey) {
|
||||||
process.env.OPENAI_API_KEY = profile.apiKey
|
process.env.OPENAI_API_KEY = profile.apiKey
|
||||||
// Also set provider-specific API keys for detection
|
|
||||||
const baseUrl = profile.baseUrl.toLowerCase()
|
|
||||||
if (baseUrl.includes('minimax')) {
|
|
||||||
process.env.MINIMAX_API_KEY = profile.apiKey
|
|
||||||
}
|
|
||||||
if (baseUrl.includes('nvidia') || baseUrl.includes('integrate.api.nvidia')) {
|
|
||||||
process.env.NVIDIA_API_KEY = profile.apiKey
|
|
||||||
}
|
|
||||||
} else {
|
} else {
|
||||||
delete process.env.OPENAI_API_KEY
|
delete process.env.OPENAI_API_KEY
|
||||||
}
|
}
|
||||||
@@ -613,7 +466,7 @@ export function addProviderProfile(
|
|||||||
|
|
||||||
const activeProfile = getActiveProviderProfile()
|
const activeProfile = getActiveProviderProfile()
|
||||||
if (activeProfile?.id === profile.id) {
|
if (activeProfile?.id === profile.id) {
|
||||||
setActiveProviderProfile(profile.id)
|
applyProviderProfileToProcessEnv(profile)
|
||||||
clearActiveOpenAIModelOptionsCache()
|
clearActiveOpenAIModelOptionsCache()
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -695,16 +548,6 @@ export function persistActiveProviderProfileModel(
|
|||||||
return null
|
return null
|
||||||
}
|
}
|
||||||
|
|
||||||
// If the model is already part of the profile's model list, don't
|
|
||||||
// overwrite the field. This preserves comma-separated model lists like
|
|
||||||
// "glm-4.5, glm-4.7". Switching between models in the list is a
|
|
||||||
// session-level choice handled by mainLoopModelOverride, not a profile
|
|
||||||
// edit — the profile's model list should only change via explicit edit.
|
|
||||||
const existingModels = parseModelList(activeProfile.model)
|
|
||||||
if (existingModels.includes(nextModel)) {
|
|
||||||
return activeProfile
|
|
||||||
}
|
|
||||||
|
|
||||||
saveGlobalConfig(current => {
|
saveGlobalConfig(current => {
|
||||||
const currentProfiles = getProviderProfiles(current)
|
const currentProfiles = getProviderProfiles(current)
|
||||||
const profileIndex = currentProfiles.findIndex(
|
const profileIndex = currentProfiles.findIndex(
|
||||||
@@ -747,23 +590,6 @@ export function persistActiveProviderProfileModel(
|
|||||||
return resolvedProfile
|
return resolvedProfile
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
|
||||||
* Generate model options from a provider profile's model field.
|
|
||||||
* Each comma-separated model becomes a separate option in the picker.
|
|
||||||
*/
|
|
||||||
export function getProfileModelOptions(profile: ProviderProfile): ModelOption[] {
|
|
||||||
const models = parseModelList(profile.model)
|
|
||||||
if (models.length === 0) {
|
|
||||||
return []
|
|
||||||
}
|
|
||||||
|
|
||||||
return models.map(model => ({
|
|
||||||
value: model,
|
|
||||||
label: model,
|
|
||||||
description: `Provider: ${profile.name}`,
|
|
||||||
}))
|
|
||||||
}
|
|
||||||
|
|
||||||
export function setActiveProviderProfile(
|
export function setActiveProviderProfile(
|
||||||
profileId: string,
|
profileId: string,
|
||||||
): ProviderProfile | null {
|
): ProviderProfile | null {
|
||||||
@@ -775,85 +601,13 @@ export function setActiveProviderProfile(
|
|||||||
return null
|
return null
|
||||||
}
|
}
|
||||||
|
|
||||||
const profileModelOptions = getProfileModelOptions(activeProfile)
|
|
||||||
|
|
||||||
saveGlobalConfig(config => ({
|
saveGlobalConfig(config => ({
|
||||||
...config,
|
...config,
|
||||||
activeProviderProfileId: profileId,
|
activeProviderProfileId: profileId,
|
||||||
openaiAdditionalModelOptionsCache: profileModelOptions.length > 0
|
openaiAdditionalModelOptionsCache: getModelCacheByProfile(profileId, config),
|
||||||
? profileModelOptions
|
|
||||||
: getModelCacheByProfile(profileId, config),
|
|
||||||
openaiAdditionalModelOptionsCacheByProfile: {
|
|
||||||
...(config.openaiAdditionalModelOptionsCacheByProfile ?? {}),
|
|
||||||
[profileId]: profileModelOptions.length > 0
|
|
||||||
? profileModelOptions
|
|
||||||
: (config.openaiAdditionalModelOptionsCacheByProfile?.[profileId] ?? []),
|
|
||||||
},
|
|
||||||
}))
|
}))
|
||||||
|
|
||||||
applyProviderProfileToProcessEnv(activeProfile)
|
applyProviderProfileToProcessEnv(activeProfile)
|
||||||
|
|
||||||
// Keep startup persisted provider profile in sync so initial startup
|
|
||||||
// uses the selected provider/model.
|
|
||||||
const persistedProfile = (() => {
|
|
||||||
if (activeProfile.provider === 'anthropic') return 'openai' as const
|
|
||||||
return activeProfile.provider
|
|
||||||
})()
|
|
||||||
|
|
||||||
const profileEnv = (() => {
|
|
||||||
switch (activeProfile.provider) {
|
|
||||||
case 'gemini':
|
|
||||||
return (
|
|
||||||
buildGeminiProfileEnv({
|
|
||||||
model: activeProfile.model,
|
|
||||||
baseUrl: activeProfile.baseUrl,
|
|
||||||
apiKey: activeProfile.apiKey,
|
|
||||||
authMode: 'api-key',
|
|
||||||
processEnv: process.env,
|
|
||||||
}) ?? null
|
|
||||||
)
|
|
||||||
case 'mistral':
|
|
||||||
return (
|
|
||||||
buildMistralProfileEnv({
|
|
||||||
model: activeProfile.model,
|
|
||||||
baseUrl: activeProfile.baseUrl,
|
|
||||||
apiKey: activeProfile.apiKey,
|
|
||||||
processEnv: process.env,
|
|
||||||
}) ?? null
|
|
||||||
)
|
|
||||||
default:
|
|
||||||
// anthropic and all openai-compatible providers
|
|
||||||
return (
|
|
||||||
buildOpenAIProfileEnv({
|
|
||||||
model: activeProfile.model,
|
|
||||||
baseUrl: activeProfile.baseUrl,
|
|
||||||
apiKey: activeProfile.apiKey,
|
|
||||||
processEnv: process.env,
|
|
||||||
}) ?? null
|
|
||||||
)
|
|
||||||
}
|
|
||||||
})()
|
|
||||||
|
|
||||||
if (profileEnv) {
|
|
||||||
const startupProfile =
|
|
||||||
activeProfile.provider === 'anthropic'
|
|
||||||
? ({
|
|
||||||
profile: 'openai' as ProviderProfileStartup,
|
|
||||||
env: {
|
|
||||||
OPENAI_BASE_URL: activeProfile.baseUrl,
|
|
||||||
OPENAI_MODEL: activeProfile.model,
|
|
||||||
OPENAI_API_KEY: activeProfile.apiKey,
|
|
||||||
},
|
|
||||||
} as const)
|
|
||||||
: ({
|
|
||||||
profile: activeProfile.provider as ProviderProfileStartup,
|
|
||||||
env: profileEnv,
|
|
||||||
} as const)
|
|
||||||
|
|
||||||
const file = createProfileFile(startupProfile.profile, startupProfile.env)
|
|
||||||
saveProfileFile(file)
|
|
||||||
}
|
|
||||||
|
|
||||||
return activeProfile
|
return activeProfile
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -61,7 +61,15 @@ export function maskSecretForDisplay(
|
|||||||
return 'configured'
|
return 'configured'
|
||||||
}
|
}
|
||||||
|
|
||||||
return `${sanitized.slice(0, 3)}...${sanitized.slice(-3)}`
|
if (sanitized.startsWith('sk-')) {
|
||||||
|
return `${sanitized.slice(0, 3)}...${sanitized.slice(-4)}`
|
||||||
|
}
|
||||||
|
|
||||||
|
if (sanitized.startsWith('AIza')) {
|
||||||
|
return `${sanitized.slice(0, 4)}...${sanitized.slice(-4)}`
|
||||||
|
}
|
||||||
|
|
||||||
|
return `${sanitized.slice(0, 2)}...${sanitized.slice(-4)}`
|
||||||
}
|
}
|
||||||
|
|
||||||
export function redactSecretValueForDisplay(
|
export function redactSecretValueForDisplay(
|
||||||
|
|||||||
@@ -1,9 +1,6 @@
|
|||||||
import { afterEach, expect, test } from 'bun:test'
|
import { afterEach, expect, test } from 'bun:test'
|
||||||
|
|
||||||
import {
|
import { getProviderValidationError } from './providerValidation.ts'
|
||||||
getProviderValidationError,
|
|
||||||
shouldExitForStartupProviderValidationError,
|
|
||||||
} from './providerValidation.ts'
|
|
||||||
|
|
||||||
const originalEnv = {
|
const originalEnv = {
|
||||||
CLAUDE_CODE_USE_OPENAI: process.env.CLAUDE_CODE_USE_OPENAI,
|
CLAUDE_CODE_USE_OPENAI: process.env.CLAUDE_CODE_USE_OPENAI,
|
||||||
@@ -96,45 +93,3 @@ test('openai missing key error includes recovery guidance and config locations',
|
|||||||
expect(message).toContain('Saved startup settings can come from')
|
expect(message).toContain('Saved startup settings can come from')
|
||||||
expect(message).toContain('.openclaude-profile.json')
|
expect(message).toContain('.openclaude-profile.json')
|
||||||
})
|
})
|
||||||
|
|
||||||
test('startup provider validation allows interactive recovery', () => {
|
|
||||||
expect(
|
|
||||||
shouldExitForStartupProviderValidationError({
|
|
||||||
args: [],
|
|
||||||
stdoutIsTTY: true,
|
|
||||||
}),
|
|
||||||
).toBe(false)
|
|
||||||
})
|
|
||||||
|
|
||||||
test('startup provider validation stays strict for non-interactive launches', () => {
|
|
||||||
expect(
|
|
||||||
shouldExitForStartupProviderValidationError({
|
|
||||||
args: ['-p', 'hello'],
|
|
||||||
stdoutIsTTY: true,
|
|
||||||
}),
|
|
||||||
).toBe(true)
|
|
||||||
expect(
|
|
||||||
shouldExitForStartupProviderValidationError({
|
|
||||||
args: ['--print', 'hello'],
|
|
||||||
stdoutIsTTY: true,
|
|
||||||
}),
|
|
||||||
).toBe(true)
|
|
||||||
expect(
|
|
||||||
shouldExitForStartupProviderValidationError({
|
|
||||||
args: [],
|
|
||||||
stdoutIsTTY: false,
|
|
||||||
}),
|
|
||||||
).toBe(true)
|
|
||||||
expect(
|
|
||||||
shouldExitForStartupProviderValidationError({
|
|
||||||
args: ['--sdk-url', 'ws://127.0.0.1:3000'],
|
|
||||||
stdoutIsTTY: true,
|
|
||||||
}),
|
|
||||||
).toBe(true)
|
|
||||||
expect(
|
|
||||||
shouldExitForStartupProviderValidationError({
|
|
||||||
args: ['--sdk-url=ws://127.0.0.1:3000'],
|
|
||||||
stdoutIsTTY: true,
|
|
||||||
}),
|
|
||||||
).toBe(true)
|
|
||||||
})
|
|
||||||
|
|||||||
@@ -169,44 +169,3 @@ export async function validateProviderEnvOrExit(
|
|||||||
process.exit(1)
|
process.exit(1)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
export function shouldExitForStartupProviderValidationError(options: {
|
|
||||||
args?: string[]
|
|
||||||
stdoutIsTTY?: boolean
|
|
||||||
} = {}): boolean {
|
|
||||||
const args = options.args ?? process.argv.slice(2)
|
|
||||||
const stdoutIsTTY = options.stdoutIsTTY ?? process.stdout.isTTY
|
|
||||||
|
|
||||||
if (!stdoutIsTTY) {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
return (
|
|
||||||
args.includes('-p') ||
|
|
||||||
args.includes('--print') ||
|
|
||||||
args.includes('--init-only') ||
|
|
||||||
args.some(arg => arg.startsWith('--sdk-url'))
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
export async function validateProviderEnvForStartupOrExit(
|
|
||||||
env: NodeJS.ProcessEnv = process.env,
|
|
||||||
options?: {
|
|
||||||
args?: string[]
|
|
||||||
stdoutIsTTY?: boolean
|
|
||||||
},
|
|
||||||
): Promise<void> {
|
|
||||||
const error = await getProviderValidationError(env)
|
|
||||||
if (!error) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if (shouldExitForStartupProviderValidationError(options)) {
|
|
||||||
console.error(error)
|
|
||||||
process.exit(1)
|
|
||||||
}
|
|
||||||
|
|
||||||
console.error(
|
|
||||||
`Warning: provider configuration is incomplete.\n${error}\nOpenClaude will continue starting so you can run /provider and repair the saved provider settings.`,
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|||||||
@@ -1,33 +0,0 @@
|
|||||||
import { afterEach, beforeEach, expect, test } from 'bun:test'
|
|
||||||
|
|
||||||
import { buildInheritedEnvVars } from './spawnUtils.js'
|
|
||||||
|
|
||||||
const ORIGINAL_ENV = { ...process.env }
|
|
||||||
|
|
||||||
beforeEach(() => {
|
|
||||||
for (const key of Object.keys(process.env)) {
|
|
||||||
delete process.env[key]
|
|
||||||
}
|
|
||||||
})
|
|
||||||
|
|
||||||
afterEach(() => {
|
|
||||||
for (const key of Object.keys(process.env)) {
|
|
||||||
delete process.env[key]
|
|
||||||
}
|
|
||||||
Object.assign(process.env, ORIGINAL_ENV)
|
|
||||||
})
|
|
||||||
|
|
||||||
test('buildInheritedEnvVars marks spawned teammates as host-managed for provider routing', () => {
|
|
||||||
const envVars = buildInheritedEnvVars()
|
|
||||||
|
|
||||||
expect(envVars).toContain('CLAUDE_CODE_PROVIDER_MANAGED_BY_HOST=1')
|
|
||||||
})
|
|
||||||
|
|
||||||
test('buildInheritedEnvVars forwards PATH for source-built teammate tool lookups', () => {
|
|
||||||
process.env.PATH = '/custom/bin:/usr/bin'
|
|
||||||
|
|
||||||
const envVars = buildInheritedEnvVars()
|
|
||||||
|
|
||||||
expect(envVars).toContain('PATH=')
|
|
||||||
expect(envVars).toContain('/custom/bin\\:/usr/bin')
|
|
||||||
})
|
|
||||||
@@ -141,9 +141,6 @@ const TEAMMATE_ENV_VARS = [
|
|||||||
'NODE_EXTRA_CA_CERTS',
|
'NODE_EXTRA_CA_CERTS',
|
||||||
'REQUESTS_CA_BUNDLE',
|
'REQUESTS_CA_BUNDLE',
|
||||||
'CURL_CA_BUNDLE',
|
'CURL_CA_BUNDLE',
|
||||||
// Source builds may rely on user shell PATH for rg/node/bun and other tools.
|
|
||||||
// Forward it so teammates resolve the same toolchain as the parent session.
|
|
||||||
'PATH',
|
|
||||||
] as const
|
] as const
|
||||||
|
|
||||||
/**
|
/**
|
||||||
@@ -152,13 +149,7 @@ const TEAMMATE_ENV_VARS = [
|
|||||||
* plus any provider/config env vars that are set in the current process.
|
* plus any provider/config env vars that are set in the current process.
|
||||||
*/
|
*/
|
||||||
export function buildInheritedEnvVars(): string {
|
export function buildInheritedEnvVars(): string {
|
||||||
const envVars = [
|
const envVars = ['CLAUDECODE=1', 'CLAUDE_CODE_EXPERIMENTAL_AGENT_TEAMS=1']
|
||||||
'CLAUDECODE=1',
|
|
||||||
'CLAUDE_CODE_EXPERIMENTAL_AGENT_TEAMS=1',
|
|
||||||
// Teammates should inherit the leader-selected provider route instead of
|
|
||||||
// replaying persisted ~/.claude or settings.env provider defaults.
|
|
||||||
'CLAUDE_CODE_PROVIDER_MANAGED_BY_HOST=1',
|
|
||||||
]
|
|
||||||
|
|
||||||
for (const key of TEAMMATE_ENV_VARS) {
|
for (const key of TEAMMATE_ENV_VARS) {
|
||||||
const value = process.env[key]
|
const value = process.env[key]
|
||||||
|
|||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user