Compare commits: fix/363-st ... v0.2.2 (43 commits)

d2a057c6f1
08cc6f3287
84fcc7f7e0
ad11414def
9419e8a4a2
41a86d05fa
fa4b6a96c0
d03d77b110
15de1d6190
812facf024
2e39d2607a
a3633ac094
3cefe2297d
40ac164501
b3f3dc4e66
2e0e14d713
a02c44143b
7817fe88bd
4c50977f3c
b126e38b1a
6e94dd9136
91e4cfb15b
f4ac709fa6
8aaa4f22ac
a7f5982f64
cb8f8b7ac2
07621a6f8d
692471850f
68c296833d
9ccaa7a675
598651f423
c385047abb
42b121bd0d
32fbd0c7b4
e30ad17ae0
c328fdf9e2
4ad6bc50c1
284d9bda36
537c469c3a
ccaa193eec
2caf2fd982
ad724dc3a4
648ae8053b

.env.example (90 changes)

@@ -248,3 +248,93 @@ ANTHROPIC_API_KEY=sk-ant-your-key-here
# Enable debug logging
# CLAUDE_DEBUG=1

# =============================================================================
# WEB SEARCH (OPTIONAL)
# =============================================================================
# OpenClaude includes a web search tool. By default it uses DuckDuckGo (free)
# or the provider's native search (Anthropic firstParty / vertex).
#
# Set one API key below to enable a provider. That's it.

# ── Provider API keys — set ONE of these ────────────────────────────

# Tavily (AI-optimized search, recommended)
# TAVILY_API_KEY=tvly-your-key-here

# Exa (neural/semantic search)
# EXA_API_KEY=your-exa-key-here

# You.com (RAG-ready snippets)
# YOU_API_KEY=your-you-key-here

# Jina (s.jina.ai endpoint)
# JINA_API_KEY=your-jina-key-here

# Bing Web Search
# BING_API_KEY=your-bing-key-here

# Mojeek (privacy-focused)
# MOJEEK_API_KEY=your-mojeek-key-here

# Linkup
# LINKUP_API_KEY=your-linkup-key-here

# Firecrawl (premium, uses @mendable/firecrawl-js)
# FIRECRAWL_API_KEY=fc-your-key-here

# ── Provider selection mode ─────────────────────────────────────────
#
# WEB_SEARCH_PROVIDER controls fallback behavior:
#
# "auto" (default) — try all configured providers, fall through on failure
# "custom" — custom API only, throw on failure (NOT in auto chain)
# "firecrawl" — firecrawl only
# "tavily" — tavily only
# "exa" — exa only
# "you" — you.com only
# "jina" — jina only
# "bing" — bing only
# "mojeek" — mojeek only
# "linkup" — linkup only
# "ddg" — duckduckgo only
# "native" — anthropic native / codex only
#
# Auto mode priority: firecrawl → tavily → exa → you → jina → bing → mojeek →
# linkup → ddg
# Note: "custom" is NOT in the auto chain. To use the custom API provider,
# you must explicitly set WEB_SEARCH_PROVIDER=custom.
#
# WEB_SEARCH_PROVIDER=auto

# ── Built-in custom API presets ─────────────────────────────────────
#
# Use with WEB_KEY for the API key:
# WEB_PROVIDER=searxng|google|brave|serpapi
# WEB_KEY=your-api-key-here

# ── Custom API endpoint (advanced) ──────────────────────────────────
#
# WEB_SEARCH_API — base URL of your search endpoint
# WEB_QUERY_PARAM — query parameter name (default: "q")
# WEB_METHOD — GET or POST (default: GET)
# WEB_PARAMS — extra static query params as JSON: {"lang":"en","count":"10"}
# WEB_URL_TEMPLATE — URL template with {query} for path embedding
# WEB_BODY_TEMPLATE — custom POST body with {query} placeholder
# WEB_AUTH_HEADER — header name for API key (default: "Authorization")
# WEB_AUTH_SCHEME — prefix before key (default: "Bearer")
# WEB_HEADERS — extra headers as "Name: value; Name2: value2"
# WEB_JSON_PATH — dot-path to results array in response

# ── Custom API security guardrails ──────────────────────────────────
#
# The custom provider enforces security guardrails by default.
# Override these only if you understand the risks.
#
# WEB_CUSTOM_TIMEOUT_SEC=15 — request timeout in seconds (default 15)
# WEB_CUSTOM_MAX_BODY_KB=300 — max POST body size in KB (default 300)
# WEB_CUSTOM_ALLOW_ARBITRARY_HEADERS=false — set "true" to use non-standard headers
# WEB_CUSTOM_ALLOW_HTTP=false — set "true" to allow http:// URLs
# WEB_CUSTOM_ALLOW_PRIVATE=false — set "true" to target localhost/private IPs
#   (needed for self-hosted SearXNG)
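
# ── Example: self-hosted SearXNG via the custom endpoint ────────────
#
# A minimal sketch (not part of the shipped .env.example): it wires the
# custom-endpoint variables above to a local SearXNG instance. The host,
# port, and the "format"/"results" fields assume SearXNG's JSON API —
# adjust them to your deployment.
#
# WEB_SEARCH_PROVIDER=custom
# WEB_SEARCH_API=http://localhost:8888/search
# WEB_PARAMS={"format":"json"}
# WEB_JSON_PATH=results
# WEB_CUSTOM_ALLOW_HTTP=true
# WEB_CUSTOM_ALLOW_PRIVATE=true
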
.github/workflows/pr-checks.yml (13 changes, vendored)

@@ -29,6 +29,13 @@ jobs:
      with:
        bun-version: 1.3.11

      - name: Set up Python
        uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0
        with:
          python-version: "3.12"
          cache: "pip"
          cache-dependency-path: python/requirements.txt

      - name: Install dependencies
        run: bun install --frozen-lockfile

@@ -38,6 +45,12 @@ jobs:
      - name: Full unit test suite
        run: bun test --max-concurrency=1

      - name: Install Python test dependencies
        run: python -m pip install -r python/requirements.txt

      - name: Python unit tests
        run: python -m pytest -q python/tests

      - name: Suspicious PR intent scan
        run: bun run security:pr-scan -- --base ${{ github.event.pull_request.base.sha || 'origin/main' }}
      - name: Provider tests

.github/workflows/release.yml (88 changes, new file, vendored)

@@ -0,0 +1,88 @@
name: Auto Release

on:
  push:
    branches:
      - main

concurrency:
  group: auto-release-${{ github.ref }}
  cancel-in-progress: false

jobs:
  release-please:
    name: Release Please
    runs-on: ubuntu-latest
    permissions:
      contents: write
      pull-requests: write
    outputs:
      release_created: ${{ steps.release.outputs.release_created }}
      tag_name: ${{ steps.release.outputs.tag_name }}
      version: ${{ steps.release.outputs.version }}
    steps:
      - name: Run release-please
        id: release
        uses: googleapis/release-please-action@16a9c90856f42705d54a6fda1823352bdc62cf38
        with:
          token: ${{ secrets.GITHUB_TOKEN }}
          release-type: node

  publish-npm:
    name: Publish to npm
    needs: release-please
    if: ${{ needs.release-please.outputs.release_created == 'true' }}
    runs-on: ubuntu-latest
    environment: release
    permissions:
      contents: read
      id-token: write
    steps:
      - name: Checkout release tag
        uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5
        with:
          ref: ${{ needs.release-please.outputs.tag_name }}
          fetch-depth: 0

      - name: Set up Node.js
        uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020
        with:
          node-version: 24
          registry-url: https://registry.npmjs.org

      - name: Set up Bun
        uses: oven-sh/setup-bun@0c5077e51419868618aeaa5fe8019c62421857d6
        with:
          bun-version: 1.3.11

      - name: Install dependencies
        run: bun install --frozen-lockfile

      - name: Run unit tests
        run: bun test --max-concurrency=1

      - name: Smoke test
        run: bun run smoke

      - name: Build
        run: bun run build

      - name: Dry-run package
        run: npm pack --dry-run

      - name: Clear token auth for trusted publishing
        run: |
          unset NODE_AUTH_TOKEN
          echo "NODE_AUTH_TOKEN=" >> "$GITHUB_ENV"

      - name: Publish to npm
        run: npm publish --access public --provenance

      - name: Release summary
        run: |
          {
            echo "## Released ${{ needs.release-please.outputs.tag_name }}"
            echo
            echo "- npm: https://www.npmjs.com/package/@gitlawb/openclaude"
            echo "- GitHub: https://github.com/Gitlawb/openclaude/releases/tag/${{ needs.release-please.outputs.tag_name }}"
          } >> "$GITHUB_STEP_SUMMARY"

.release-please-manifest.json (3 changes, new file)

@@ -0,0 +1,3 @@
{
  ".": "0.2.2"
}

CHANGELOG.md (61 changes, new file)

@@ -0,0 +1,61 @@
# Changelog

## [0.2.2](https://github.com/Gitlawb/openclaude/compare/v0.2.1...v0.2.2) (2026-04-12)

### Bug Fixes

* **read/edit:** make compact line prefix unambiguous for tab-indented files ([#613](https://github.com/Gitlawb/openclaude/issues/613)) ([08cc6f3](https://github.com/Gitlawb/openclaude/commit/08cc6f328711cd93ce9fa53351266c29a0b0a341))

## [0.2.1](https://github.com/Gitlawb/openclaude/compare/v0.2.0...v0.2.1) (2026-04-12)

### Bug Fixes

* **provider:** add recovery guidance for missing OpenAI API key ([#616](https://github.com/Gitlawb/openclaude/issues/616)) ([9419e8a](https://github.com/Gitlawb/openclaude/commit/9419e8a4a21b3771d9ddb10f7072e0a8c5b5b631))

## [0.2.0](https://github.com/Gitlawb/openclaude/compare/v0.1.8...v0.2.0) (2026-04-12)

### Features

* add /cache-probe diagnostic command ([#580](https://github.com/Gitlawb/openclaude/issues/580)) ([9ccaa7a](https://github.com/Gitlawb/openclaude/commit/9ccaa7a6759b6991f4a566b4118c06e68a2398fe)), closes [#515](https://github.com/Gitlawb/openclaude/issues/515)
* add auto-fix service — auto-lint and test after AI file edits ([#508](https://github.com/Gitlawb/openclaude/issues/508)) ([c385047](https://github.com/Gitlawb/openclaude/commit/c385047abba4366866f4c87bfb5e0b0bd4dcbb9d))
* Add Gemini support with thought_signature fix ([#404](https://github.com/Gitlawb/openclaude/issues/404)) ([5012c16](https://github.com/Gitlawb/openclaude/commit/5012c160c9a2dff9418e7ee19dc9a4d29ef2b024))
* add headless gRPC server for external agent integration ([#278](https://github.com/Gitlawb/openclaude/issues/278)) ([26eef92](https://github.com/Gitlawb/openclaude/commit/26eef92fe72e9c3958d61435b8d3571e12bf2b74))
* add wiki mvp commands ([#532](https://github.com/Gitlawb/openclaude/issues/532)) ([c328fdf](https://github.com/Gitlawb/openclaude/commit/c328fdf9e2fe59ad101b049301298ce9ff24caca))
* GitHub provider lifecycle and onboarding hardening ([#351](https://github.com/Gitlawb/openclaude/issues/351)) ([ff7d499](https://github.com/Gitlawb/openclaude/commit/ff7d49990de515825ddbe4099f3a39b944b61370))

### Bug Fixes

* add File polyfill for Node < 20 to prevent startup deadlock with proxy ([#442](https://github.com/Gitlawb/openclaude/issues/442)) ([85aa8b0](https://github.com/Gitlawb/openclaude/commit/85aa8b0985c8f3cb8801efa5141114a0ab0f6a83))
* add GitHub Copilot model context windows and output limits ([#576](https://github.com/Gitlawb/openclaude/issues/576)) ([a7f5982](https://github.com/Gitlawb/openclaude/commit/a7f5982f6438ab0ddc3f0daae31ea68ac7ac206c)), closes [#515](https://github.com/Gitlawb/openclaude/issues/515)
* add LiteLLM-style aliases for GitHub Copilot context windows ([#606](https://github.com/Gitlawb/openclaude/issues/606)) ([2e0e14d](https://github.com/Gitlawb/openclaude/commit/2e0e14d71313e0e501efaa9e55c6c56f2742fb10))
* add store:false to Chat Completions and /responses fallback ([#578](https://github.com/Gitlawb/openclaude/issues/578)) ([8aaa4f2](https://github.com/Gitlawb/openclaude/commit/8aaa4f22ac5b942d82aa9cad54af30d56034515a))
* address code scanning alerts ([#434](https://github.com/Gitlawb/openclaude/issues/434)) ([e365cb4](https://github.com/Gitlawb/openclaude/commit/e365cb4010becabacd7cbccb4c3e59ea23a41e90))
* avoid sync github credential reads in provider manager ([#428](https://github.com/Gitlawb/openclaude/issues/428)) ([aff2bd8](https://github.com/Gitlawb/openclaude/commit/aff2bd87e4f2821992f74fb95481c505d0ba5d5d))
* convert dragged file paths to @mentions for attachment ([#382](https://github.com/Gitlawb/openclaude/issues/382)) ([112df59](https://github.com/Gitlawb/openclaude/commit/112df5911791ea71ee9efbb98ea59c5ded1ea161))
* custom web search — WEB_URL_TEMPLATE not recognized, timeout too short, silent native fallback ([#537](https://github.com/Gitlawb/openclaude/issues/537)) ([32fbd0c](https://github.com/Gitlawb/openclaude/commit/32fbd0c7b4168b32dcb13a5b69342e2727269201))
* defer startup checks and suppress recommendation dialogs during startup window (issue [#363](https://github.com/Gitlawb/openclaude/issues/363)) ([#504](https://github.com/Gitlawb/openclaude/issues/504)) ([2caf2fd](https://github.com/Gitlawb/openclaude/commit/2caf2fd982af1ec845c50152ad9d28d1a597f82f))
* display selected model in startup screen instead of hardcoded sonnet 4.6 ([#587](https://github.com/Gitlawb/openclaude/issues/587)) ([b126e38](https://github.com/Gitlawb/openclaude/commit/b126e38b1affddd2de83fcc3ba26f2e44b42a509))
* handle missing skill parameter in SkillTool ([#485](https://github.com/Gitlawb/openclaude/issues/485)) ([f9ce81b](https://github.com/Gitlawb/openclaude/commit/f9ce81bfb384e909353813fb6f6760cadd508ae7))
* include MCP tool results in microcompact to reduce token waste ([#348](https://github.com/Gitlawb/openclaude/issues/348)) ([52d33a8](https://github.com/Gitlawb/openclaude/commit/52d33a87a047b943aedaaaf772cd48636c263509))
* **ink:** restore host prop updates in React 19 reconciler ([#589](https://github.com/Gitlawb/openclaude/issues/589)) ([6e94dd9](https://github.com/Gitlawb/openclaude/commit/6e94dd913688b2d6433a9abe62a245c5f031b776))
* let saved provider profiles win on restart ([#513](https://github.com/Gitlawb/openclaude/issues/513)) ([cb8f8b7](https://github.com/Gitlawb/openclaude/commit/cb8f8b7ac2e3e74516ee219a3a48156db7c6ed78))
* normalize malformed Bash tool arguments from OpenAI-compatible providers ([#385](https://github.com/Gitlawb/openclaude/issues/385)) ([b4bd95b](https://github.com/Gitlawb/openclaude/commit/b4bd95b47715c9896240d708c106777507fd26ec))
* preserve only originally-required properties in strict tool schemas ([#471](https://github.com/Gitlawb/openclaude/issues/471)) ([ccaa193](https://github.com/Gitlawb/openclaude/commit/ccaa193eec5761f0972ffb58eb3189a81a9244b0))
* preserve unicode in Windows clipboard fallback ([#388](https://github.com/Gitlawb/openclaude/issues/388)) ([c193497](https://github.com/Gitlawb/openclaude/commit/c1934974aaf64db460cc850a044bd13cc744cce7))
* rebrand prompt identity to openclaude ([#496](https://github.com/Gitlawb/openclaude/issues/496)) ([598651f](https://github.com/Gitlawb/openclaude/commit/598651f42389ce76311ec00e8a9c701c939ead27))
* replace isDeepStrictEqual with navigation-aware options comparison ([#507](https://github.com/Gitlawb/openclaude/issues/507)) ([537c469](https://github.com/Gitlawb/openclaude/commit/537c469c3a2f7cb0eed05fa2f54dca57b6bc273f)), closes [#472](https://github.com/Gitlawb/openclaude/issues/472)
* report cache reads in streaming and correct cost calculation ([#577](https://github.com/Gitlawb/openclaude/issues/577)) ([f4ac709](https://github.com/Gitlawb/openclaude/commit/f4ac709fa6eda732bf45204fcab625ba6c5674b9))
* restore default context window for unknown 3p models ([#494](https://github.com/Gitlawb/openclaude/issues/494)) ([69ea1f1](https://github.com/Gitlawb/openclaude/commit/69ea1f1e4a99e9436215d8cb391a116a64442b94))
* restore Grep and Glob reliability on OpenAI paths ([#461](https://github.com/Gitlawb/openclaude/issues/461)) ([600c01f](https://github.com/Gitlawb/openclaude/commit/600c01faf761a080a2c7dede872ddbe05a132f23))
* restore Ollama auto-detect in first-run setup ([#561](https://github.com/Gitlawb/openclaude/issues/561)) ([68c2968](https://github.com/Gitlawb/openclaude/commit/68c296833dcef54ce44cb18b24357230b5204dbc))
* scrub canonical Anthropic headers from 3P shim requests ([#499](https://github.com/Gitlawb/openclaude/issues/499)) ([07621a6](https://github.com/Gitlawb/openclaude/commit/07621a6f8d0918170281869a47b5dbff90e71594))
* strip Anthropic params from 3P resume paths ([#479](https://github.com/Gitlawb/openclaude/issues/479)) ([4975cfc](https://github.com/Gitlawb/openclaude/commit/4975cfc2e0ddbe34aa4e8e3f52ee5eba07fbe465))
* suppress startup dialogs when input is buffered ([#423](https://github.com/Gitlawb/openclaude/issues/423)) ([8ece290](https://github.com/Gitlawb/openclaude/commit/8ece2900872dadd157e798ef501ddf126dac66c4))
* **tui:** restore prompt rendering on startup ([#498](https://github.com/Gitlawb/openclaude/issues/498)) ([e30ad17](https://github.com/Gitlawb/openclaude/commit/e30ad17ae0056787273be2caafd6cf5340b6ab57))
* update theme preview on focus change ([#562](https://github.com/Gitlawb/openclaude/issues/562)) ([6924718](https://github.com/Gitlawb/openclaude/commit/692471850fc789ee0797190089272407f9a4d953))
* **web-search:** close SSRF bypasses in custom provider hostname guard ([#610](https://github.com/Gitlawb/openclaude/issues/610)) ([a02c441](https://github.com/Gitlawb/openclaude/commit/a02c44143b257fbee7f38f1b93873cc0ea68a1f9))
* WebSearch providers + MCPTool bugs ([#593](https://github.com/Gitlawb/openclaude/issues/593)) ([91e4cfb](https://github.com/Gitlawb/openclaude/commit/91e4cfb15b62c04615834fd3c417fe38b4feb914))

@@ -137,10 +137,9 @@ export OPENAI_MODEL=llama-3.3-70b-versatile

### Mistral

```bash
-export CLAUDE_CODE_USE_OPENAI=1
-export OPENAI_API_KEY=...
-export OPENAI_BASE_URL=https://api.mistral.ai/v1
-export OPENAI_MODEL=mistral-large-latest
+export CLAUDE_CODE_USE_MISTRAL=1
+export MISTRAL_API_KEY=...
+export MISTRAL_MODEL=mistral-large-latest
```

### Azure OpenAI

@@ -1,6 +1,6 @@
{
  "name": "@gitlawb/openclaude",
-  "version": "0.1.8",
+  "version": "0.2.2",
  "description": "Claude Code opened to any LLM — OpenAI, Gemini, DeepSeek, Ollama, and 200+ models",
  "type": "module",
  "bin": {

@@ -140,7 +140,7 @@
  },
  "repository": {
    "type": "git",
-    "url": "https://gitlawb.com/z6MkqDnb7Siv3Cwj7pGJq4T5EsUisECqR8KpnDLwcaZq5TPr/openclaude"
+    "url": "https://github.com/Gitlawb/openclaude.git"
  },
  "keywords": [
    "claude-code",

python/requirements.txt (3 changes, new file)

@@ -0,0 +1,3 @@
pytest==7.4.4
pytest-asyncio==0.23.3
httpx==0.25.2

@@ -112,6 +112,14 @@ def build_default_providers() -> list[Provider]:
            big_model=big if "gemini" in big else "gemini-2.5-pro",
            small_model=small if "gemini" in small else "gemini-2.0-flash",
        ),
        Provider(
            name="mistral",
            ping_url="",
            api_key_env="MISTRAL_API_KEY",
            cost_per_1k_tokens=0.0001,
            big_model=big if "mistral" in big else "devstral-latest",
            small_model=small if "small" in small else "ministral-3b-latest",
        ),
        Provider(
            name="ollama",
            ping_url=f"{ollama_url}/api/tags",

release-please-config.json (11 changes, new file)

@@ -0,0 +1,11 @@
{
  "$schema": "https://raw.githubusercontent.com/googleapis/release-please/main/schemas/config.json",
  "packages": {
    ".": {
      "release-type": "node",
      "package-name": "@gitlawb/openclaude",
      "bump-minor-pre-major": true,
      "include-v-in-tag": true
    }
  }
}

@@ -11,6 +11,7 @@ import {
  buildAtomicChatProfileEnv,
  buildCodexProfileEnv,
  buildGeminiProfileEnv,
  buildMistralProfileEnv,
  buildOllamaProfileEnv,
  buildOpenAIProfileEnv,
  createProfileFile,

@@ -37,7 +38,7 @@ function parseArg(name: string): string | null {

function parseProviderArg(): ProviderProfile | 'auto' {
  const p = parseArg('--provider')?.toLowerCase()
-  if (p === 'openai' || p === 'ollama' || p === 'codex' || p === 'gemini' || p === 'atomic-chat') return p
+  if (p === 'openai' || p === 'ollama' || p === 'codex' || p === 'gemini' || p === 'mistral' || p === 'atomic-chat') return p
  return 'auto'
}

@@ -90,6 +91,21 @@ async function main(): Promise<void> {
      process.exit(1)
    }

    env = builtEnv
  } else if (selected === 'mistral') {
    const builtEnv = buildMistralProfileEnv({
      model: argModel || null,
      baseUrl: argBaseUrl || null,
      apiKey: argApiKey || null,
      processEnv: process.env,
    })

    if (!builtEnv) {
      console.error('Mistral profile requires an API key. Use --api-key or set MISTRAL_API_KEY.')
      console.error('Get a free key at: https://admin.mistral.ai/organization/api-keys')
      process.exit(1)
    }

    env = builtEnv
  } else if (selected === 'ollama') {
    resolvedOllamaModel ??= await resolveOllamaModel(argModel, argBaseUrl, goal)

@@ -169,7 +185,7 @@ async function main(): Promise<void> {

  console.log(`Saved profile: ${selected}`)
  console.log(`Goal: ${goal}`)
-  console.log(`Model: ${profile.env.GEMINI_MODEL || profile.env.OPENAI_MODEL || getGoalDefaultOpenAIModel(goal)}`)
+  console.log(`Model: ${profile.env.GEMINI_MODEL || profile.env.MISTRAL_MODEL || profile.env.OPENAI_MODEL || getGoalDefaultOpenAIModel(goal)}`)
  console.log(`Path: ${outputPath}`)
  console.log('Next: bun run dev:profile')
}

@@ -50,7 +50,7 @@ function parseLaunchOptions(argv: string[]): LaunchOptions {
      continue
    }

-    if ((lower === 'auto' || lower === 'openai' || lower === 'ollama' || lower === 'codex' || lower === 'gemini' || lower === 'atomic-chat') && requestedProfile === 'auto') {
+    if ((lower === 'auto' || lower === 'openai' || lower === 'ollama' || lower === 'codex' || lower === 'gemini' || lower === 'mistral' || lower === 'atomic-chat') && requestedProfile === 'auto') {
      requestedProfile = lower as ProviderProfile | 'auto'
      continue
    }

@@ -124,6 +124,8 @@ function printSummary(profile: ProviderProfile): void {
  console.log(`Launching profile: ${profile}`)
  if (profile === 'gemini') {
    console.log('Using configured Gemini provider settings.')
  } else if (profile === 'mistral') {
    console.log('Using configured Mistral provider settings.')
  } else if (profile === 'codex') {
    console.log('Using configured Codex/OpenAI-compatible provider settings.')
  } else if (profile === 'atomic-chat') {

@@ -139,7 +141,7 @@ async function main(): Promise<void> {
  const options = parseLaunchOptions(process.argv.slice(2))
  const requestedProfile = options.requestedProfile
  if (!requestedProfile) {
-    console.error('Usage: bun run scripts/provider-launch.ts [openai|ollama|codex|gemini|atomic-chat|auto] [--fast] [--goal <latency|balanced|coding>] [-- <cli args>]')
+    console.error('Usage: bun run scripts/provider-launch.ts [openai|ollama|codex|gemini|mistral|atomic-chat|auto] [--fast] [--goal <latency|balanced|coding>] [-- <cli args>]')
    process.exit(1)
  }

@@ -205,6 +207,11 @@ async function main(): Promise<void> {
    process.exit(1)
  }

  if (profile === 'mistral' && !env.MISTRAL_API_KEY) {
    console.error('MISTRAL_API_KEY is required for mistral profile. Run: bun run profile:init -- --provider mistral --api-key <key>')
    process.exit(1)
  }

  if (profile === 'openai' && (!env.OPENAI_API_KEY || env.OPENAI_API_KEY === 'SUA_CHAVE')) {
    console.error('OPENAI_API_KEY is required for openai profile and cannot be SUA_CHAVE. Run: bun run profile:init -- --provider openai --api-key <key>')
    process.exit(1)

@@ -118,14 +118,18 @@ function isLocalBaseUrl(baseUrl: string): boolean {
}

const GEMINI_DEFAULT_BASE_URL = 'https://generativelanguage.googleapis.com/v1beta/openai'
const GITHUB_MODELS_DEFAULT_BASE = 'https://models.github.ai/inference'
const MISTRAL_DEFAULT_BASE_URL = 'https://api.mistral.ai/v1'
const GITHUB_COPILOT_BASE = 'https://api.githubcopilot.com'

function currentBaseUrl(): string {
  if (isTruthy(process.env.CLAUDE_CODE_USE_GEMINI)) {
    return process.env.GEMINI_BASE_URL ?? GEMINI_DEFAULT_BASE_URL
  }
  if (isTruthy(process.env.CLAUDE_CODE_USE_MISTRAL)) {
    return process.env.MISTRAL_BASE_URL ?? MISTRAL_DEFAULT_BASE_URL
  }
  if (isTruthy(process.env.CLAUDE_CODE_USE_GITHUB)) {
-    return process.env.OPENAI_BASE_URL ?? GITHUB_MODELS_DEFAULT_BASE
+    return process.env.OPENAI_BASE_URL ?? GITHUB_COPILOT_BASE
  }
  return process.env.OPENAI_BASE_URL ?? 'https://api.openai.com/v1'
}

@@ -155,9 +159,34 @@ function checkGeminiEnv(): CheckResult[] {
  return results
}

function checkMistralEnv(): CheckResult[] {
  const results: CheckResult[] = []
  const model = process.env.MISTRAL_MODEL
  const key = process.env.MISTRAL_API_KEY
  const baseUrl = process.env.MISTRAL_BASE_URL ?? MISTRAL_DEFAULT_BASE_URL

  results.push(pass('Provider mode', 'Mistral provider enabled.'))

  if (!model) {
    results.push(pass('MISTRAL_MODEL', 'Not set. Default will be used at runtime.'))
  } else {
    results.push(pass('MISTRAL_MODEL', model))
  }

  results.push(pass('MISTRAL_BASE_URL', baseUrl))

  if (!key) {
    results.push(fail('MISTRAL_API_KEY', 'Missing. Set MISTRAL_API_KEY.'))
  } else {
    results.push(pass('MISTRAL_API_KEY', 'Configured.'))
  }

  return results
}

function checkGithubEnv(): CheckResult[] {
  const results: CheckResult[] = []
-  const baseUrl = process.env.OPENAI_BASE_URL ?? GITHUB_MODELS_DEFAULT_BASE
+  const baseUrl = process.env.OPENAI_BASE_URL ?? GITHUB_COPILOT_BASE
  results.push(pass('Provider mode', 'GitHub Models provider enabled.'))

  const token = process.env.GITHUB_TOKEN ?? process.env.GH_TOKEN

@@ -186,12 +215,17 @@ function checkOpenAIEnv(): CheckResult[] {
  const results: CheckResult[] = []
  const useGemini = isTruthy(process.env.CLAUDE_CODE_USE_GEMINI)
  const useGithub = isTruthy(process.env.CLAUDE_CODE_USE_GITHUB)
  const useMistral = isTruthy(process.env.CLAUDE_CODE_USE_MISTRAL)
  const useOpenAI = isTruthy(process.env.CLAUDE_CODE_USE_OPENAI)

  if (useGemini) {
    return checkGeminiEnv()
  }

  if (useMistral) {
    return checkMistralEnv()
  }

  if (useGithub && !useOpenAI) {
    return checkGithubEnv()
  }

@@ -268,8 +302,9 @@ async function checkBaseUrlReachability(): Promise<CheckResult> {
  const useGemini = isTruthy(process.env.CLAUDE_CODE_USE_GEMINI)
  const useOpenAI = isTruthy(process.env.CLAUDE_CODE_USE_OPENAI)
  const useGithub = isTruthy(process.env.CLAUDE_CODE_USE_GITHUB)
  const useMistral = isTruthy(process.env.CLAUDE_CODE_USE_MISTRAL)

-  if (!useGemini && !useOpenAI && !useGithub) {
+  if (!useGemini && !useOpenAI && !useGithub && !useMistral) {
    return pass('Provider reachability', 'Skipped (OpenAI-compatible mode disabled).')
  }

@@ -326,6 +361,8 @@ async function checkBaseUrlReachability(): Promise<CheckResult> {
    })
  } else if (useGemini && (process.env.GEMINI_API_KEY ?? process.env.GOOGLE_API_KEY)) {
    headers.Authorization = `Bearer ${process.env.GEMINI_API_KEY ?? process.env.GOOGLE_API_KEY}`
  } else if (useMistral && process.env.MISTRAL_API_KEY) {
    headers.Authorization = `Bearer ${process.env.MISTRAL_API_KEY}`
  } else if (process.env.OPENAI_API_KEY) {
    headers.Authorization = `Bearer ${process.env.OPENAI_API_KEY}`
  }

@@ -373,7 +410,8 @@ function checkOllamaProcessorMode(): CheckResult {
  if (
    !isTruthy(process.env.CLAUDE_CODE_USE_OPENAI) ||
    isTruthy(process.env.CLAUDE_CODE_USE_GEMINI) ||
-    isTruthy(process.env.CLAUDE_CODE_USE_GITHUB)
+    isTruthy(process.env.CLAUDE_CODE_USE_GITHUB) ||
+    isTruthy(process.env.CLAUDE_CODE_USE_MISTRAL)
  ) {
    return pass('Ollama processor mode', 'Skipped (OpenAI-compatible mode disabled).')
  }

@@ -425,6 +463,14 @@ function serializeSafeEnvSummary(): Record<string, string | boolean> {
      GEMINI_API_KEY_SET: Boolean(process.env.GEMINI_API_KEY ?? process.env.GOOGLE_API_KEY),
    }
  }
  if (isTruthy(process.env.CLAUDE_CODE_USE_MISTRAL)) {
    return {
      CLAUDE_CODE_USE_MISTRAL: true,
      MISTRAL_MODEL: process.env.MISTRAL_MODEL ?? '(unset, default: devstral-latest)',
      MISTRAL_BASE_URL: process.env.MISTRAL_BASE_URL ?? 'https://api.mistral.ai/v1',
      MISTRAL_API_KEY_SET: Boolean(process.env.MISTRAL_API_KEY),
    }
  }
  if (
    isTruthy(process.env.CLAUDE_CODE_USE_GITHUB) &&
    !isTruthy(process.env.CLAUDE_CODE_USE_OPENAI)

@@ -435,7 +481,7 @@ function serializeSafeEnvSummary(): Record<string, string | boolean> {
      process.env.OPENAI_MODEL ??
      '(unset, default: github:copilot → openai/gpt-4.1)',
    OPENAI_BASE_URL:
-      process.env.OPENAI_BASE_URL ?? GITHUB_MODELS_DEFAULT_BASE,
+      process.env.OPENAI_BASE_URL ?? GITHUB_COPILOT_BASE,
    GITHUB_TOKEN_SET: Boolean(
      process.env.GITHUB_TOKEN ?? process.env.GH_TOKEN,
    ),

@@ -400,12 +400,12 @@ export async function update() {
  if (useLocalUpdate) {
    process.stderr.write('Try manually updating with:\n')
    process.stderr.write(
-      ` cd ~/.claude/local && npm update ${MACRO.PACKAGE_URL}\n`,
+      ` cd ~/.openclaude/local && npm update ${MACRO.PACKAGE_URL}\n`,
    )
  } else {
    process.stderr.write('Try running with sudo or fix npm permissions\n')
    process.stderr.write(
-      'Or consider using native installation with: claude install\n',
+      'Or consider using native installation with: openclaude install\n',
    )
  }
  await gracefulShutdown(1)

@@ -415,11 +415,11 @@ export async function update() {
  if (useLocalUpdate) {
    process.stderr.write('Try manually updating with:\n')
    process.stderr.write(
-      ` cd ~/.claude/local && npm update ${MACRO.PACKAGE_URL}\n`,
+      ` cd ~/.openclaude/local && npm update ${MACRO.PACKAGE_URL}\n`,
    )
  } else {
    process.stderr.write(
-      'Or consider using native installation with: claude install\n',
+      'Or consider using native installation with: openclaude install\n',
    )
  }
  await gracefulShutdown(1)

@@ -32,6 +32,7 @@ import logout from './commands/logout/index.js'
import installGitHubApp from './commands/install-github-app/index.js'
import installSlackApp from './commands/install-slack-app/index.js'
import breakCache from './commands/break-cache/index.js'
import cacheProbe from './commands/cache-probe/index.js'
import mcp from './commands/mcp/index.js'
import mobile from './commands/mobile/index.js'
import onboarding from './commands/onboarding/index.js'

@@ -136,6 +137,7 @@ import hooks from './commands/hooks/index.js'
import files from './commands/files/index.js'
import branch from './commands/branch/index.js'
import agents from './commands/agents/index.js'
import autoFix from './commands/auto-fix.js'
import plugin from './commands/plugin/index.js'
import reloadPlugins from './commands/reload-plugins/index.js'
import rewind from './commands/rewind/index.js'

@@ -143,6 +145,7 @@ import heapDump from './commands/heapdump/index.js'
import mockLimits from './commands/mock-limits/index.js'
import bridgeKick from './commands/bridge-kick.js'
import version from './commands/version.js'
import wiki from './commands/wiki/index.js'
import summary from './commands/summary/index.js'
import {
  resetLimits,

@@ -263,8 +266,10 @@ const COMMANDS = memoize((): Command[] => [
  addDir,
  advisor,
  agents,
  autoFix,
  branch,
  btw,
  cacheProbe,
  chrome,
  clear,
  color,

@@ -324,6 +329,7 @@ const COMMANDS = memoize((): Command[] => [
  usage,
  usageReport,
  vim,
  wiki,
  ...(webCmd ? [webCmd] : []),
  ...(forkCmd ? [forkCmd] : []),
  ...(buddy ? [buddy] : []),

src/commands/auto-fix.ts (25 changes, new file)

@@ -0,0 +1,25 @@
import type { Command } from '../types/command.js'

const command: Command = {
  name: 'auto-fix',
  description: 'Configure auto-fix: run lint/test after AI edits',
  isEnabled: () => true,
  type: 'prompt',
  progressMessage: 'Configuring auto-fix...',
  contentLength: 0,
  source: 'builtin',
  async getPromptForCommand() {
    return [
      {
        type: 'text',
        text:
          'The user wants to configure auto-fix settings. Auto-fix automatically runs lint and test commands after AI file edits, feeding errors back for self-repair.\n\n' +
          'Current settings location: `.claude/settings.json` or `.claude/settings.local.json`\n\n' +
          'Example configuration:\n```json\n{\n  "autoFix": {\n    "enabled": true,\n    "lint": "eslint . --fix",\n    "test": "bun test",\n    "maxRetries": 3,\n    "timeout": 30000\n  }\n}\n```\n\n' +
          'Ask the user what lint and test commands they use, then help them set up the configuration.',
      },
    ]
  },
}

export default command

src/commands/cache-probe/cache-probe.ts (413 changes, new file)

@@ -0,0 +1,413 @@
import { getSessionId } from '../../bootstrap/state.js'
import { resolveProviderRequest } from '../../services/api/providerConfig.js'
import type { LocalCommandCall } from '../../types/command.js'
import { logForDebugging } from '../../utils/debug.js'
import { isEnvTruthy } from '../../utils/envUtils.js'
import { hydrateGithubModelsTokenFromSecureStorage } from '../../utils/githubModelsCredentials.js'
import { getMainLoopModel } from '../../utils/model/model.js'

const COPILOT_HEADERS: Record<string, string> = {
  'User-Agent': 'GitHubCopilotChat/0.26.7',
  'Editor-Version': 'vscode/1.99.3',
  'Editor-Plugin-Version': 'copilot-chat/0.26.7',
  'Copilot-Integration-Id': 'vscode-chat',
}

// Large system prompt (~6000 chars, ~1500 tokens) to cross the 1024-token cache threshold
const SYSTEM_PROMPT = [
  'You are a coding assistant. Answer concisely.',
  'CONTEXT: User is working on a TypeScript project with Bun runtime.',
  ...Array.from(
    { length: 80 },
    (_, i) =>
      `Rule ${i + 1}: Follow best practices for TypeScript including strict typing, error handling, testing, and clean code. Prefer explicit types over any. Use const assertions. Await all async operations.`,
  ),
].join('\n\n')

const USER_MESSAGE = 'Say "hello" and nothing else.'
const DELAY_MS = 3000

/**
 * Extract model family from a versioned model string.
 * e.g. "gpt-5.4-0626" → "gpt-5.4", "codex-mini-latest" → "codex-mini"
 */
function getModelFamily(model: string | undefined): string {
  if (!model) return 'unknown'
  return model
    .replace(/-\d{4,}$/, '')
    .replace(/-latest$/, '')
    .replace(/-preview$/, '')
}
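
/**
 * Resolve a dot-separated path (e.g. "prompt_tokens_details.cached_tokens")
 * against a nested object, returning undefined when any segment is missing.
 */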
function getField(obj: unknown, path: string): unknown {
  return path
    .split('.')
    .reduce((o: any, k: string) => (o != null ? o[k] : undefined), obj)
}

interface ProbeResult {
  label: string
  status: number
  elapsed: number
  headers: Record<string, string>
  usage: Record<string, unknown> | null
  responseId: string | null
  error: string | null
}

async function sendProbe(
  url: string,
  headers: Record<string, string>,
  body: Record<string, unknown>,
  label: string,
): Promise<ProbeResult> {
  const start = Date.now()
  let response: Response
  try {
    response = await fetch(url, {
      method: 'POST',
      headers,
      body: JSON.stringify(body),
    })
  } catch (err: any) {
    return {
      label,
      status: 0,
      elapsed: Date.now() - start,
      headers: {},
      usage: null,
      responseId: null,
      error: err.message,
    }
  }
  const elapsed = Date.now() - start

  const respHeaders: Record<string, string> = {}
  response.headers.forEach((value, key) => {
    respHeaders[key] = value
  })

  if (!response.ok) {
    const errorBody = await response.text().catch(() => '')
    return {
      label,
      status: response.status,
      elapsed,
      headers: respHeaders,
      usage: null,
      responseId: null,
      error: errorBody,
    }
  }

  // Parse SSE stream for usage data
  const text = await response.text()
  let usage: Record<string, unknown> | null = null
  let responseId: string | null = null

  const isResponses = url.endsWith('/responses')
  for (const chunk of text.split('\n\n')) {
    const lines = chunk
      .split('\n')
      .map((l) => l.trim())
      .filter(Boolean)

    if (isResponses) {
      const eventLine = lines.find((l) => l.startsWith('event: '))
      const dataLines = lines.filter((l) => l.startsWith('data: '))
      if (!eventLine || !dataLines.length) continue
      const event = eventLine.slice(7).trim()
      if (
        event === 'response.completed' ||
        event === 'response.incomplete'
      ) {
        try {
          const data = JSON.parse(
            dataLines.map((l) => l.slice(6)).join('\n'),
          )
          usage = (data?.response?.usage as Record<string, unknown>) ?? null
          responseId = (data?.response?.id as string) ?? null
        } catch {}
      }
    } else {
      for (const line of lines) {
        if (!line.startsWith('data: ')) continue
        const raw = line.slice(6).trim()
        if (raw === '[DONE]') continue
        try {
          const data = JSON.parse(raw) as Record<string, unknown>
          if (data.usage) {
            usage = data.usage as Record<string, unknown>
            responseId = (data.id as string) ?? null
          }
        } catch {}
      }
    }
  }

  return { label, status: response.status, elapsed, headers: respHeaders, usage, responseId, error: null }
}

function formatResult(r: ProbeResult): string {
  const lines: string[] = [`--- ${r.label} ---`]
  if (r.error) {
    lines.push(`  ERROR (HTTP ${r.status}): ${r.error.slice(0, 200)}`)
    return lines.join('\n')
  }
  lines.push(`  HTTP ${r.status} — ${r.elapsed}ms`)
  if (r.responseId) lines.push(`  response.id: ${r.responseId}`)

  if (r.usage) {
    lines.push('  Usage:')
    lines.push(`    ${JSON.stringify(r.usage, null, 2).replace(/\n/g, '\n    ')}`)
  } else {
    lines.push('  Usage: null')
  }

  // Interesting headers
  for (const h of [
    'openai-processing-ms',
    'x-ratelimit-remaining',
    'x-ratelimit-limit',
    'x-ms-region',
    'x-github-request-id',
    'x-request-id',
  ]) {
    if (r.headers[h]) lines.push(`  ${h}: ${r.headers[h]}`)
  }
  return lines.join('\n')
}

export const call: LocalCommandCall = async (args) => {
  const parts = (args ?? '').trim().split(/\s+/).filter(Boolean)
  const noKey = parts.includes('--no-key')
  const modelOverride = parts.find((p) => !p.startsWith('--')) || undefined
  const modelStr = modelOverride ?? getMainLoopModel()
  const request = resolveProviderRequest({ model: modelStr })
  const isGithub = isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB)

  // Resolve API key the same way the OpenAI shim does
  let apiKey = process.env.OPENAI_API_KEY ?? ''
  if (!apiKey && isGithub) {
    hydrateGithubModelsTokenFromSecureStorage()
    apiKey =
      process.env.OPENAI_API_KEY ??
      process.env.GITHUB_TOKEN ??
      process.env.GH_TOKEN ??
      ''
  }

  if (!apiKey) {
    return {
      type: 'text',
      value:
        'No API key found. Make sure you are in an active OpenAI-compatible or GitHub Copilot session.\n' +
        'For GitHub Copilot: run /onboard-github first.\n' +
        'For OpenAI-compatible: set OPENAI_API_KEY.',
    }
  }

  const useResponses = request.transport === 'codex_responses'
  const endpoint = useResponses ? '/responses' : '/chat/completions'
  const url = `${request.baseUrl}${endpoint}`
  const family = getModelFamily(request.resolvedModel)
  const cacheKey = `${getSessionId()}:${family}`

  const headers: Record<string, string> = {
    'Content-Type': 'application/json',
    Authorization: `Bearer ${apiKey}`,
    originator: 'openclaude',
  }
  if (isGithub) {
    Object.assign(headers, COPILOT_HEADERS)
  }

  let body: Record<string, unknown>
  if (useResponses) {
    body = {
      model: request.resolvedModel,
      instructions: SYSTEM_PROMPT,
      input: [
        {
          type: 'message',
          role: 'user',
          content: [{ type: 'input_text', text: USER_MESSAGE }],
        },
      ],
      stream: true,
      ...(noKey ? {} : {
        store: false,
        prompt_cache_key: cacheKey,
        prompt_cache_retention: '24h',
      }),
    }
  } else {
    body = {
      model: request.resolvedModel,
      messages: [
        { role: 'system', content: SYSTEM_PROMPT },
        { role: 'user', content: USER_MESSAGE },
      ],
      stream: true,
      stream_options: { include_usage: true },
      max_tokens: 20,
      ...(noKey ? {} : {
        store: false,
        prompt_cache_key: cacheKey,
      }),
    }
  }

  // Log configuration
  const config = [
    `[cache-probe] Starting cache probe${noKey ? ' (--no-key: cache params OMITTED)' : ''}`,
    `  model: ${request.resolvedModel} (family: ${family})`,
    `  transport: ${request.transport}`,
    `  endpoint: ${url}`,
    `  prompt_cache_key: ${noKey ? 'NOT SENT' : cacheKey}`,
    `  store: ${noKey ? 'NOT SENT' : 'false'}`,
    `  system prompt: ~${Math.round(SYSTEM_PROMPT.length / 4)} tokens`,
    `  delay between calls: ${DELAY_MS}ms`,
  ].join('\n')
  logForDebugging(config)

  // Call 1 — Cold
  const r1 = await sendProbe(url, headers, body, 'CALL 1 — Cold (no cache)')
  logForDebugging(`[cache-probe]\n${formatResult(r1)}`)

  if (r1.error) {
    return {
      type: 'text',
      value: `Cache probe failed on first call: HTTP ${r1.status}\n${r1.error.slice(0, 300)}\n\nFull details in debug log.`,
    }
  }

  // Wait
  await new Promise((r) => setTimeout(r, DELAY_MS))

  // Call 2 — Warm
  const r2 = await sendProbe(url, headers, body, 'CALL 2 — Warm (cache expected)')
  logForDebugging(`[cache-probe]\n${formatResult(r2)}`)

  // --- Comparison ---
  const fields = [
    'input_tokens',
    'output_tokens',
    'total_tokens',
    'prompt_tokens',
    'completion_tokens',
    'input_tokens_details.cached_tokens',
    'prompt_tokens_details.cached_tokens',
    'output_tokens_details.reasoning_tokens',
  ]

  const comparison: string[] = ['[cache-probe] COMPARISON']
  comparison.push(
    `  ${'Field'.padEnd(42)} ${'Call 1'.padStart(8)} ${'Call 2'.padStart(8)} ${'Delta'.padStart(8)}`,
  )
  comparison.push(`  ${'-'.repeat(72)}`)

  for (const f of fields) {
    const v1 = getField(r1.usage, f)
    const v2 = getField(r2.usage, f)
    if (v1 === undefined && v2 === undefined) continue
    const d =
      typeof v1 === 'number' && typeof v2 === 'number' ? v2 - v1 : ''
    comparison.push(
      `  ${f.padEnd(42)} ${String(v1 ?? '-').padStart(8)} ${String(v2 ?? '-').padStart(8)} ${String(d).padStart(8)}`,
    )
  }

  comparison.push('')
  comparison.push(
    `  Latency: ${r1.elapsed}ms → ${r2.elapsed}ms (${r2.elapsed - r1.elapsed > 0 ? '+' : ''}${r2.elapsed - r1.elapsed}ms)`,
  )

  // Header comparison
  for (const h of ['openai-processing-ms', 'x-ms-region', 'x-ratelimit-remaining']) {
    const v1 = r1.headers[h]
    const v2 = r2.headers[h]
    if (v1 || v2) {
      comparison.push(`  ${h}: ${v1 ?? '-'} → ${v2 ?? '-'}`)
    }
  }

  // Verdict
  const cached2 =
    (getField(r2.usage, 'input_tokens_details.cached_tokens') as number) ??
    (getField(r2.usage, 'prompt_tokens_details.cached_tokens') as number) ??
    0
  const input1 =
    ((r1.usage?.input_tokens ?? r1.usage?.prompt_tokens) as number) ?? 0
  const input2 =
    ((r2.usage?.input_tokens ?? r2.usage?.prompt_tokens) as number) ?? 0

  let verdict: string
  if (cached2 > 0) {
    const rate = input2 > 0 ? Math.round((cached2 / input2) * 100) : '?'
    verdict = `CACHE HIT: ${cached2} cached tokens (${rate}% of input)`
  } else if (input1 === 0 && input2 === 0) {
    verdict = 'INCONCLUSIVE: Server returns 0 input_tokens — cannot measure'
  } else if (r2.elapsed < r1.elapsed * 0.6 && input1 > 100) {
    verdict = `POSSIBLE SILENT CACHING: Call 2 was ${Math.round((1 - r2.elapsed / r1.elapsed) * 100)}% faster but no cached_tokens reported`
  } else {
    verdict = 'NO CACHE DETECTED'
  }

  comparison.push(`\n  Verdict: ${verdict}`)

  // --- Simulate what main's shim code does with this usage ---
  // codexShim.ts makeUsage() — used for Responses API (GPT-5+/Codex)
  function mainMakeUsage(u: any) {
    return {
      input_tokens: u?.input_tokens ?? 0,
      output_tokens: u?.output_tokens ?? 0,
      cache_creation_input_tokens: 0,
      cache_read_input_tokens: 0, // ← main hardcodes this to 0
    }
  }
  // openaiShim.ts convertChunkUsage() — used for Chat Completions
  function mainConvertChunkUsage(u: any) {
    return {
      input_tokens: u?.prompt_tokens ?? 0,
      output_tokens: u?.completion_tokens ?? 0,
      cache_creation_input_tokens: 0,
      cache_read_input_tokens: u?.prompt_tokens_details?.cached_tokens ?? 0,
    }
  }

  const shimFn = useResponses ? mainMakeUsage : mainConvertChunkUsage
  const shim1 = shimFn(r1.usage)
  const shim2 = shimFn(r2.usage)

  comparison.push('')
  comparison.push(`  --- What main's shim reports (${useResponses ? 'codexShim.makeUsage' : 'openaiShim.convertChunkUsage'}) ---`)
  comparison.push(`  Call 1: cache_read_input_tokens=${shim1.cache_read_input_tokens}`)
  comparison.push(`  Call 2: cache_read_input_tokens=${shim2.cache_read_input_tokens}`)
  if (useResponses && cached2 > 0) {
    comparison.push(`  BUG: Server returned ${cached2} cached tokens but main's makeUsage() drops it → reports 0`)
  } else if (!useResponses && shim2.cache_read_input_tokens > 0) {
    comparison.push(`  OK: Chat Completions path on main correctly reads cached_tokens`)
  }

  logForDebugging(comparison.join('\n'))

  // User-facing summary
  const mode = noKey ? ' (NO cache key sent)' : ''
  const shimLabel = useResponses ? 'codexShim.makeUsage()' : 'openaiShim.convertChunkUsage()'
  const summary = [
    `Cache Probe — ${request.resolvedModel} via ${useResponses ? 'Responses API' : 'Chat Completions'}${mode}`,
    '',
    `Call 1: ${r1.elapsed}ms, input=${input1}, cached=${(getField(r1.usage, 'input_tokens_details.cached_tokens') as number) ?? (getField(r1.usage, 'prompt_tokens_details.cached_tokens') as number) ?? 0}`,
    `Call 2: ${r2.elapsed}ms, input=${input2}, cached=${cached2}`,
    '',
    verdict,
    '',
    `What main's ${shimLabel} reports:`,
    `  Call 2 cache_read_input_tokens = ${shim2.cache_read_input_tokens}${useResponses && cached2 > 0 ? ' ← BUG: server sent ' + cached2 + ' but main drops it' : ''}`,
    '',
    'Full details written to debug log.',
  ].join('\n')

  return { type: 'text', value: summary }
}

src/commands/cache-probe/index.ts (17 changes, new file)

@@ -0,0 +1,17 @@
import type { Command } from '../../commands.js'
import { isEnvTruthy } from '../../utils/envUtils.js'

const cacheProbe: Command = {
  type: 'local',
  name: 'cache-probe',
  description:
    'Send identical requests to test prompt caching (results in debug log)',
  argumentHint: '[model] [--no-key]',
  isEnabled: () =>
    isEnvTruthy(process.env.CLAUDE_CODE_USE_OPENAI) ||
    isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB),
  supportsNonInteractive: false,
  load: () => import('./cache-probe.js'),
}

export default cacheProbe
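
Hypothetical invocations, inferred from the argument parsing in cache-probe.ts above (the positional model argument is optional and falls back to the current main-loop model; --no-key omits store and prompt_cache_key so provider behavior can be compared with and without explicit cache hints):

/cache-probe
/cache-probe --no-key
/cache-probe gpt-4.1 --no-key
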
@@ -45,7 +45,7 @@ function getPromptContent(
<!-- CHANGELOG:END -->`
  let slackStep = `

-5. After creating/updating the PR, check if the user's CLAUDE.md mentions posting to Slack channels. If it does, use ToolSearch to search for "slack send message" tools. If ToolSearch finds a Slack tool, ask the user if they'd like you to post the PR URL to the relevant Slack channel. Only post if the user confirms. If ToolSearch returns no results or errors, skip this step silently—do not mention the failure, do not attempt workarounds, and do not try alternative approaches.`
+5. After creating/updating the PR, check if the user's AGENTS.md or CLAUDE.md mentions posting to Slack channels. If it does, use ToolSearch to search for "slack send message" tools. If ToolSearch finds a Slack tool, ask the user if they'd like you to post the PR URL to the relevant Slack channel. Only post if the user confirms. If ToolSearch returns no results or errors, skip this step silently—do not mention the failure, do not attempt workarounds, and do not try alternative approaches.`
  if (process.env.USER_TYPE === 'ant' && isUndercover()) {
    prefix = getUndercoverInstructions() + '\n'
    reviewerArg = ''

src/commands/init.test.ts (43 changes, new file)

@@ -0,0 +1,43 @@
import { afterEach, expect, mock, test } from 'bun:test'

const originalClaudeCodeNewInit = process.env.CLAUDE_CODE_NEW_INIT

async function importInitCommand() {
  return (await import(`./init.ts?ts=${Date.now()}-${Math.random()}`)).default
}

afterEach(() => {
  mock.restore()

  if (originalClaudeCodeNewInit === undefined) {
    delete process.env.CLAUDE_CODE_NEW_INIT
  } else {
    process.env.CLAUDE_CODE_NEW_INIT = originalClaudeCodeNewInit
  }
})

test('NEW_INIT prompt preserves existing root CLAUDE.md by default', async () => {
  process.env.CLAUDE_CODE_NEW_INIT = '1'

  mock.module('../projectOnboardingState.js', () => ({
    maybeMarkProjectOnboardingComplete: () => {},
  }))
  mock.module('./initMode.js', () => ({
    isNewInitEnabled: () => true,
  }))

  const command = await importInitCommand()
  const blocks = await command.getPromptForCommand()

  expect(blocks).toHaveLength(1)
  expect(blocks[0]?.type).toBe('text')
  expect(String(blocks[0]?.text)).toContain(
    'checked-in root `CLAUDE.md` and does NOT already have a root `AGENTS.md`',
  )
  expect(String(blocks[0]?.text)).toContain(
    'do NOT silently create a second root instruction file',
  )
  expect(String(blocks[0]?.text)).toContain(
    'update the existing root `CLAUDE.md` in place by default',
  )
})

@@ -1,7 +1,6 @@
|
||||
import { feature } from 'bun:bundle'
|
||||
import type { Command } from '../commands.js'
|
||||
import { maybeMarkProjectOnboardingComplete } from '../projectOnboardingState.js'
|
||||
import { isEnvTruthy } from '../utils/envUtils.js'
|
||||
import { isNewInitEnabled } from './initMode.js'
|
||||
|
||||
const OLD_INIT_PROMPT = `Please analyze this codebase and create a CLAUDE.md file, which will be given to future instances of Claude Code to operate in this repository.
|
||||
|
||||
@@ -25,19 +24,19 @@ Usage notes:
|
||||
This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.
|
||||
\`\`\``
|
||||
|
||||
const NEW_INIT_PROMPT = `Set up a minimal CLAUDE.md (and optionally skills and hooks) for this repo. CLAUDE.md is loaded into every Claude Code session, so it must be concise — only include what Claude would get wrong without it.
|
||||
const NEW_INIT_PROMPT = `Set up a minimal AGENTS.md (and optionally CLAUDE.local.md, skills, and hooks) for this repo. The root project instruction file is loaded into every Claude Code session, so it must be concise — only include what Claude would get wrong without it.
|
||||
|
||||
## Phase 1: Ask what to set up
|
||||
|
||||
Use AskUserQuestion to find out what the user wants:
|
||||
|
||||
- "Which CLAUDE.md files should /init set up?"
|
||||
Options: "Project CLAUDE.md" | "Personal CLAUDE.local.md" | "Both project + personal"
|
||||
- "Which instruction files should /init set up?"
|
||||
Options: "Project AGENTS.md" | "Personal CLAUDE.local.md" | "Both project + personal"
|
||||
Description for project: "Team-shared instructions checked into source control — architecture, coding standards, common workflows."
|
||||
Description for personal: "Your private preferences for this project (gitignored, not shared) — your role, sandbox URLs, preferred test data, workflow quirks."
|
||||
|
||||
- "Also set up skills and hooks?"
|
||||
Options: "Skills + hooks" | "Skills only" | "Hooks only" | "Neither, just CLAUDE.md"
|
||||
Options: "Skills + hooks" | "Skills only" | "Hooks only" | "Neither, just the instruction file(s)"
|
||||
Description for skills: "On-demand capabilities you or Claude invoke with \`/skill-name\` — good for repeatable workflows and reference knowledge."
|
||||
Description for hooks: "Deterministic shell commands that run on tool events (e.g., format after every edit). Claude can't skip them."
|
||||
|
||||
@@ -59,24 +58,24 @@ Note what you could NOT figure out from code alone — these become interview qu
|
||||
|
||||
## Phase 3: Fill in the gaps
|
||||
|
||||
Use AskUserQuestion to gather what you still need to write good CLAUDE.md files and skills. Ask only things the code can't answer.
|
||||
Use AskUserQuestion to gather what you still need to write good instruction files and skills. Ask only things the code can't answer.
|
||||
|
||||
If the user chose project CLAUDE.md or both: ask about codebase practices — non-obvious commands, gotchas, branch/PR conventions, required env setup, testing quirks. Skip things already in README or obvious from manifest files. Do not mark any options as "recommended" — this is about how their team works, not best practices.
|
||||
If the user chose project AGENTS.md or both: ask about codebase practices — non-obvious commands, gotchas, branch/PR conventions, required env setup, testing quirks. Skip things already in README or obvious from manifest files. Do not mark any options as "recommended" — this is about how their team works, not best practices.
|
||||
|
||||
If the user chose personal CLAUDE.local.md or both: ask about them, not the codebase. Do not mark any options as "recommended" — this is about their personal preferences, not best practices. Examples of questions:
|
||||
- What's their role on the team? (e.g., "backend engineer", "data scientist", "new hire onboarding")
|
||||
- How familiar are they with this codebase and its languages/frameworks? (so Claude can calibrate explanation depth)
|
||||
- Do they have personal sandbox URLs, test accounts, API key paths, or local setup details Claude should know?
|
||||
- Only if Phase 2 found multiple git worktrees: ask whether their worktrees are nested inside the main repo (e.g., \`.claude/worktrees/<name>/\`) or siblings/external (e.g., \`../myrepo-feature/\`). If nested, the upward file walk finds the main repo's CLAUDE.local.md automatically — no special handling needed. If sibling/external, the personal content should live in a home-directory file (e.g., \`~/.claude/<project-name>-instructions.md\`) and each worktree gets a one-line CLAUDE.local.md stub that imports it: \`@~/.claude/<project-name>-instructions.md\`. Never put this import in the project CLAUDE.md — that would check a personal reference into the team-shared file.
|
||||
- Only if Phase 2 found multiple git worktrees: ask whether their worktrees are nested inside the main repo (e.g., \`.claude/worktrees/<name>/\`) or siblings/external (e.g., \`../myrepo-feature/\`). If nested, the upward file walk finds the main repo's CLAUDE.local.md automatically — no special handling needed. If sibling/external, the personal content should live in a home-directory file (e.g., \`~/.claude/<project-name>-instructions.md\`) and each worktree gets a one-line CLAUDE.local.md stub that imports it: \`@~/.claude/<project-name>-instructions.md\`. Never put this import in the project AGENTS.md — that would check a personal reference into the team-shared file.
|
||||
- Any communication preferences? (e.g., "be terse", "always explain tradeoffs", "don't summarize at the end")
|
||||
|
||||
**Synthesize a proposal from Phase 2 findings** — e.g., format-on-edit if a formatter exists, a project verification workflow if tests exist, a CLAUDE.md note for anything from the gap-fill answers that's a guideline rather than a workflow. For each, pick the artifact type that fits, **constrained by the Phase 1 skills+hooks choice**:
|
||||
**Synthesize a proposal from Phase 2 findings** — e.g., format-on-edit if a formatter exists, a project verification workflow if tests exist, an AGENTS.md note for anything from the gap-fill answers that's a guideline rather than a workflow. For each, pick the artifact type that fits, **constrained by the Phase 1 skills+hooks choice**:
|
||||
|
||||
- **Hook** (stricter) — deterministic shell command on a tool event; Claude can't skip it. Fits mechanical, fast, per-edit steps: formatting, linting, running a quick test on the changed file.
|
||||
- **Skill** (on-demand) — you or Claude invoke \`/skill-name\` when you want it. Fits workflows that don't belong on every edit: deep verification, session reports, deploys.
|
||||
- **CLAUDE.md note** (looser) — influences Claude's behavior but not enforced. Fits communication/thinking preferences: "plan before coding", "be terse", "explain tradeoffs".
|
||||
- **AGENTS.md note** (looser) — influences Claude's behavior but not enforced. Fits communication/thinking preferences: "plan before coding", "be terse", "explain tradeoffs".
|
||||
|
||||
**Respect Phase 1's skills+hooks choice as a hard filter**: if the user picked "Skills only", downgrade any hook you'd suggest to a skill or a CLAUDE.md note. If "Hooks only", downgrade skills to hooks (where mechanically possible) or notes. If "Neither", everything becomes a CLAUDE.md note. Never propose an artifact type the user didn't opt into.
|
||||
**Respect Phase 1's skills+hooks choice as a hard filter**: if the user picked "Skills only", downgrade any hook you'd suggest to a skill or an AGENTS.md note. If "Hooks only", downgrade skills to hooks (where mechanically possible) or notes. If "Neither", everything becomes an AGENTS.md note. Never propose an artifact type the user didn't opt into.
|
||||
|
||||
**Show the proposal via AskUserQuestion's \`preview\` field, not as a separate text message** — the dialog overlays your output, so preceding text is hidden. The \`preview\` field renders markdown in a side-panel (like plan mode); the \`question\` field is plain-text-only. Structure it as:
|
||||
|
||||
@@ -86,17 +85,19 @@ If the user chose personal CLAUDE.local.md or both: ask about them, not the code
|
||||
|
||||
• **Format-on-edit hook** (automatic) — \`ruff format <file>\` via PostToolUse
|
||||
• **Verification workflow** (on-demand) — \`make lint && make typecheck && make test\`
|
||||
• **CLAUDE.md note** (guideline) — "run lint/typecheck/test before marking done"
|
||||
• **AGENTS.md note** (guideline) — "run lint/typecheck/test before marking done"
|
||||
|
||||
- Option labels stay short ("Looks good", "Drop the hook", "Drop the skill") — the tool auto-adds an "Other" free-text option, so don't add your own catch-all.
|
||||
|
||||
**Build the preference queue** from the accepted proposal. Each entry: {type: hook|skill|note, description, target file, any Phase-2-sourced details like the actual test/format command}. Phases 4-7 consume this queue.
|
||||
|
||||
## Phase 4: Write CLAUDE.md (if user chose project or both)
|
||||
## Phase 4: Write AGENTS.md (if user chose project or both)
|
||||
|
||||
Write a minimal CLAUDE.md at the project root. Every line must pass this test: "Would removing this cause Claude to make mistakes?" If no, cut it.
|
||||
Write a minimal AGENTS.md at the project root. Every line must pass this test: "Would removing this cause Claude to make mistakes?" If no, cut it.
|
||||
|
||||
**Consume \`note\` entries from the Phase 3 preference queue whose target is CLAUDE.md** (team-level notes) — add each as a concise line in the most relevant section. These are the behaviors the user wants Claude to follow but didn't need guaranteed (e.g., "propose a plan before implementing", "explain the tradeoffs when refactoring"). Leave personal-targeted notes for Phase 5.
|
||||
If the repo already has a checked-in root \`CLAUDE.md\` and does NOT already have a root \`AGENTS.md\`, do NOT silently create a second root instruction file. In that case, update the existing root \`CLAUDE.md\` in place by default. Only create or migrate to root \`AGENTS.md\` if the user explicitly asks to migrate.
|
||||
|
||||
**Consume \`note\` entries from the Phase 3 preference queue whose target is AGENTS.md** (team-level notes) — add each as a concise line in the most relevant section. These are the behaviors the user wants Claude to follow but didn't need guaranteed (e.g., "propose a plan before implementing", "explain the tradeoffs when refactoring"). Leave personal-targeted notes for Phase 5.
|
||||
|
||||
Include:
|
||||
- Build/test/lint commands Claude can't guess (non-standard scripts, flags, or sequences)
|
||||
@@ -111,7 +112,7 @@ Exclude:
|
||||
- File-by-file structure or component lists (Claude can discover these by reading the codebase)
|
||||
- Standard language conventions Claude already knows
|
||||
- Generic advice ("write clean code", "handle errors")
|
||||
- Detailed API docs or long references — use \`@path/to/import\` syntax instead (e.g., \`@docs/api-reference.md\`) to inline content on demand without bloating CLAUDE.md
|
||||
- Detailed API docs or long references — use \`@path/to/import\` syntax instead (e.g., \`@docs/api-reference.md\`) to inline content on demand without bloating AGENTS.md
|
||||
- Information that changes frequently — reference the source with \`@path/to/import\` so Claude always reads the current version
|
||||
- Long tutorials or walkthroughs (move to a separate file and reference with \`@path/to/import\`, or put in a skill)
|
||||
- Commands obvious from manifest files (e.g., standard "npm test", "cargo test", "pytest")
|
||||
@@ -123,20 +124,20 @@ Do not repeat yourself and do not make up sections like "Common Development Task
|
||||
Prefix the file with:
|
||||
|
||||
\`\`\`
|
||||
# CLAUDE.md
|
||||
# AGENTS.md
|
||||
|
||||
This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.
|
||||
\`\`\`
|
||||
|
||||
If CLAUDE.md already exists: read it, propose specific changes as diffs, and explain why each change improves it. Do not silently overwrite.
|
||||
If AGENTS.md already exists: read it, propose specific changes as diffs, and explain why each change improves it. Do not silently overwrite.
|
||||
|
||||
For projects with multiple concerns, suggest organizing instructions into \`.claude/rules/\` as separate focused files (e.g., \`code-style.md\`, \`testing.md\`, \`security.md\`). These are loaded automatically alongside CLAUDE.md and can be scoped to specific file paths using \`paths\` frontmatter.
|
||||
For projects with multiple concerns, suggest organizing instructions into \`.claude/rules/\` as separate focused files (e.g., \`code-style.md\`, \`testing.md\`, \`security.md\`). These are loaded automatically alongside AGENTS.md and can be scoped to specific file paths using \`paths\` frontmatter.
|
||||
|
||||
For projects with distinct subdirectories (monorepos, multi-module projects, etc.): mention that subdirectory CLAUDE.md files can be added for module-specific instructions (they're loaded automatically when Claude works in those directories). Offer to create them if the user wants.
|
||||
For projects with distinct subdirectories (monorepos, multi-module projects, etc.): mention that subdirectory AGENTS.md files can be added for module-specific instructions (they're loaded automatically when Claude works in those directories). Offer to create them if the user wants.
|
||||
|
||||
## Phase 5: Write CLAUDE.local.md (if user chose personal or both)
|
||||
|
||||
Write a minimal CLAUDE.local.md at the project root. This file is automatically loaded alongside CLAUDE.md. After creating it, add \`CLAUDE.local.md\` to the project's .gitignore so it stays private.
|
||||
Write a minimal CLAUDE.local.md at the project root. This file is automatically loaded alongside AGENTS.md. After creating it, add \`CLAUDE.local.md\` to the project's .gitignore so it stays private.
|
||||
|
||||
**Consume \`note\` entries from the Phase 3 preference queue whose target is CLAUDE.local.md** (personal-level notes) — add each as a concise line. If the user chose personal-only in Phase 1, this is the sole consumer of note entries.
|
||||
|
||||
@@ -147,7 +148,7 @@ Include:
|
||||
|
||||
Keep it short — only include what would make Claude's responses noticeably better for this user.
|
||||
|
||||
If Phase 2 found multiple git worktrees and the user confirmed they use sibling/external worktrees (not nested inside the main repo): the upward file walk won't find a single CLAUDE.local.md from all worktrees. Write the actual personal content to \`~/.claude/<project-name>-instructions.md\` and make CLAUDE.local.md a one-line stub that imports it: \`@~/.claude/<project-name>-instructions.md\`. The user can copy this one-line stub to each sibling worktree. Never put this import in the project CLAUDE.md. If worktrees are nested inside the main repo (e.g., \`.claude/worktrees/\`), no special handling is needed — the main repo's CLAUDE.local.md is found automatically.
|
||||
If Phase 2 found multiple git worktrees and the user confirmed they use sibling/external worktrees (not nested inside the main repo): the upward file walk won't find a single CLAUDE.local.md from all worktrees. Write the actual personal content to \`~/.claude/<project-name>-instructions.md\` and make CLAUDE.local.md a one-line stub that imports it: \`@~/.claude/<project-name>-instructions.md\`. The user can copy this one-line stub to each sibling worktree. Never put this import in the project AGENTS.md. If worktrees are nested inside the main repo (e.g., \`.claude/worktrees/\`), no special handling is needed — the main repo's CLAUDE.local.md is found automatically.
|
||||
|
||||
If CLAUDE.local.md already exists: read it, propose specific additions, and do not silently overwrite.
|
||||
|
||||
@@ -183,7 +184,7 @@ Both the user (\`/<skill-name>\`) and Claude can invoke skills by default. For w
|
||||
|
||||
## Phase 7: Suggest additional optimizations
|
||||
|
||||
Tell the user you're going to suggest a few additional optimizations now that CLAUDE.md and skills (if chosen) are in place.
|
||||
Tell the user you're going to suggest a few additional optimizations now that AGENTS.md and skills (if chosen) are in place.
|
||||
|
||||
Check the environment and ask about each gap you find (use AskUserQuestion):
|
||||
|
||||
@@ -195,7 +196,7 @@ Check the environment and ask about each gap you find (use AskUserQuestion):
|
||||
|
||||
For each hook preference (from the queue or the formatter fallback):
|
||||
|
||||
1. Target file: default based on the Phase 1 CLAUDE.md choice — project → \`.claude/settings.json\` (team-shared, committed); personal → \`.claude/settings.local.json\`. Only ask if the user chose "both" in Phase 1 or the preference is ambiguous. Ask once for all hooks, not per-hook.
|
||||
1. Target file: default based on the Phase 1 instruction-file choice — project → \`.claude/settings.json\` (team-shared, committed); personal → \`.claude/settings.local.json\`. Only ask if the user chose "both" in Phase 1 or the preference is ambiguous. Ask once for all hooks, not per-hook.
|
||||
|
||||
2. Pick the event and matcher from the preference:
|
||||
- "after every edit" → \`PostToolUse\` with matcher \`Write|Edit\`
|
||||
@@ -227,11 +228,9 @@ const command = {
|
||||
type: 'prompt',
|
||||
name: 'init',
|
||||
get description() {
|
||||
return feature('NEW_INIT') &&
|
||||
(process.env.USER_TYPE === 'ant' ||
|
||||
isEnvTruthy(process.env.CLAUDE_CODE_NEW_INIT))
|
||||
? 'Initialize new CLAUDE.md file(s) and optional skills/hooks with codebase documentation'
|
||||
: 'Initialize a new CLAUDE.md file with codebase documentation'
|
||||
return isNewInitEnabled()
|
||||
? 'Initialize new project instruction file(s) and optional skills/hooks with codebase documentation'
|
||||
: 'Initialize a new project instruction file with codebase documentation'
|
||||
},
|
||||
contentLength: 0, // Dynamic content
|
||||
progressMessage: 'analyzing your codebase',
|
||||
@@ -242,12 +241,7 @@ const command = {
|
||||
return [
|
||||
{
|
||||
type: 'text',
|
||||
text:
|
||||
feature('NEW_INIT') &&
|
||||
(process.env.USER_TYPE === 'ant' ||
|
||||
isEnvTruthy(process.env.CLAUDE_CODE_NEW_INIT))
|
||||
? NEW_INIT_PROMPT
|
||||
: OLD_INIT_PROMPT,
|
||||
text: isNewInitEnabled() ? NEW_INIT_PROMPT : OLD_INIT_PROMPT,
|
||||
},
|
||||
]
|
||||
},
|
||||
|
||||
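Reading the Phase 1 hook description together with the Phase 7 mapping above ("after every edit" becomes `PostToolUse` with matcher `Write|Edit`, written to `.claude/settings.json` or `.claude/settings.local.json`), the format-on-edit proposal example would land as a single settings entry. A minimal sketch, assuming a hooks schema of event, matcher, and command (the exact field names are an assumption, not confirmed by this diff):

```ts
// Hypothetical shape of the team-shared .claude/settings.json entry for the
// "format after every edit" preference. Field names are assumed; the
// `ruff format <file>` command comes from the proposal example in the prompt.
const formatOnEditSettings = {
  hooks: {
    PostToolUse: [
      {
        matcher: 'Write|Edit',
        hooks: [{ type: 'command', command: 'ruff format <file>' }],
      },
    ],
  },
}
```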
13
src/commands/initMode.ts
Normal file
@@ -0,0 +1,13 @@
import { feature } from 'bun:bundle'
import { isEnvTruthy } from '../utils/envUtils.js'

export function isNewInitEnabled(): boolean {
  if (feature('NEW_INIT')) {
    return (
      process.env.USER_TYPE === 'ant' ||
      isEnvTruthy(process.env.CLAUDE_CODE_NEW_INIT)
    )
  }

  return false
}
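For orientation, both guards above must pass before the new flow is enabled. A tiny sketch, assuming the `NEW_INIT` flag was compiled into the bundle:

```ts
// With feature('NEW_INIT') compiled in, either condition opens the gate:
process.env.CLAUDE_CODE_NEW_INIT = '1' // or run with USER_TYPE === 'ant'
isNewInitEnabled() // now true, so /init serves NEW_INIT_PROMPT (see init.ts above)
```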
@@ -39,16 +39,16 @@ type InstallState = {
  message: string;
  warnings?: string[];
};
function getInstallationPath(): string {
export function getInstallationPath(): string {
  const isWindows = env.platform === 'win32';
  const homeDir = homedir();
  if (isWindows) {
    // Convert to Windows-style path
    const windowsPath = join(homeDir, '.local', 'bin', 'claude.exe');
    const windowsPath = join(homeDir, '.local', 'bin', 'openclaude.exe');
    // Replace forward slashes with backslashes for Windows display
    return windowsPath.replace(/\//g, '\\');
  }
  return '~/.local/bin/claude';
  return '~/.local/bin/openclaude';
}
function SetupNotes(t0) {
  const $ = _c(5);

@@ -1,20 +1,44 @@
import { afterEach, expect, mock, test } from 'bun:test'

import { getAdditionalModelOptionsCacheScope } from '../../services/api/providerConfig.js'
import { getAPIProvider } from '../../utils/model/providers.js'

const originalEnv = {
  CLAUDE_CODE_USE_OPENAI: process.env.CLAUDE_CODE_USE_OPENAI,
  CLAUDE_CODE_USE_GEMINI: process.env.CLAUDE_CODE_USE_GEMINI,
  CLAUDE_CODE_USE_GITHUB: process.env.CLAUDE_CODE_USE_GITHUB,
  CLAUDE_CODE_USE_MISTRAL: process.env.CLAUDE_CODE_USE_MISTRAL,
  CLAUDE_CODE_USE_BEDROCK: process.env.CLAUDE_CODE_USE_BEDROCK,
  CLAUDE_CODE_USE_VERTEX: process.env.CLAUDE_CODE_USE_VERTEX,
  CLAUDE_CODE_USE_FOUNDRY: process.env.CLAUDE_CODE_USE_FOUNDRY,
  OPENAI_BASE_URL: process.env.OPENAI_BASE_URL,
  OPENAI_API_BASE: process.env.OPENAI_API_BASE,
  OPENAI_MODEL: process.env.OPENAI_MODEL,
}

afterEach(() => {
  mock.restore()
  process.env.CLAUDE_CODE_USE_OPENAI = originalEnv.CLAUDE_CODE_USE_OPENAI
  process.env.CLAUDE_CODE_USE_GEMINI = originalEnv.CLAUDE_CODE_USE_GEMINI
  process.env.CLAUDE_CODE_USE_GITHUB = originalEnv.CLAUDE_CODE_USE_GITHUB
  process.env.CLAUDE_CODE_USE_MISTRAL = originalEnv.CLAUDE_CODE_USE_MISTRAL
  process.env.CLAUDE_CODE_USE_BEDROCK = originalEnv.CLAUDE_CODE_USE_BEDROCK
  process.env.CLAUDE_CODE_USE_VERTEX = originalEnv.CLAUDE_CODE_USE_VERTEX
  process.env.CLAUDE_CODE_USE_FOUNDRY = originalEnv.CLAUDE_CODE_USE_FOUNDRY
  process.env.OPENAI_BASE_URL = originalEnv.OPENAI_BASE_URL
  process.env.OPENAI_API_BASE = originalEnv.OPENAI_API_BASE
  process.env.OPENAI_MODEL = originalEnv.OPENAI_MODEL
})

test('opens the model picker without awaiting local model discovery refresh', async () => {
  process.env.CLAUDE_CODE_USE_OPENAI = '1'
  delete process.env.CLAUDE_CODE_USE_GEMINI
  delete process.env.CLAUDE_CODE_USE_GITHUB
  delete process.env.CLAUDE_CODE_USE_MISTRAL
  delete process.env.CLAUDE_CODE_USE_BEDROCK
  delete process.env.CLAUDE_CODE_USE_VERTEX
  delete process.env.CLAUDE_CODE_USE_FOUNDRY
  delete process.env.OPENAI_API_BASE
  process.env.OPENAI_BASE_URL = 'http://127.0.0.1:8080/v1'
  process.env.OPENAI_MODEL = 'qwen2.5-coder-7b-instruct'

@@ -30,7 +54,9 @@ test('opens the model picker without awaiting local model discovery refresh', as
  discoverOpenAICompatibleModelOptions,
}))

const { call } = await import(`./model.js?ts=${Date.now()}-${Math.random()}`)
expect(getAdditionalModelOptionsCacheScope()).toBe('openai:http://127.0.0.1:8080/v1')

const { call } = await import('./model.js')
const result = await Promise.race([
  call(() => {}, {} as never, ''),
  new Promise(resolve => setTimeout(() => resolve('timeout'), 50)),
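The test above saves and restores each provider variable by hand in `afterEach`. The same pattern can be factored into a small helper; this sketch is not part of the diff, and the helper name is hypothetical. Note the `delete`: assigning `undefined` back into `process.env` would store the literal string "undefined".

```ts
// Hypothetical helper: snapshot the named env keys and return a restore
// function suitable for afterEach(). Keys that were unset are deleted rather
// than reassigned, so process.env never gains a literal "undefined" value.
function snapshotEnv(keys: string[]): () => void {
  const saved = new Map(keys.map(key => [key, process.env[key]]))
  return () => {
    for (const [key, value] of saved) {
      if (value === undefined) {
        delete process.env[key]
      } else {
        process.env[key] = value
      }
    }
  }
}

// Usage sketch with bun:test:
//   const restore = snapshotEnv(['CLAUDE_CODE_USE_OPENAI', 'OPENAI_BASE_URL'])
//   afterEach(restore)
```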
@@ -284,7 +284,7 @@ function haveSameModelOptions(left: ModelOption[], right: ModelOption[]): boolea
  });
}
async function refreshOpenAIModelOptionsCache(): Promise<void> {
  if (getAPIProvider() !== 'openai') {
  if (!getAdditionalModelOptionsCacheScope()?.startsWith('openai:')) {
    return;
  }
  try {

@@ -4,7 +4,7 @@ const onboardGithub: Command = {
  name: 'onboard-github',
  aliases: ['onboarding-github', 'onboardgithub', 'onboardinggithub'],
  description:
    'Interactive setup for GitHub Models: device login or PAT, saved to secure storage',
    'Interactive setup for GitHub Copilot: OAuth device login stored in secure storage',
  type: 'local-jsx',
  load: () => import('./onboard-github.js'),
}

@@ -2,9 +2,9 @@ import * as React from 'react'
import { useCallback, useState } from 'react'
import { Select } from '../../components/CustomSelect/select.js'
import { Spinner } from '../../components/Spinner.js'
import TextInput from '../../components/TextInput.js'
import { Box, Text } from '../../ink.js'
import {
  exchangeForCopilotToken,
  openVerificationUri,
  pollAccessToken,
  requestDeviceCode,
@@ -15,7 +15,7 @@ import {
  readGithubModelsToken,
  saveGithubModelsToken,
} from '../../utils/githubModelsCredentials.js'
import { updateSettingsForSource } from '../../utils/settings/settings.js'
import { getSettingsForSource, updateSettingsForSource } from '../../utils/settings/settings.js'

const DEFAULT_MODEL = 'github:copilot'
const FORCE_RELOGIN_ARGS = new Set([
@@ -27,11 +27,25 @@ const FORCE_RELOGIN_ARGS = new Set([
  '--reauth',
])

type Step =
  | 'menu'
  | 'device-busy'
  | 'pat'
  | 'error'
type Step = 'menu' | 'device-busy' | 'error'

const PROVIDER_SPECIFIC_KEYS = new Set([
  'CLAUDE_CODE_USE_OPENAI',
  'CLAUDE_CODE_USE_GEMINI',
  'CLAUDE_CODE_USE_BEDROCK',
  'CLAUDE_CODE_USE_VERTEX',
  'CLAUDE_CODE_USE_FOUNDRY',
  'OPENAI_BASE_URL',
  'OPENAI_API_BASE',
  'OPENAI_API_KEY',
  'OPENAI_MODEL',
  'GEMINI_API_KEY',
  'GOOGLE_API_KEY',
  'GEMINI_BASE_URL',
  'GEMINI_MODEL',
  'GEMINI_ACCESS_TOKEN',
  'GEMINI_AUTH_MODE',
])

export function shouldForceGithubRelogin(args?: string): boolean {
  const normalized = (args ?? '').trim().toLowerCase()
@@ -41,15 +55,29 @@ export function shouldForceGithubRelogin(args?: string): boolean {
  return normalized.split(/\s+/).some(arg => FORCE_RELOGIN_ARGS.has(arg))
}

const GITHUB_PAT_PREFIXES = ['ghp_', 'gho_', 'ghs_', 'ghr_', 'github_pat_']

function isGithubPat(token: string): boolean {
  return GITHUB_PAT_PREFIXES.some(prefix => token.startsWith(prefix))
}

export function hasExistingGithubModelsLoginToken(
  env: NodeJS.ProcessEnv = process.env,
  storedToken?: string,
): boolean {
  const envToken = env.GITHUB_TOKEN?.trim() || env.GH_TOKEN?.trim()
  if (envToken) {
    // PATs are no longer supported - require OAuth re-auth
    if (isGithubPat(envToken)) {
      return false
    }
    return true
  }
  const persisted = (storedToken ?? readGithubModelsToken())?.trim()
  // PATs are no longer supported - require OAuth re-auth
  if (persisted && isGithubPat(persisted)) {
    return false
  }
  return Boolean(persisted)
}

@@ -97,8 +125,21 @@ export function applyGithubOnboardingProcessEnv(
}

function mergeUserSettingsEnv(model: string): { ok: boolean; detail?: string } {
  const currentSettings = getSettingsForSource('userSettings')
  const currentEnv = currentSettings?.env ?? {}

  const newEnv: Record<string, string> = {}
  for (const [key, value] of Object.entries(currentEnv)) {
    if (!PROVIDER_SPECIFIC_KEYS.has(key)) {
      newEnv[key] = value
    }
  }

  newEnv.CLAUDE_CODE_USE_GITHUB = '1'
  newEnv.OPENAI_MODEL = model

  const { error } = updateSettingsForSource('userSettings', {
    env: buildGithubOnboardingSettingsEnv(model) as any,
    env: newEnv,
  })
  if (error) {
    return { ok: false, detail: error.message }
  }
@@ -143,12 +184,14 @@ function OnboardGithub(props: {
  user_code: string
  verification_uri: string
} | null>(null)
const [patDraft, setPatDraft] = useState('')
const [cursorOffset, setCursorOffset] = useState(0)

const finalize = useCallback(
  async (token: string, model: string = DEFAULT_MODEL) => {
    const saved = saveGithubModelsToken(token)
  async (
    token: string,
    model: string = DEFAULT_MODEL,
    oauthToken?: string,
  ) => {
    const saved = saveGithubModelsToken(token, oauthToken)
    if (!saved.success) {
      setErrorMsg(saved.warning ?? 'Could not save token to secure storage.')
      setStep('error')
@@ -165,8 +208,18 @@ function OnboardGithub(props: {
      setStep('error')
      return
    }
    // Clear stale provider-specific env vars from the current session
    // so resolveProviderRequest() doesn't pick up a previous provider's
    // base URL or key after onboarding completes.
    for (const key of PROVIDER_SPECIFIC_KEYS) {
      delete process.env[key]
    }
    process.env.CLAUDE_CODE_USE_GITHUB = '1'
    process.env.OPENAI_MODEL = model.trim() || DEFAULT_MODEL
    hydrateGithubModelsTokenFromSecureStorage()
    onChangeAPIKey()
    onDone(
      'GitHub Models onboard complete. Token stored in secure storage; user settings updated. Restart if the model does not switch.',
      'GitHub Copilot onboard complete. Copilot token and OAuth token stored in secure storage (Windows/Linux: ~/.claude/.credentials.json, macOS: Keychain fallback to ~/.claude/.credentials.json); user settings updated. Restart if the model does not switch.',
      { display: 'user' },
    )
  },
@@ -184,11 +237,12 @@ function OnboardGithub(props: {
  verification_uri: device.verification_uri,
})
await openVerificationUri(device.verification_uri)
const token = await pollAccessToken(device.device_code, {
const oauthToken = await pollAccessToken(device.device_code, {
  initialInterval: device.interval,
  timeoutSeconds: device.expires_in,
})
await finalize(token, DEFAULT_MODEL)
const copilotToken = await exchangeForCopilotToken(oauthToken)
await finalize(copilotToken.token, DEFAULT_MODEL, oauthToken)
} catch (e) {
  setErrorMsg(e instanceof Error ? e.message : String(e))
  setStep('error')
@@ -227,7 +281,7 @@ function OnboardGithub(props: {
if (step === 'device-busy') {
  return (
    <Box flexDirection="column" gap={1}>
      <Text>GitHub device login</Text>
      <Text>GitHub Copilot sign-in</Text>
      {deviceHint ? (
        <>
          <Text>
@@ -246,43 +300,11 @@ function OnboardGithub(props: {
  )
}

if (step === 'pat') {
  return (
    <Box flexDirection="column" gap={1}>
      <Text>Paste a GitHub personal access token with access to GitHub Models.</Text>
      <Text dimColor>Input is masked. Enter to submit; Esc to go back.</Text>
      <TextInput
        value={patDraft}
        mask="*"
        onChange={setPatDraft}
        onSubmit={async (value: string) => {
          const t = value.trim()
          if (!t) {
            return
          }
          await finalize(t, DEFAULT_MODEL)
        }}
        onExit={() => {
          setStep('menu')
          setPatDraft('')
        }}
        columns={80}
        cursorOffset={cursorOffset}
        onChangeCursorOffset={setCursorOffset}
      />
    </Box>
  )
}

const menuOptions = [
  {
    label: 'Sign in with browser (device code)',
    label: 'Sign in with browser',
    value: 'device' as const,
  },
  {
    label: 'Paste personal access token',
    value: 'pat' as const,
  },
  {
    label: 'Cancel',
    value: 'cancel' as const,
@@ -291,7 +313,7 @@ function OnboardGithub(props: {

return (
  <Box flexDirection="column" gap={1}>
    <Text bold>GitHub Models setup</Text>
    <Text bold>GitHub Copilot setup</Text>
    <Text dimColor>
      Stores your token in the OS credential store (macOS Keychain when available)
      and enables CLAUDE_CODE_USE_GITHUB in your user settings - no export
@@ -304,10 +326,6 @@ function OnboardGithub(props: {
      onDone('GitHub onboard cancelled', { display: 'system' })
      return
    }
    if (v === 'pat') {
      setStep('pat')
      return
    }
    void runDeviceFlow()
  }}
/>

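Condensing the device-flow change above: the poll now yields a GitHub OAuth token that is exchanged for a Copilot token before `finalize` stores both. A sketch of the happy path, assuming the helpers imported above behave as their call sites suggest (error handling and UI state omitted, so this is illustrative, not the component's code verbatim):

```ts
// Happy-path sketch of runDeviceFlow after this change; the identifiers are
// the ones imported in the diff above, and finalize() is the component callback.
async function deviceFlowSketch() {
  const device = await requestDeviceCode() // yields user_code, verification_uri, device_code
  await openVerificationUri(device.verification_uri) // user confirms in the browser
  const oauthToken = await pollAccessToken(device.device_code, {
    initialInterval: device.interval,
    timeoutSeconds: device.expires_in,
  })
  // New in this change: exchange the OAuth token for a Copilot token and
  // pass both to finalize(), which stores them in secure storage.
  const copilotToken = await exchangeForCopilotToken(oauthToken)
  await finalize(copilotToken.token, DEFAULT_MODEL, oauthToken)
}
```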
@@ -22,11 +22,14 @@ import {
import {
  buildCodexProfileEnv,
  buildGeminiProfileEnv,
  buildMistralProfileEnv,
  buildOllamaProfileEnv,
  buildOpenAIProfileEnv,
  createProfileFile,
  DEFAULT_GEMINI_BASE_URL,
  DEFAULT_GEMINI_MODEL,
  DEFAULT_MISTRAL_BASE_URL,
  DEFAULT_MISTRAL_MODEL,
  deleteProfileFile,
  loadProfileFile,
  maskSecretForDisplay,
@@ -74,6 +77,14 @@ type Step =
  baseUrl: string | null
  defaultModel: string
}
| { name: 'mistral-key'; defaultModel: string }
| { name: 'mistral-base'; apiKey: string; defaultModel: string }
| {
    name: 'mistral-model'
    apiKey: string
    baseUrl: string | null
    defaultModel: string
  }
| { name: 'gemini-auth-method' }
| { name: 'gemini-key' }
| { name: 'gemini-access-token' }
@@ -116,6 +127,8 @@ type ProviderWizardDefaults = {
  openAIModel: string
  openAIBaseUrl: string
  geminiModel: string
  mistralModel: string
  mistralBaseUrl: string
}

function isEnvTruthy(value: string | undefined): boolean {
@@ -147,11 +160,19 @@ export function getProviderWizardDefaults(
  const safeGeminiModel =
    sanitizeProviderConfigValue(processEnv.GEMINI_MODEL, processEnv) ||
    DEFAULT_GEMINI_MODEL
  const safeMistralModel =
    sanitizeProviderConfigValue(processEnv.MISTRAL_MODEL, processEnv) ||
    DEFAULT_MISTRAL_MODEL
  const safeMistralBaseUrl =
    sanitizeProviderConfigValue(processEnv.MISTRAL_BASE_URL, processEnv) ||
    DEFAULT_MISTRAL_BASE_URL

  return {
    openAIModel: safeOpenAIModel,
    openAIBaseUrl: safeOpenAIBaseUrl,
    geminiModel: safeGeminiModel,
    mistralModel: safeMistralModel,
    mistralBaseUrl: safeMistralBaseUrl,
  }
}

@@ -178,6 +199,21 @@ export function buildCurrentProviderSummary(options?: {
  }
}

if (isEnvTruthy(processEnv.CLAUDE_CODE_USE_MISTRAL)) {
  return {
    providerLabel: 'Mistral',
    modelLabel: getSafeDisplayValue(
      processEnv.MISTRAL_MODEL ?? DEFAULT_MISTRAL_MODEL,
      processEnv
    ),
    endpointLabel: getSafeDisplayValue(
      processEnv.MISTRAL_BASE_URL ?? DEFAULT_MISTRAL_BASE_URL,
      processEnv
    ),
    savedProfileLabel,
  }
}

if (isEnvTruthy(processEnv.CLAUDE_CODE_USE_GITHUB)) {
  return {
    providerLabel: 'GitHub Models',
@@ -259,6 +295,24 @@ function buildSavedProfileSummary(
      ? 'configured'
      : undefined,
  }
case 'mistral':
  return {
    providerLabel: 'Mistral',
    modelLabel: getSafeDisplayValue(
      env.MISTRAL_MODEL ?? DEFAULT_MISTRAL_MODEL,
      process.env,
      env,
    ),
    endpointLabel: getSafeDisplayValue(
      env.MISTRAL_BASE_URL ?? DEFAULT_MISTRAL_BASE_URL,
      process.env,
      env,
    ),
    credentialLabel:
      maskSecretForDisplay(env.MISTRAL_API_KEY) !== undefined
        ? 'configured'
        : undefined,
  }
case 'codex':
  return {
    providerLabel: 'Codex',
@@ -473,6 +527,11 @@ function ProviderChooser({
  value: 'gemini',
  description: 'Use Google Gemini with API key, access token, or local ADC',
},
{
  label: 'Mistral',
  value: 'mistral',
  description: 'Use Mistral with API key',
},
{
  label: 'Codex',
  value: 'codex',
@@ -971,6 +1030,11 @@ export function ProviderWizard({
  })
} else if (value === 'gemini') {
  setStep({ name: 'gemini-auth-method' })
} else if (value === 'mistral') {
  setStep({
    name: 'mistral-key',
    defaultModel: defaults.mistralModel,
  })
} else if (value === 'clear') {
  const filePath = deleteProfileFile()
  onDone(`Removed saved provider profile at ${filePath}. Restart OpenClaude to go back to normal startup.`, {
@@ -1110,6 +1174,101 @@ export function ProviderWizard({
  />
)

case 'mistral-key':
  return (
    <TextEntryDialog
      resetStateKey={step.name}
      title="Mistral setup"
      subtitle="Step 1 of 3"
      description={
        process.env.MISTRAL_API_KEY
          ? 'Enter an API key, or leave this blank to reuse the current MISTRAL_API_KEY from this session.'
          : 'Enter the API key for your Mistral provider.'
      }
      initialValue=""
      placeholder="..."
      mask="*"
      allowEmpty={Boolean(process.env.MISTRAL_API_KEY)}
      validate={value => {
        const candidate = value.trim() || process.env.MISTRAL_API_KEY || ''
        return sanitizeApiKey(candidate)
          ? null
          : 'Enter a real API key. Placeholder values like SUA_CHAVE are not valid.'
      }}
      onSubmit={value => {
        const apiKey = value.trim() || process.env.MISTRAL_API_KEY || ''
        setStep({
          name: 'mistral-base',
          apiKey,
          defaultModel: step.defaultModel,
        })
      }}
      onCancel={() => setStep({ name: 'choose' })}
    />
  )

case 'mistral-base':
  return (
    <TextEntryDialog
      resetStateKey={step.name}
      title="Mistral setup"
      subtitle="Step 2 of 3"
      description={`Optionally enter a base URL. Leave blank for ${DEFAULT_MISTRAL_BASE_URL}.`}
      initialValue={
        defaults.mistralBaseUrl === DEFAULT_MISTRAL_BASE_URL
          ? ''
          : defaults.mistralBaseUrl
      }
      placeholder={DEFAULT_MISTRAL_BASE_URL}
      allowEmpty
      onSubmit={value => {
        setStep({
          name: 'mistral-model',
          apiKey: step.apiKey,
          baseUrl: value.trim() || null,
          defaultModel: step.defaultModel,
        })
      }}
      onCancel={() =>
        setStep({
          name: 'mistral-key',
          defaultModel: step.defaultModel,
        })
      }
    />
  )

case 'mistral-model':
  return (
    <TextEntryDialog
      resetStateKey={step.name}
      title="Mistral setup"
      subtitle="Step 3 of 3"
      description={`Enter a model name. Leave blank for ${step.defaultModel}.`}
      initialValue={defaults.mistralModel ?? step.defaultModel}
      placeholder={step.defaultModel}
      allowEmpty
      onSubmit={value => {
        const env = buildMistralProfileEnv({
          model: value.trim() || step.defaultModel,
          baseUrl: step.baseUrl,
          apiKey: step.apiKey,
          processEnv: process.env,
        })
        if (env) {
          finishProfileSave(onDone, 'mistral', env)
        }
      }}
      onCancel={() =>
        setStep({
          name: 'mistral-base',
          apiKey: step.apiKey,
          defaultModel: step.defaultModel,
        })
      }
    />
  )

case 'gemini-auth-method': {
  const hasShellGeminiKey = Boolean(
    process.env.GEMINI_API_KEY || process.env.GOOGLE_API_KEY,

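The three Mistral dialogs above thread collected values forward through the wizard's step state. A compressed, self-contained sketch of that state machine (the real component drives these transitions through `setStep` and then calls `buildMistralProfileEnv` and `finishProfileSave`; this standalone transition function is a hypothetical condensation of the three onSubmit handlers):

```ts
// Step union copied from the diff above.
type MistralStep =
  | { name: 'mistral-key'; defaultModel: string }
  | { name: 'mistral-base'; apiKey: string; defaultModel: string }
  | { name: 'mistral-model'; apiKey: string; baseUrl: string | null; defaultModel: string }

function nextMistralStep(step: MistralStep, input: string): MistralStep | 'save' {
  switch (step.name) {
    case 'mistral-key':
      // Step 1: a blank entry falls back to MISTRAL_API_KEY from the session.
      return {
        name: 'mistral-base',
        apiKey: input.trim() || process.env.MISTRAL_API_KEY || '',
        defaultModel: step.defaultModel,
      }
    case 'mistral-base':
      // Step 2: blank means "use DEFAULT_MISTRAL_BASE_URL", stored as null.
      return {
        name: 'mistral-model',
        apiKey: step.apiKey,
        baseUrl: input.trim() || null,
        defaultModel: step.defaultModel,
      }
    case 'mistral-model':
      // Step 3: the real code builds the profile env and saves it here.
      return 'save'
  }
}
```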
@@ -65,7 +65,7 @@ export async function call(onDone: (result?: string) => void, _context: unknown,

// Get the local settings path and make it relative to cwd
const localSettingsPath = getSettingsFilePathForSource('localSettings');
const relativePath = localSettingsPath ? relative(getCwdState(), localSettingsPath) : '.claude/settings.local.json';
const relativePath = localSettingsPath ? relative(getCwdState(), localSettingsPath) : '.openclaude/settings.local.json';
const message = color('success', themeName)(`Added "${cleanPattern}" to excluded commands in ${relativePath}`);
onDone(message);
return null;

12
src/commands/wiki/index.ts
Normal file
@@ -0,0 +1,12 @@
import type { Command } from '../../commands.js'

const wiki = {
  type: 'local-jsx',
  name: 'wiki',
  description: 'Initialize and inspect the OpenClaude project wiki',
  argumentHint: '[init|status]',
  immediate: true,
  load: () => import('./wiki.js'),
} satisfies Command

export default wiki
123
src/commands/wiki/wiki.tsx
Normal file
@@ -0,0 +1,123 @@
import React from 'react'
import { COMMON_HELP_ARGS, COMMON_INFO_ARGS } from '../../constants/xml.js'
import { ingestLocalWikiSource } from '../../services/wiki/ingest.js'
import { initializeWiki } from '../../services/wiki/init.js'
import { getWikiStatus } from '../../services/wiki/status.js'
import type {
  LocalJSXCommandCall,
  LocalJSXCommandOnDone,
} from '../../types/command.js'
import { getCwd } from '../../utils/cwd.js'

function renderHelp(): string {
  return `Usage: /wiki [init|status|ingest <path>]

Manage the OpenClaude project wiki stored in .openclaude/wiki.

Commands:
  /wiki init    Initialize the wiki structure in the current project
  /wiki status  Show wiki status and page/source counts
  /wiki ingest  Ingest a local file into wiki sources

Examples:
  /wiki init
  /wiki status
  /wiki ingest README.md`
}

function formatInitResult(result: Awaited<ReturnType<typeof initializeWiki>>): string {
  const lines = [`Initialized OpenClaude wiki at ${result.root}`]

  if (result.alreadyExisted) {
    lines.push('', 'Wiki already existed. No new files were created.')
    return lines.join('\n')
  }

  if (result.createdFiles.length > 0) {
    lines.push('', 'Created files:')
    for (const file of result.createdFiles) {
      lines.push(`- ${file}`)
    }
  }

  return lines.join('\n')
}

function formatStatus(status: Awaited<ReturnType<typeof getWikiStatus>>): string {
  if (!status.initialized) {
    return `OpenClaude wiki is not initialized in this project.\n\nRun /wiki init to create ${status.root}.`
  }

  return [
    'OpenClaude wiki status',
    '',
    `Root: ${status.root}`,
    `Pages: ${status.pageCount}`,
    `Sources: ${status.sourceCount}`,
    `Schema: ${status.hasSchema ? 'present' : 'missing'}`,
    `Index: ${status.hasIndex ? 'present' : 'missing'}`,
    `Log: ${status.hasLog ? 'present' : 'missing'}`,
    `Last updated: ${status.lastUpdatedAt ?? 'unknown'}`,
  ].join('\n')
}

function formatIngestResult(
  result: Awaited<ReturnType<typeof ingestLocalWikiSource>>,
): string {
  return [
    `Ingested ${result.sourceFile} into the OpenClaude wiki.`,
    '',
    `Title: ${result.title}`,
    `Source note: ${result.sourceNote}`,
    `Summary: ${result.summary}`,
  ].join('\n')
}

async function runWikiCommand(
  onDone: LocalJSXCommandOnDone,
  args: string,
): Promise<void> {
  const cwd = getCwd()
  const normalized = args.trim().toLowerCase()

  if (COMMON_HELP_ARGS.includes(normalized) || COMMON_INFO_ARGS.includes(normalized)) {
    onDone(renderHelp(), { display: 'system' })
    return
  }

  if (!normalized || normalized === 'status') {
    onDone(formatStatus(await getWikiStatus(cwd)), { display: 'system' })
    return
  }

  if (normalized === 'init') {
    onDone(formatInitResult(await initializeWiki(cwd)), { display: 'system' })
    return
  }

  if (normalized.startsWith('ingest')) {
    const pathArg = args.trim().slice('ingest'.length).trim()
    if (!pathArg) {
      onDone('Usage: /wiki ingest <local-file-path>', { display: 'system' })
      return
    }

    onDone(formatIngestResult(await ingestLocalWikiSource(cwd, pathArg)), {
      display: 'system',
    })
    return
  }

  onDone(`Unknown wiki subcommand: ${args.trim()}\n\n${renderHelp()}`, {
    display: 'system',
  })
}

export const call: LocalJSXCommandCall = async (
  onDone,
  _context,
  args,
): Promise<React.ReactNode> => {
  await runWikiCommand(onDone, args ?? '')
  return null
}
@@ -188,9 +188,9 @@ export function AutoUpdater({
  ✓ Update installed · Restart to apply
</Text>}
{(autoUpdaterResult?.status === 'install_failed' || autoUpdaterResult?.status === 'no_permissions') && <Text color="error" wrap="truncate">
  ✗ Auto-update failed · Try <Text bold>claude doctor</Text> or{' '}
  ✗ Auto-update failed · Try <Text bold>openclaude doctor</Text> or{' '}
  <Text bold>
    {hasLocalInstall ? `cd ~/.claude/local && npm update ${MACRO.PACKAGE_URL}` : `npm i -g ${MACRO.PACKAGE_URL}`}
    {hasLocalInstall ? `cd ~/.openclaude/local && npm update ${MACRO.PACKAGE_URL}` : `npm i -g ${MACRO.PACKAGE_URL}`}
  </Text>
</Text>}
</Box>;

@@ -31,9 +31,11 @@ export function BaseTextInput(t0) {
} = t0;
const {
  onInput,
  value,
  renderedValue,
  cursorLine,
  cursorColumn
  cursorColumn,
  offset,
} = inputState;
const t1 = Boolean(props.focus && props.showCursor && terminalFocus);
let t2;
@@ -78,7 +80,7 @@ export function BaseTextInput(t0) {
  renderedPlaceholder
} = renderPlaceholder({
  placeholder: props.placeholder,
  value: props.value,
  value,
  showCursor: props.showCursor,
  focus: props.focus,
  terminalFocus,
@@ -88,9 +90,9 @@ export function BaseTextInput(t0) {
useInput(wrappedOnInput, {
  isActive: props.focus
});
const commandWithoutArgs = props.value && props.value.trim().indexOf(" ") === -1 || props.value && props.value.endsWith(" ");
const showArgumentHint = Boolean(props.argumentHint && props.value && commandWithoutArgs && props.value.startsWith("/"));
const cursorFiltered = props.showCursor && props.highlights ? props.highlights.filter(h => h.dimColor || props.cursorOffset < h.start || props.cursorOffset >= h.end) : props.highlights;
const commandWithoutArgs = value && value.trim().indexOf(" ") === -1 || value && value.endsWith(" ");
const showArgumentHint = Boolean(props.argumentHint && value && commandWithoutArgs && value.startsWith("/"));
const cursorFiltered = props.showCursor && props.highlights ? props.highlights.filter(h => h.dimColor || offset < h.start || offset >= h.end) : props.highlights;
const {
  viewportCharOffset,
  viewportCharEnd
@@ -102,13 +104,13 @@ export function BaseTextInput(t0) {
})) : cursorFiltered;
const hasHighlights = filteredHighlights && filteredHighlights.length > 0;
if (hasHighlights) {
  return <Box ref={cursorRef}><HighlightedInput text={renderedValue} highlights={filteredHighlights} />{showArgumentHint && <Text dimColor={true}>{props.value?.endsWith(" ") ? "" : " "}{props.argumentHint}</Text>}{children}</Box>;
  return <Box ref={cursorRef}><HighlightedInput text={renderedValue} highlights={filteredHighlights} />{showArgumentHint && <Text dimColor={true}>{value.endsWith(" ") ? "" : " "}{props.argumentHint}</Text>}{children}</Box>;
}
const T0 = Box;
const T1 = Text;
const t4 = "truncate-end";
const t5 = showPlaceholder && props.placeholderElement ? props.placeholderElement : showPlaceholder && renderedPlaceholder ? <Ansi>{renderedPlaceholder}</Ansi> : <Ansi>{renderedValue}</Ansi>;
const t6 = showArgumentHint && <Text dimColor={true}>{props.value?.endsWith(" ") ? "" : " "}{props.argumentHint}</Text>;
const t6 = showArgumentHint && <Text dimColor={true}>{value.endsWith(" ") ? "" : " "}{props.argumentHint}</Text>;
let t7;
if ($[4] !== T1 || $[5] !== children || $[6] !== props || $[7] !== t5 || $[8] !== t6) {
  t7 = <T1 wrap={t4} dimColor={props.dimColor}>{t5}{t6}{children}</T1>;

@@ -103,7 +103,7 @@ test('login picker shows the third-party platform option', async () => {
  expect(output).toContain('3rd-party platform')
})

test('third-party provider branch opens the provider wizard', async () => {
test('third-party provider branch opens the first-run provider manager', async () => {
  const output = await renderFrame(
    <ConsoleOAuthFlow
      initialStatus={{ state: 'platform_setup' }}
@@ -111,7 +111,9 @@ test('third-party provider branch opens the provider wizard', async () => {
    />,
  )

  expect(output).toContain('Set up a provider profile')
  expect(output).toContain('OpenAI-compatible')
  expect(output).toContain('Set up provider')
  expect(output).toContain('Anthropic')
  expect(output).toContain('OpenAI')
  expect(output).toContain('Ollama')
  expect(output).toContain('LM Studio')
})

@@ -12,7 +12,7 @@ import { OAuthService } from '../services/oauth/index.js';
import { getOauthAccountInfo, validateForceLoginOrg } from '../utils/auth.js';
import { logError } from '../utils/log.js';
import { getSettings_DEPRECATED } from '../utils/settings/settings.js';
import { ProviderWizard } from '../commands/provider/provider.js';
import { ProviderManager } from './ProviderManager.js';
import { Select } from './CustomSelect/select.js';
import { KeyboardShortcutHint } from './design-system/KeyboardShortcutHint.js';
import { Spinner } from './Spinner.js';
@@ -450,16 +450,17 @@ function OAuthStatusMessage({

case 'platform_setup':
  return (
    <ProviderWizard
    <ProviderManager
      mode="first-run"
      onDone={result => {
        if (!result) {
        if (!result || result.action !== 'saved' || !result.message) {
          setOAuthStatus({ state: 'idle' })
          return
        }

        setOAuthStatus({
          state: 'platform_setup_complete',
          message: result,
          message: result.message,
        })
      }}
    />

@@ -285,7 +285,7 @@ export function Select(t0) {
  onChange,
  onCancel,
  onFocus,
  focusValue: defaultFocusValue
  defaultFocusValue,
};
$[7] = defaultFocusValue;
$[8] = defaultValue;

@@ -1,5 +1,4 @@
import { useCallback, useState } from 'react'
import { isDeepStrictEqual } from 'util'
import { useRegisterOverlay } from '../../context/overlayContext.js'
import type { InputEvent } from '../../ink/events/input-event.js'
// eslint-disable-next-line custom-rules/prefer-use-keybindings -- raw space/arrow multiselect input
@@ -9,6 +8,7 @@ import {
  normalizeFullWidthSpace,
} from '../../utils/stringUtils.js'
import type { OptionWithDescription } from './select.js'
import { optionsNavigateEqual } from './use-select-navigation.js'
import { useSelectNavigation } from './use-select-navigation.js'

export type UseMultiSelectStateProps<T> = {
@@ -174,7 +174,7 @@ export function useMultiSelectState<T>({
  // and the deleted ui/useMultiSelectState.ts — without this, MCPServerDesktopImportDialog
  // keeps colliding servers checked after getAllMcpConfigs() resolves.
  const [lastOptions, setLastOptions] = useState(options)
  if (options !== lastOptions && !isDeepStrictEqual(options, lastOptions)) {
  if (options !== lastOptions && !optionsNavigateEqual(options, lastOptions)) {
    setSelectedValues(defaultValue)
    setLastOptions(options)
  }

@@ -6,10 +6,34 @@ import {
  useRef,
  useState,
} from 'react'
import { isDeepStrictEqual } from 'util'
import OptionMap from './option-map.js'
import type { OptionWithDescription } from './select.js'

/**
 * Compare two option arrays for structural equality on properties that
 * affect navigation behavior. ReactNode `label` and function `onChange`
 * are intentionally excluded — they are identity-unstable (new reference
 * each render) but don't change navigation semantics.
 */
export function optionsNavigateEqual<T>(
  a: OptionWithDescription<T>[],
  b: OptionWithDescription<T>[],
): boolean {
  if (a.length !== b.length) return false
  for (let i = 0; i < a.length; i++) {
    const ao = a[i]!
    const bo = b[i]!
    if (
      ao.value !== bo.value ||
      ao.disabled !== bo.disabled ||
      ao.type !== bo.type
    ) {
      return false
    }
  }
  return true
}

type State<T> = {
  /**
   * Map where key is option's value and value is option's index.
@@ -524,7 +548,7 @@ export function useSelectNavigation<T>({

  const [lastOptions, setLastOptions] = useState(options)

  if (options !== lastOptions && !isDeepStrictEqual(options, lastOptions)) {
  if (options !== lastOptions && !optionsNavigateEqual(options, lastOptions)) {
    dispatch({
      type: 'reset',
      state: createDefaultState({

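To see why the swap away from `isDeepStrictEqual` matters: Node compares functions by reference, so option arrays that embed fresh `onChange` closures (or freshly created ReactNode labels) never compare deep-equal across renders, and the reset path would fire on every render. A small self-contained illustration:

```ts
import { isDeepStrictEqual } from 'util'

// Each "render" rebuilds structurally identical options, but the onChange
// closure is a brand-new function reference every time.
const makeOptions = () => [{ value: 'a', disabled: false, onChange: () => {} }]

// Deep equality fails on the fresh function reference alone:
console.log(isDeepStrictEqual(makeOptions(), makeOptions())) // false

// optionsNavigateEqual above compares only value/disabled/type, so the same
// two arrays count as equal and no spurious navigation reset occurs.
```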
@@ -35,6 +35,11 @@ export type UseSelectStateProps<T> = {
|
||||
*/
|
||||
onFocus?: (value: T) => void
|
||||
|
||||
/**
|
||||
* Initial value to focus when the component mounts.
|
||||
*/
|
||||
defaultFocusValue?: T
|
||||
|
||||
/**
|
||||
* Value to focus
|
||||
*/
|
||||
@@ -131,6 +136,7 @@ export function useSelectState<T>({
|
||||
onChange,
|
||||
onCancel,
|
||||
onFocus,
|
||||
defaultFocusValue,
|
||||
focusValue,
|
||||
}: UseSelectStateProps<T>): SelectState<T> {
|
||||
const [value, setValue] = useState<T | undefined>(defaultValue)
|
||||
@@ -138,7 +144,7 @@ export function useSelectState<T>({
|
||||
const navigation = useSelectNavigation<T>({
|
||||
visibleOptionCount,
|
||||
options,
|
||||
initialFocusValue: undefined,
|
||||
initialFocusValue: defaultFocusValue,
|
||||
onFocus,
|
||||
focusValue,
|
||||
})
|
||||
|
||||
@@ -112,7 +112,7 @@ export function HelpV2(t0) {
}
tabs.push(t6);
if (false && antOnlyCommands.length > 0) {
let t7;
if ($[26] !== antOnlyCommands || $[27] !== close || $[28] !== columns || $[29] !== maxHeight) {
  t7 = <Tab key="internal-only" title="[internal-only]"><Commands commands={antOnlyCommands} maxHeight={maxHeight} columns={columns} title="Browse internal-only commands:" onCancel={close} /></Tab>;
  $[26] = antOnlyCommands;

@@ -252,14 +252,24 @@ function PromptInput({
  show: false
});
const [cursorOffset, setCursorOffset] = useState<number>(input.length);
// Track the last input value set via internal handlers so we can detect
// external input changes (e.g. speech-to-text injection) and move cursor to end.
// Track the last input value set via internal handlers so external updates
// (for example speech-to-text injection) can still move the cursor to end
// without clobbering a pending internal keystroke during render.
const lastInternalInputRef = React.useRef(input);
if (input !== lastInternalInputRef.current) {
  // Input changed externally (not through any internal handler) — move cursor to end
  setCursorOffset(input.length);
const lastPropInputRef = React.useRef(input);
React.useLayoutEffect(() => {
  if (input === lastPropInputRef.current) {
    return;
  }

  lastPropInputRef.current = input;
  if (input === lastInternalInputRef.current) {
    return;
  }

  lastInternalInputRef.current = input;
}
  setCursorOffset(prev => prev === input.length ? prev : input.length);
}, [input]);
// Wrap onInputChange to track internal changes before they trigger re-render
const trackAndSetInput = React.useCallback((value: string) => {
  lastInternalInputRef.current = value;
@@ -2201,7 +2211,7 @@ function PromptInput({
  multiline: true,
  onSubmit,
  onChange,
  value: historyMatch ? getValueFromInput(typeof historyMatch === 'string' ? historyMatch : historyMatch.display) : input,
  value: isSearchingHistory && historyMatch ? getValueFromInput(typeof historyMatch === 'string' ? historyMatch : historyMatch.display) : input,
  // History navigation is handled via TextInput props (onHistoryUp/onHistoryDown),
  // NOT via useKeybindings. This allows useTextInput's upOrHistoryUp/downOrHistoryDown
  // to try cursor movement first and only fall through to history navigation when the

@@ -6,6 +6,7 @@ import stripAnsi from 'strip-ansi'

import { createRoot } from '../ink.js'
import { AppStateProvider } from '../state/AppState.js'
import { KeybindingSetup } from '../keybindings/KeybindingProviderSetup.js'

const SYNC_START = '\x1B[?2026h'
const SYNC_END = '\x1B[?2026l'
@@ -106,19 +107,30 @@ function createDeferred<T>(): {
  return { promise, resolve }
}

function mockProviderProfilesModule(): void {
function mockProviderProfilesModule(options?: {
  addProviderProfile?: (...args: unknown[]) => unknown
}): void {
  mock.module('../utils/providerProfiles.js', () => ({
    addProviderProfile: () => null,
    addProviderProfile: options?.addProviderProfile ?? (() => null),
    applyActiveProviderProfileFromConfig: () => {},
    deleteProviderProfile: () => ({ removed: false, activeProfileId: null }),
    getActiveProviderProfile: () => null,
    getProviderPresetDefaults: () => ({
      provider: 'openai',
      name: 'Mock provider',
      baseUrl: 'http://localhost:11434/v1',
      model: 'mock-model',
      apiKey: '',
    }),
    getProviderPresetDefaults: (preset: string) =>
      preset === 'ollama'
        ? {
            provider: 'openai',
            name: 'Ollama',
            baseUrl: 'http://localhost:11434/v1',
            model: 'llama3.1:8b',
            apiKey: '',
          }
        : {
            provider: 'openai',
            name: 'Mock provider',
            baseUrl: 'http://localhost:11434/v1',
            model: 'mock-model',
            apiKey: '',
          },
    getProviderProfiles: () => [],
    setActiveProviderProfile: () => null,
    updateProviderProfile: () => null,
@@ -128,8 +140,27 @@ function mockProviderProfilesModule(): void {
function mockProviderManagerDependencies(
  syncRead: () => string | undefined,
  asyncRead: () => Promise<string | undefined>,
  options?: {
    addProviderProfile?: (...args: unknown[]) => unknown
    hasLocalOllama?: () => Promise<boolean>
    listOllamaModels?: () => Promise<
      Array<{
        name: string
        sizeBytes?: number | null
        family?: string | null
        families?: string[]
        parameterSize?: string | null
        quantizationLevel?: string | null
      }>
    >
  },
): void {
  mockProviderProfilesModule()
  mockProviderProfilesModule({ addProviderProfile: options?.addProviderProfile })

  mock.module('../utils/providerDiscovery.js', () => ({
    hasLocalOllama: options?.hasLocalOllama ?? (async () => false),
    listOllamaModels: options?.listOllamaModels ?? (async () => []),
  }))

  mock.module('../utils/githubModelsCredentials.js', () => ({
    clearGithubModelsToken: () => ({ success: true }),
@@ -162,9 +193,14 @@ async function waitForFrameOutput(
async function mountProviderManager(
  ProviderManager: React.ComponentType<{
    mode: 'first-run' | 'manage'
    onDone: () => void
    onDone: (result?: unknown) => void
  }>,
  options?: {
    mode?: 'first-run' | 'manage'
    onDone?: (result?: unknown) => void
  },
): Promise<{
  stdin: PassThrough
  getOutput: () => string
  dispose: () => Promise<void>
}> {
@@ -177,14 +213,17 @@ async function mountProviderManager(

  root.render(
    <AppStateProvider>
      <ProviderManager
        mode="manage"
        onDone={() => {}}
      />
      <KeybindingSetup>
        <ProviderManager
          mode={options?.mode ?? 'manage'}
          onDone={options?.onDone ?? (() => {})}
        />
      </KeybindingSetup>
    </AppStateProvider>,
  )

  return {
    stdin,
    getOutput,
    dispose: async () => {
      root.unmount()
@@ -198,14 +237,17 @@ async function mountProviderManager(
async function renderProviderManagerFrame(
  ProviderManager: React.ComponentType<{
    mode: 'first-run' | 'manage'
    onDone: () => void
    onDone: (result?: unknown) => void
  }>,
  options?: {
    waitForOutput?: (output: string) => boolean
    timeoutMs?: number
    mode?: 'first-run' | 'manage'
  },
): Promise<string> {
  const mounted = await mountProviderManager(ProviderManager)
  const mounted = await mountProviderManager(ProviderManager, {
    mode: options?.mode,
  })
  const output = await waitForFrameOutput(
    mounted.getOutput,
    frame => {
@@ -263,6 +305,96 @@ test('ProviderManager resolves GitHub virtual provider from async storage withou
  expect(asyncRead).toHaveBeenCalled()
})

test('ProviderManager first-run Ollama preset auto-detects installed models', async () => {
  delete process.env.CLAUDE_CODE_USE_GITHUB
  delete process.env.GITHUB_TOKEN
  delete process.env.GH_TOKEN

  const onDone = mock(() => {})
  const addProviderProfile = mock((payload: {
    provider: string
    name: string
    baseUrl: string
    model: string
    apiKey?: string
  }) => ({
    id: 'provider_ollama',
    provider: payload.provider,
    name: payload.name,
    baseUrl: payload.baseUrl,
    model: payload.model,
    apiKey: payload.apiKey,
  }))

  mockProviderManagerDependencies(
    () => undefined,
    async () => undefined,
    {
      addProviderProfile,
      hasLocalOllama: async () => true,
      listOllamaModels: async () => [
        {
          name: 'gemma4:31b-cloud',
          family: 'gemma',
          parameterSize: '31b',
        },
        {
          name: 'kimi-k2.5:cloud',
          family: 'kimi',
          parameterSize: '2.5b',
        },
      ],
    },
  )

  const nonce = `${Date.now()}-${Math.random()}`
  const { ProviderManager } = await import(`./ProviderManager.js?ts=${nonce}`)
  const mounted = await mountProviderManager(ProviderManager, {
    mode: 'first-run',
    onDone,
  })

  await waitForFrameOutput(
    mounted.getOutput,
    frame => frame.includes('Set up provider') && frame.includes('Ollama'),
  )

  mounted.stdin.write('j')
  await Bun.sleep(50)
  mounted.stdin.write('\r')

  const modelFrame = await waitForFrameOutput(
    mounted.getOutput,
    frame =>
      frame.includes('Choose an Ollama model') &&
      frame.includes('gemma4:31b-cloud') &&
      frame.includes('kimi-k2.5:cloud'),
  )

  expect(modelFrame).toContain('Choose an Ollama model')
  expect(modelFrame).toContain('gemma4:31b-cloud')

  await Bun.sleep(25)
  mounted.stdin.write('\r')

  await waitForCondition(() => onDone.mock.calls.length > 0)

  expect(addProviderProfile).toHaveBeenCalled()
  expect(addProviderProfile.mock.calls[0]?.[0]).toMatchObject({
    name: 'Ollama',
    baseUrl: 'http://localhost:11434/v1',
    model: 'gemma4:31b-cloud',
  })
  expect(onDone).toHaveBeenCalledWith(
    expect.objectContaining({
      action: 'saved',
      message: 'Provider configured: Ollama',
    }),
  )

  await mounted.dispose()
})

test('ProviderManager avoids first-frame false negative while stored-token lookup is pending', async () => {
|
||||
delete process.env.CLAUDE_CODE_USE_GITHUB
|
||||
delete process.env.GITHUB_TOKEN
|
||||
|
||||
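
The `?ts=${nonce}` query string on the dynamic import above is the cache-busting idiom these Bun tests rely on so that `mock.module` registrations take effect: every unique specifier forces a fresh evaluation of the module under test. A minimal sketch of the idiom, with illustrative module names that are not from this repo:

    import { expect, mock, test } from 'bun:test'

    test('fresh import picks up module mocks', async () => {
      // Register the mock before importing the module under test.
      mock.module('./dep.js', () => ({ answer: () => 42 }))

      // A unique query string defeats the module cache, so this import
      // re-evaluates './thing.js' against the mocked './dep.js'.
      const nonce = `${Date.now()}-${Math.random()}`
      const { compute } = await import(`./thing.js?ts=${nonce}`)

      expect(compute()).toBe(42)
    })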
@@ -3,6 +3,7 @@ import * as React from 'react'
import { Box, Text } from '../ink.js'
import { useKeybinding } from '../keybindings/useKeybinding.js'
import type { ProviderProfile } from '../utils/config.js'
+import { hasLocalOllama, listOllamaModels } from '../utils/providerDiscovery.js'
import {
  addProviderProfile,
  applyActiveProviderProfileFromConfig,
@@ -15,6 +16,10 @@ import {
  type ProviderProfileInput,
  updateProviderProfile,
} from '../utils/providerProfiles.js'
+import {
+  rankOllamaModels,
+  recommendOllamaModel,
+} from '../utils/providerRecommendation.js'
import {
  clearGithubModelsToken,
  GITHUB_MODELS_HYDRATED_ENV_MARKER,
@@ -24,7 +29,7 @@ import {
} from '../utils/githubModelsCredentials.js'
import { isEnvTruthy } from '../utils/envUtils.js'
import { updateSettingsForSource } from '../utils/settings/settings.js'
-import { Select } from './CustomSelect/index.js'
+import { type OptionWithDescription, Select } from './CustomSelect/index.js'
import { Pane } from './design-system/Pane.js'
import TextInput from './TextInput.js'

@@ -42,6 +47,7 @@ type Props = {
type Screen =
  | 'menu'
  | 'select-preset'
+  | 'select-ollama-model'
  | 'form'
  | 'select-active'
  | 'select-edit'
@@ -51,6 +57,16 @@ type DraftField = 'name' | 'baseUrl' | 'model' | 'apiKey'

type ProviderDraft = Record<DraftField, string>

+type OllamaSelectionState =
+  | { state: 'idle' }
+  | { state: 'loading' }
+  | {
+      state: 'ready'
+      options: OptionWithDescription<string>[]
+      defaultValue?: string
+    }
+  | { state: 'unavailable'; message: string }
+
const FORM_STEPS: Array<{
  key: DraftField
  label: string
@@ -210,6 +226,9 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
  const [cursorOffset, setCursorOffset] = React.useState(0)
  const [statusMessage, setStatusMessage] = React.useState<string | undefined>()
  const [errorMessage, setErrorMessage] = React.useState<string | undefined>()
+  const [ollamaSelection, setOllamaSelection] = React.useState<OllamaSelectionState>({
+    state: 'idle',
+  })

  const currentStep = FORM_STEPS[formStepIndex] ?? FORM_STEPS[0]
  const currentStepKey = currentStep.key
@@ -364,6 +383,59 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
    return null
  }

+  React.useEffect(() => {
+    if (screen !== 'select-ollama-model') {
+      return
+    }
+
+    let cancelled = false
+    setOllamaSelection({ state: 'loading' })
+
+    void (async () => {
+      const available = await hasLocalOllama(draft.baseUrl)
+      if (!available) {
+        if (!cancelled) {
+          setOllamaSelection({
+            state: 'unavailable',
+            message:
+              'Could not reach Ollama. Start Ollama first, or enter the endpoint manually.',
+          })
+        }
+        return
+      }
+
+      const models = await listOllamaModels(draft.baseUrl)
+      if (models.length === 0) {
+        if (!cancelled) {
+          setOllamaSelection({
+            state: 'unavailable',
+            message:
+              'Ollama is running, but no installed models were found. Pull a chat model such as qwen2.5-coder:7b or llama3.1:8b first, or enter details manually.',
+          })
+        }
+        return
+      }
+
+      const ranked = rankOllamaModels(models, 'balanced')
+      const recommended = recommendOllamaModel(models, 'balanced')
+      if (!cancelled) {
+        setOllamaSelection({
+          state: 'ready',
+          defaultValue: recommended?.name ?? ranked[0]?.name,
+          options: ranked.map(model => ({
+            label: model.name,
+            value: model.name,
+            description: model.summary,
+          })),
+        })
+      }
+    })()
+
+    return () => {
+      cancelled = true
+    }
+  }, [draft.baseUrl, screen])
+
  function startCreateFromPreset(preset: ProviderPreset): void {
    const defaults = getProviderPresetDefaults(preset)
    const nextDraft = {
@@ -378,6 +450,13 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
    setFormStepIndex(0)
    setCursorOffset(nextDraft.name.length)
    setErrorMessage(undefined)
+
+    if (preset === 'ollama') {
+      setOllamaSelection({ state: 'loading' })
+      setScreen('select-ollama-model')
+      return
+    }
+
    setScreen('form')
  }

@@ -397,13 +476,13 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
    setScreen('form')
  }

-  function persistDraft(): void {
+  function persistDraft(nextDraft: ProviderDraft = draft): void {
    const payload: ProviderProfileInput = {
      provider: draftProvider,
-      name: draft.name,
-      baseUrl: draft.baseUrl,
-      model: draft.model,
-      apiKey: draft.apiKey,
+      name: nextDraft.name,
+      baseUrl: nextDraft.baseUrl,
+      model: nextDraft.model,
+      apiKey: nextDraft.apiKey,
    }

    const saved = editingProfileId
@@ -446,6 +525,83 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
    setScreen('menu')
  }

+  function renderOllamaSelection(): React.ReactNode {
+    if (ollamaSelection.state === 'loading' || ollamaSelection.state === 'idle') {
+      return (
+        <Box flexDirection="column" gap={1}>
+          <Text color="remember" bold>
+            Checking Ollama
+          </Text>
+          <Text dimColor>Looking for installed Ollama models...</Text>
+        </Box>
+      )
+    }
+
+    if (ollamaSelection.state === 'unavailable') {
+      return (
+        <Box flexDirection="column" gap={1}>
+          <Text color="remember" bold>
+            Ollama setup
+          </Text>
+          <Text dimColor>{ollamaSelection.message}</Text>
+          <Select
+            options={[
+              {
+                value: 'manual',
+                label: 'Enter manually',
+                description: 'Fill in the base URL and model yourself',
+              },
+              {
+                value: 'back',
+                label: 'Back',
+                description: 'Choose another provider preset',
+              },
+            ]}
+            onChange={value => {
+              if (value === 'manual') {
+                setFormStepIndex(0)
+                setCursorOffset(draft.name.length)
+                setScreen('form')
+                return
+              }
+              setScreen('select-preset')
+            }}
+            onCancel={() => setScreen('select-preset')}
+            visibleOptionCount={2}
+          />
+        </Box>
+      )
+    }
+
+    return (
+      <Box flexDirection="column" gap={1}>
+        <Text color="remember" bold>
+          Choose an Ollama model
+        </Text>
+        <Text dimColor>
+          Pick one of the installed Ollama models to save into a local provider
+          profile.
+        </Text>
+        <Select
+          options={ollamaSelection.options}
+          defaultValue={ollamaSelection.defaultValue}
+          defaultFocusValue={ollamaSelection.defaultValue}
+          inlineDescriptions
+          visibleOptionCount={Math.min(8, ollamaSelection.options.length)}
+          onChange={value => {
+            const nextDraft = {
+              ...draft,
+              model: value,
+            }
+            setDraft(nextDraft)
+            persistDraft(nextDraft)
+          }}
+          onCancel={() => setScreen('select-preset')}
+        />
+      </Box>
+    )
+  }
+
  function handleFormSubmit(value: string): void {
    const trimmed = value.trim()

@@ -470,7 +626,7 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
      return
    }

-    persistDraft()
+    persistDraft(nextDraft)
  }

  function handleBackFromForm(): void {
@@ -819,13 +975,16 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {

  let content: React.ReactNode

  switch (screen) {
    case 'select-preset':
      content = renderPresetSelection()
      break
+    case 'select-ollama-model':
+      content = renderOllamaSelection()
+      break
    case 'form':
      content = renderForm()
      break
    case 'select-active':
      content = renderProfileSelection(
        'Set active provider',
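
The discovery effect above uses the standard cancelled-flag idiom so that a late async result cannot clobber state after the screen or base URL changes. A stripped-down sketch of the pattern, with generic names that are not from this file:

    import * as React from 'react'

    function useEndpointStatus(url: string): 'loading' | 'up' | 'down' {
      const [status, setStatus] = React.useState<'loading' | 'up' | 'down'>('loading')

      React.useEffect(() => {
        let cancelled = false
        setStatus('loading')

        void (async () => {
          const ok = await fetch(url).then(r => r.ok).catch(() => false)
          // If the deps changed while we were awaiting, the cleanup below
          // has already flipped `cancelled`, so this stale result is dropped.
          if (!cancelled) {
            setStatus(ok ? 'up' : 'down')
          }
        })()

        return () => {
          cancelled = true
        }
      }, [url])

      return status
    }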
@@ -7,6 +7,8 @@

import { isLocalProviderUrl } from '../services/api/providerConfig.js'
import { getLocalOpenAICompatibleProviderLabel } from '../utils/providerDiscovery.js'
+import { getSettings_DEPRECATED } from '../utils/settings/settings.js'
+import { parseUserSpecifiedModel } from '../utils/model/model.js'

declare const MACRO: { VERSION: string; DISPLAY_VERSION?: string }

@@ -85,6 +87,7 @@ function detectProvider(): { name: string; model: string; baseUrl: string; isLoc
  const useGemini = process.env.CLAUDE_CODE_USE_GEMINI === '1' || process.env.CLAUDE_CODE_USE_GEMINI === 'true'
  const useGithub = process.env.CLAUDE_CODE_USE_GITHUB === '1' || process.env.CLAUDE_CODE_USE_GITHUB === 'true'
  const useOpenAI = process.env.CLAUDE_CODE_USE_OPENAI === '1' || process.env.CLAUDE_CODE_USE_OPENAI === 'true'
+  const useMistral = process.env.CLAUDE_CODE_USE_MISTRAL === '1' || process.env.CLAUDE_CODE_USE_MISTRAL === 'true'

  if (useGemini) {
    const model = process.env.GEMINI_MODEL || 'gemini-2.0-flash'
@@ -92,11 +95,17 @@ function detectProvider(): { name: string; model: string; baseUrl: string; isLoc
    return { name: 'Google Gemini', model, baseUrl, isLocal: false }
  }

+  if (useMistral) {
+    const model = process.env.MISTRAL_MODEL || 'devstral-latest'
+    const baseUrl = process.env.MISTRAL_BASE_URL || 'https://api.mistral.ai/v1'
+    return { name: 'Mistral', model, baseUrl, isLocal: false }
+  }
+
  if (useGithub) {
    const model = process.env.OPENAI_MODEL || 'github:copilot'
    const baseUrl =
-      process.env.OPENAI_BASE_URL || 'https://models.github.ai/inference'
-    return { name: 'GitHub Models', model, baseUrl, isLocal: false }
+      process.env.OPENAI_BASE_URL || 'https://api.githubcopilot.com'
+    return { name: 'GitHub Copilot', model, baseUrl, isLocal: false }
  }

  if (useOpenAI) {
@@ -139,9 +148,11 @@ function detectProvider(): { name: string; model: string; baseUrl: string; isLoc
    return { name, model: displayModel, baseUrl, isLocal }
  }

-  // Default: Anthropic
-  const model = process.env.ANTHROPIC_MODEL || process.env.CLAUDE_MODEL || 'claude-sonnet-4-6'
-  return { name: 'Anthropic', model, baseUrl: 'https://api.anthropic.com', isLocal: false }
+  // Default: Anthropic - check settings.model first, then env vars
+  const settings = getSettings_DEPRECATED() || {}
+  const modelSetting = settings.model || process.env.ANTHROPIC_MODEL || process.env.CLAUDE_MODEL || 'claude-sonnet-4-6'
+  const resolvedModel = parseUserSpecifiedModel(modelSetting)
+  return { name: 'Anthropic', model: resolvedModel, baseUrl: 'https://api.anthropic.com', isLocal: false }
}

// ─── Box drawing ──────────────────────────────────────────────────────────────
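
The four `=== '1' || === 'true'` checks repeat a pattern this codebase already wraps elsewhere as `isEnvTruthy`. A tiny helper along those lines, sketched here only as a possible refactor and assuming the two accepted spellings seen above, would keep the flag checks uniform:

    // Sketch: a truthiness check matching the two spellings used above.
    function envFlag(name: string): boolean {
      const value = process.env[name]
      return value === '1' || value === 'true'
    }

    const useMistral = envFlag('CLAUDE_CODE_USE_MISTRAL')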
231  src/components/TextInput.test.tsx  Normal file
@@ -0,0 +1,231 @@
import { PassThrough } from 'node:stream'

import { expect, test } from 'bun:test'
import React from 'react'
import stripAnsi from 'strip-ansi'

import { createRoot } from '../ink.js'
import { AppStateProvider } from '../state/AppState.js'
import TextInput from './TextInput.js'
import VimTextInput from './VimTextInput.js'

const SYNC_START = '\x1B[?2026h'
const SYNC_END = '\x1B[?2026l'

function extractLastFrame(output: string): string {
  let lastFrame: string | null = null
  let cursor = 0

  while (cursor < output.length) {
    const start = output.indexOf(SYNC_START, cursor)
    if (start === -1) {
      break
    }

    const contentStart = start + SYNC_START.length
    const end = output.indexOf(SYNC_END, contentStart)
    if (end === -1) {
      break
    }

    const frame = output.slice(contentStart, end)
    if (frame.trim().length > 0) {
      lastFrame = frame
    }
    cursor = end + SYNC_END.length
  }

  return lastFrame ?? output
}

function createTestStreams(): {
  stdout: PassThrough
  stdin: PassThrough & {
    isTTY: boolean
    setRawMode: (mode: boolean) => void
    ref: () => void
    unref: () => void
  }
  getOutput: () => string
} {
  let output = ''
  const stdout = new PassThrough()
  const stdin = new PassThrough() as PassThrough & {
    isTTY: boolean
    setRawMode: (mode: boolean) => void
    ref: () => void
    unref: () => void
  }

  stdin.isTTY = true
  stdin.setRawMode = () => {}
  stdin.ref = () => {}
  stdin.unref = () => {}
  ;(stdout as unknown as { columns: number }).columns = 120
  stdout.on('data', chunk => {
    output += chunk.toString()
  })

  return {
    stdout,
    stdin,
    getOutput: () => output,
  }
}

function DelayedControlledTextInput(): React.ReactNode {
  const [value, setValue] = React.useState('')
  const [cursorOffset, setCursorOffset] = React.useState(0)
  const valueTimerRef = React.useRef<ReturnType<typeof setTimeout> | null>(null)
  const offsetTimerRef = React.useRef<ReturnType<typeof setTimeout> | null>(null)

  React.useEffect(() => {
    return () => {
      if (valueTimerRef.current) {
        clearTimeout(valueTimerRef.current)
      }
      if (offsetTimerRef.current) {
        clearTimeout(offsetTimerRef.current)
      }
    }
  }, [])

  return (
    <AppStateProvider>
      <TextInput
        value={value}
        onChange={nextValue => {
          if (valueTimerRef.current) {
            clearTimeout(valueTimerRef.current)
          }
          valueTimerRef.current = setTimeout(() => {
            setValue(nextValue)
          }, 200)
        }}
        onSubmit={() => {}}
        placeholder="Type here..."
        columns={60}
        cursorOffset={cursorOffset}
        onChangeCursorOffset={nextOffset => {
          if (offsetTimerRef.current) {
            clearTimeout(offsetTimerRef.current)
          }
          offsetTimerRef.current = setTimeout(() => {
            setCursorOffset(nextOffset)
          }, 200)
        }}
        focus
        showCursor
        multiline
      />
    </AppStateProvider>
  )
}

function DelayedControlledVimTextInput(): React.ReactNode {
  const [value, setValue] = React.useState('')
  const [cursorOffset, setCursorOffset] = React.useState(0)
  const valueTimerRef = React.useRef<ReturnType<typeof setTimeout> | null>(null)
  const offsetTimerRef = React.useRef<ReturnType<typeof setTimeout> | null>(null)

  React.useEffect(() => {
    return () => {
      if (valueTimerRef.current) {
        clearTimeout(valueTimerRef.current)
      }
      if (offsetTimerRef.current) {
        clearTimeout(offsetTimerRef.current)
      }
    }
  }, [])

  return (
    <AppStateProvider>
      <VimTextInput
        value={value}
        onChange={nextValue => {
          if (valueTimerRef.current) {
            clearTimeout(valueTimerRef.current)
          }
          valueTimerRef.current = setTimeout(() => {
            setValue(nextValue)
          }, 200)
        }}
        onSubmit={() => {}}
        placeholder="Type here..."
        columns={60}
        cursorOffset={cursorOffset}
        onChangeCursorOffset={nextOffset => {
          if (offsetTimerRef.current) {
            clearTimeout(offsetTimerRef.current)
          }
          offsetTimerRef.current = setTimeout(() => {
            setCursorOffset(nextOffset)
          }, 200)
        }}
        initialMode="INSERT"
        focus
        showCursor
        multiline
      />
    </AppStateProvider>
  )
}

test('TextInput renders typed characters before delayed parent value commits', async () => {
  const { stdout, stdin, getOutput } = createTestStreams()
  const root = await createRoot({
    stdout: stdout as unknown as NodeJS.WriteStream,
    stdin: stdin as unknown as NodeJS.ReadStream,
    patchConsole: false,
  })

  root.render(<DelayedControlledTextInput />)

  await Bun.sleep(50)
  stdin.write('a')
  await Bun.sleep(25)
  stdin.write('b')
  await Bun.sleep(25)

  const output = stripAnsi(extractLastFrame(getOutput()))

  root.unmount()
  stdin.end()
  stdout.end()
  await Bun.sleep(25)

  expect(output).toContain('ab')
  expect(output).not.toContain('Type here...')
})

test('VimTextInput preserves rapid typed characters before delayed parent value commits', async () => {
  const { stdout, stdin, getOutput } = createTestStreams()
  const root = await createRoot({
    stdout: stdout as unknown as NodeJS.WriteStream,
    stdin: stdin as unknown as NodeJS.ReadStream,
    patchConsole: false,
  })

  root.render(<DelayedControlledVimTextInput />)

  await Bun.sleep(50)
  stdin.write('a')
  await Bun.sleep(25)
  stdin.write('s')
  await Bun.sleep(25)
  stdin.write('d')
  await Bun.sleep(25)
  stdin.write('f')
  await Bun.sleep(25)

  const output = stripAnsi(extractLastFrame(getOutput()))

  root.unmount()
  stdin.end()
  stdout.end()
  await Bun.sleep(25)

  expect(output).toContain('asdf')
  expect(output).not.toContain('Type here...')
})
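
extractLastFrame leans on the DEC 2026 synchronized-output markers that the renderer emits around each repaint: the last complete, non-empty marker pair is the final rendered frame. A quick usage example on synthetic output (not captured from a real run):

    // Two synchronized frames; the helper should return the second one.
    const sample =
      '\x1B[?2026h' + 'frame one' + '\x1B[?2026l' +
      '\x1B[?2026h' + 'frame two' + '\x1B[?2026l'

    console.log(extractLastFrame(sample)) // -> 'frame two'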
@@ -1,113 +1,161 @@
-import { describe, expect, it, mock } from 'bun:test'
 import { PassThrough } from 'node:stream'

-// We can't fully render ThemePicker due to complex dependencies
-// But we can test the theme options generation logic
-describe('ThemePicker', () => {
-  describe('theme options', () => {
-    it('generates correct theme options without AUTO_THEME feature flag', () => {
-      // Since we can't easily mock bun:bundle, test the options structure
-      // The real test would require integration testing
-      const expectedOptions = [
-        { label: "Dark mode", value: "dark" },
-        { label: "Light mode", value: "light" },
-        { label: "Dark mode (colorblind-friendly)", value: "dark-daltonized" },
-        { label: "Light mode (colorblind-friendly)", value: "light-daltonized" },
-        { label: "Dark mode (ANSI colors only)", value: "dark-ansi" },
-        { label: "Light mode (ANSI colors only)", value: "light-ansi" },
-      ]
-      expect(expectedOptions.length).toBe(6)
-    })
+import { afterEach, expect, mock, test } from 'bun:test'
+import React from 'react'
+import stripAnsi from 'strip-ansi'

-    it('includes auto theme when AUTO_THEME feature is enabled', () => {
-      // Test the structure when auto is present
-      const optionsWithAuto = [
-        { label: "Auto (match terminal)", value: "auto" },
-        { label: "Dark mode", value: "dark" },
-      ]
-      expect(optionsWithAuto[0].value).toBe('auto')
-    })
+import { createRoot, Text, useTheme } from '../ink.js'
+import { KeybindingSetup } from '../keybindings/KeybindingProviderSetup.js'
+import { AppStateProvider } from '../state/AppState.js'
+import { ThemeProvider } from './design-system/ThemeProvider.js'

+mock.module('./StructuredDiff.js', () => ({
+  StructuredDiff: function StructuredDiffPreview(): React.ReactNode {
+    const [theme] = useTheme()
+    return <Text>{`Preview theme: ${theme}`}</Text>
+  },
+}))

+mock.module('./StructuredDiff/colorDiff.js', () => ({
+  getColorModuleUnavailableReason: () => 'env',
+  getSyntaxTheme: () => null,
+}))

+const SYNC_START = '\x1B[?2026h'
+const SYNC_END = '\x1B[?2026l'

+function extractLastFrame(output: string): string {
+  let lastFrame: string | null = null
+  let cursor = 0
+
+  while (cursor < output.length) {
+    const start = output.indexOf(SYNC_START, cursor)
+    if (start === -1) {
+      break
+    }
+
+    const contentStart = start + SYNC_START.length
+    const end = output.indexOf(SYNC_END, contentStart)
+    if (end === -1) {
+      break
+    }
+
+    const frame = output.slice(contentStart, end)
+    if (frame.trim().length > 0) {
+      lastFrame = frame
+    }
+    cursor = end + SYNC_END.length
+  }
+
+  return lastFrame ?? output
+}

+function createTestStreams(): {
+  stdout: PassThrough
+  stdin: PassThrough & {
+    isTTY: boolean
+    setRawMode: (mode: boolean) => void
+    ref: () => void
+    unref: () => void
+  }
+  getOutput: () => string
+} {
+  let output = ''
+  const stdout = new PassThrough()
+  const stdin = new PassThrough() as PassThrough & {
+    isTTY: boolean
+    setRawMode: (mode: boolean) => void
+    ref: () => void
+    unref: () => void
+  }
+
+  stdin.isTTY = true
+  stdin.setRawMode = () => {}
+  stdin.ref = () => {}
+  stdin.unref = () => {}
+  ;(stdout as unknown as { columns: number }).columns = 120
+  stdout.on('data', chunk => {
+    output += chunk.toString()
+  })

-  describe('handleRowFocus callback', () => {
-    it('setPreviewTheme is called with theme setting', () => {
-      const setPreviewTheme = mock()
-      const handleRowFocus = (setting: string) => setPreviewTheme(setting)
-
-      handleRowFocus('dark')
-      expect(setPreviewTheme).toHaveBeenCalledWith('dark')
-    })
+  return {
+    stdout,
+    stdin,
+    getOutput: () => output,
+  }
+}

+async function waitForCondition(
+  predicate: () => boolean,
+  timeoutMs = 2000,
+): Promise<void> {
+  const startedAt = Date.now()
+
+  while (Date.now() - startedAt < timeoutMs) {
+    if (predicate()) {
+      return
+    }
+    await Bun.sleep(10)
+  }
+
+  throw new Error('Timed out waiting for ThemePicker test condition')
+}

+async function waitForFrame(
+  getOutput: () => string,
+  predicate: (frame: string) => boolean,
+): Promise<string> {
+  let frame = ''
+
+  await waitForCondition(() => {
+    frame = stripAnsi(extractLastFrame(getOutput()))
+    return predicate(frame)
+  })

-  describe('handleSelect callback', () => {
-    it('calls savePreview and onThemeSelect', () => {
-      const savePreview = mock()
-      const onThemeSelect = mock()
-      const handleSelect = (setting: string) => {
-        savePreview()
-        onThemeSelect(setting)
-      }
-
-      handleSelect('light')
-      expect(savePreview).toHaveBeenCalled()
-      expect(onThemeSelect).toHaveBeenCalledWith('light')
-    })
-  })
+  return frame
+}

-  describe('handleCancel callback', () => {
-    it('calls cancelPreview and gracefulShutdown when not skipExitHandling', () => {
-      const cancelPreview = mock()
-      const gracefulShutdown = mock()
-      const handleCancel = (skipExitHandling: boolean, onCancelProp?: () => void) => {
-        cancelPreview()
-        if (skipExitHandling) {
-          onCancelProp?.()
-        } else {
-          gracefulShutdown(0)
-        }
-      }
-
-      handleCancel(false)
-      expect(cancelPreview).toHaveBeenCalled()
-      expect(gracefulShutdown).toHaveBeenCalledWith(0)
-    })
-
-    it('calls onCancelProp when skipExitHandling is true', () => {
-      const cancelPreview = mock()
-      const onCancelProp = mock()
-      const handleCancel = (skipExitHandling: boolean, onCancelProp?: () => void) => {
-        cancelPreview()
-        if (skipExitHandling) {
-          onCancelProp?.()
-        }
-      }
-
-      handleCancel(true, onCancelProp)
-      expect(cancelPreview).toHaveBeenCalled()
-      expect(onCancelProp).toHaveBeenCalled()
-    })
-  })
-
-  describe('syntax hint logic', () => {
-    it('shows disabled hint when syntax highlighting is disabled', () => {
-      const syntaxHighlightingDisabled = true
-      const syntaxToggleShortcut = 'Ctrl+T'
-
-      const hint = syntaxHighlightingDisabled
-        ? `Syntax highlighting disabled (${syntaxToggleShortcut} to enable)`
-        : `Syntax highlighting enabled (${syntaxToggleShortcut} to disable)`
-
-      expect(hint).toContain('disabled')
-    })
-
-    it('shows enabled hint when syntax highlighting is active', () => {
-      const syntaxHighlightingDisabled = false
-      const syntaxToggleShortcut = 'Ctrl+T'
-
-      const hint = !syntaxHighlightingDisabled
-        ? `Syntax highlighting enabled (${syntaxToggleShortcut} to disable)`
-        : `Syntax highlighting disabled (${syntaxToggleShortcut} to enable)`
-
-      expect(hint).toContain('enabled')
-    })
-  })
+afterEach(() => {
+  mock.restore()
+})

+test('updates the preview when keyboard focus moves to another theme', async () => {
+  const { ThemePicker } = await import('./ThemePicker.js')
+  const { stdout, stdin, getOutput } = createTestStreams()
+  const root = await createRoot({
+    stdout: stdout as unknown as NodeJS.WriteStream,
+    stdin: stdin as unknown as NodeJS.ReadStream,
+    patchConsole: false,
+  })
+
+  root.render(
+    <AppStateProvider>
+      <KeybindingSetup>
+        <ThemeProvider initialState="dark">
+          <ThemePicker onThemeSelect={() => {}} />
+        </ThemeProvider>
+      </KeybindingSetup>
+    </AppStateProvider>,
+  )
+
+  try {
+    const initialFrame = await waitForFrame(
+      getOutput,
+      frame => frame.includes('Preview theme: dark'),
+    )
+    expect(initialFrame).toContain('Preview theme: dark')
+
+    stdin.write('j')
+
+    const updatedFrame = await waitForFrame(
+      getOutput,
+      frame => frame.includes('Preview theme: light'),
+    )
+    expect(updatedFrame).toContain('Preview theme: light')
+  } finally {
+    root.unmount()
+    stdin.end()
+    stdout.end()
+    await Bun.sleep(0)
+  }
+})
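
The rewritten test drives the real component and observes it through a probe: `mock.module` swaps the heavy StructuredDiff preview for a stub that renders the current theme as plain text, so frame assertions reduce to string matching. A generic sketch of the probe idea, with an illustrative component name that is not from this repo:

    import { mock } from 'bun:test'
    import React from 'react'
    import { Text } from '../ink.js'

    // Sketch: replace a visually complex child with a probe that renders
    // only the state under test.
    mock.module('./FancyChart.js', () => ({
      FancyChart: function FancyChartProbe(): React.ReactNode {
        return <Text>probe: chart rendered</Text>
      },
    }))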
@@ -2,7 +2,7 @@ import { c as _c } from "react-compiler-runtime";
import { feature } from 'bun:bundle';
import chalk from 'chalk';
import { mkdir } from 'fs/promises';
-import { join } from 'path';
+import { basename, join } from 'path';
import * as React from 'react';
import { use, useEffect, useState } from 'react';
import { getOriginalCwd } from '../../bootstrap/state.js';
@@ -24,6 +24,7 @@ import { projectIsInGitRepo } from '../../utils/memory/versions.js';
import { updateSettingsForSource } from '../../utils/settings/settings.js';
import { Select } from '../CustomSelect/index.js';
import { ListItem } from '../design-system/ListItem.js';
+import { getProjectMemoryPathForSelector } from './memoryFileSelectorPaths.js';

/* eslint-disable @typescript-eslint/no-require-imports */
const teamMemPaths = feature('TEAMMEM') ? require('../../memdir/teamMemPaths.js') as typeof import('../../memdir/teamMemPaths.js') : null;
@@ -48,8 +49,10 @@ export function MemoryFileSelector(t0) {
    onCancel
  } = t0;
  const existingMemoryFiles = use(getMemoryFiles());
+  const originalCwd = getOriginalCwd();
  const userMemoryPath = join(getClaudeConfigHomeDir(), "CLAUDE.md");
-  const projectMemoryPath = join(getOriginalCwd(), "CLAUDE.md");
+  const projectMemoryPath = getProjectMemoryPathForSelector(existingMemoryFiles, originalCwd);
+  const projectMemoryFileName = basename(projectMemoryPath);
  const hasUserMemory = existingMemoryFiles.some(f => f.path === userMemoryPath);
  const hasProjectMemory = existingMemoryFiles.some(f_0 => f_0.path === projectMemoryPath);
  const allMemoryFiles = [...existingMemoryFiles.filter(_temp).map(_temp2), ...(hasUserMemory ? [] : [{
@@ -85,12 +88,12 @@ export function MemoryFileSelector(t0) {
      }
    }
    let description;
-    const isGit = projectIsInGitRepo(getOriginalCwd());
+    const isGit = projectIsInGitRepo(originalCwd);
    if (file.type === "User" && !file.isNested) {
      description = "Saved in ~/.claude/CLAUDE.md";
    } else {
      if (file.type === "Project" && !file.isNested && file.path === projectMemoryPath) {
-        description = `${isGit ? "Checked in at" : "Saved in"} ./CLAUDE.md`;
+        description = `${isGit ? "Checked in at" : "Saved in"} ./${projectMemoryFileName}`;
      } else {
        if (file.parent) {
          description = "@-imported";
69  src/components/memory/memoryFileSelectorPaths.test.ts  Normal file
@@ -0,0 +1,69 @@
import { describe, expect, test } from 'bun:test'
import { join } from 'node:path'

import type { MemoryFileInfo } from '../../utils/claudemd.js'
import { getProjectMemoryPathForSelector } from './memoryFileSelectorPaths.js'

function projectFile(path: string): MemoryFileInfo {
  return {
    path,
    type: 'Project',
    content: '',
  }
}

describe('getProjectMemoryPathForSelector', () => {
  test('uses the loaded repo-level AGENTS.md from a nested cwd', () => {
    const repoDir = '/repo'
    const nestedDir = join(repoDir, 'packages', 'app')

    expect(
      getProjectMemoryPathForSelector(
        [projectFile(join(repoDir, 'AGENTS.md'))],
        nestedDir,
      ),
    ).toBe(join(repoDir, 'AGENTS.md'))
  })

  test('uses the loaded repo-level CLAUDE.md fallback from a nested cwd', () => {
    const repoDir = '/repo'
    const nestedDir = join(repoDir, 'packages', 'app')

    expect(
      getProjectMemoryPathForSelector(
        [projectFile(join(repoDir, 'CLAUDE.md'))],
        nestedDir,
      ),
    ).toBe(join(repoDir, 'CLAUDE.md'))
  })

  test('prefers the closest loaded ancestor instruction file', () => {
    const repoDir = '/repo'
    const nestedProjectDir = join(repoDir, 'packages', 'app')

    expect(
      getProjectMemoryPathForSelector(
        [
          projectFile(join(repoDir, 'AGENTS.md')),
          projectFile(join(nestedProjectDir, 'CLAUDE.md')),
        ],
        join(nestedProjectDir, 'src'),
      ),
    ).toBe(join(nestedProjectDir, 'CLAUDE.md'))
  })

  test('defaults to a new AGENTS.md in the current cwd when no project file is loaded', () => {
    expect(getProjectMemoryPathForSelector([], '/repo/packages/app')).toBe(
      '/repo/packages/app/AGENTS.md',
    )
  })

  test('ignores loaded project instruction files outside the current cwd ancestry', () => {
    expect(
      getProjectMemoryPathForSelector(
        [projectFile('/other-worktree/AGENTS.md')],
        '/repo/packages/app',
      ),
    ).toBe('/repo/packages/app/AGENTS.md')
  })
})
34  src/components/memory/memoryFileSelectorPaths.ts  Normal file
@@ -0,0 +1,34 @@
import { basename, join } from 'path'

import type { MemoryFileInfo } from '../../utils/claudemd.js'
import {
  findProjectInstructionFilePathInAncestors,
  isProjectInstructionFileName,
  PRIMARY_PROJECT_INSTRUCTION_FILE,
} from '../../utils/projectInstructions.js'

function isLoadedProjectInstructionFile(file: MemoryFileInfo): boolean {
  return (
    file.type === 'Project' &&
    file.parent === undefined &&
    isProjectInstructionFileName(basename(file.path))
  )
}

export function getProjectMemoryPathForSelector(
  existingMemoryFiles: MemoryFileInfo[],
  cwd: string,
): string {
  const loadedProjectInstructionPaths = new Set(
    existingMemoryFiles
      .filter(isLoadedProjectInstructionFile)
      .map(file => file.path),
  )

  return (
    findProjectInstructionFilePathInAncestors(
      cwd,
      path => loadedProjectInstructionPaths.has(path),
    ) ?? join(cwd, PRIMARY_PROJECT_INSTRUCTION_FILE)
  )
}
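
Taken together with the tests above, the resolution rule is: walk up from the cwd, return the closest ancestor instruction file that is actually loaded, and otherwise propose a new AGENTS.md in the cwd itself. For instance, reusing the test helper:

    // From /repo/packages/app/src, with /repo/AGENTS.md loaded:
    getProjectMemoryPathForSelector(
      [projectFile('/repo/AGENTS.md')],
      '/repo/packages/app/src',
    ) // -> '/repo/AGENTS.md'

    // With nothing loaded, it falls back to a new file in the cwd:
    getProjectMemoryPathForSelector([], '/repo/packages/app') // -> '/repo/packages/app/AGENTS.md'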
@@ -32,7 +32,7 @@ export function optionForPermissionSaveDestination(saveDestination: EditableSett
    case 'userSettings':
      return {
        label: 'User settings',
-        description: `Saved in at ~/.claude/settings.json`,
+        description: `Saved in ~/.openclaude/settings.json`,
        value: saveDestination
      };
  }
@@ -33,14 +33,14 @@ export const IMAGE_TARGET_RAW_SIZE = (API_IMAGE_MAX_BASE64_SIZE * 3) / 4 // 3.75
 *
 * Note: The API internally resizes images larger than 1568px (source:
 * encoding/full_encoding.py), but this is handled server-side and doesn't
-* cause errors. These client-side limits (2000px) are slightly larger to
+* cause errors. These client-side limits (1568px) now match that threshold to
 * preserve quality when beneficial.
 *
 * The API_IMAGE_MAX_BASE64_SIZE (5MB) is the actual hard limit that causes
 * API errors if exceeded.
 */
-export const IMAGE_MAX_WIDTH = 2000
-export const IMAGE_MAX_HEIGHT = 2000
+export const IMAGE_MAX_WIDTH = 1568
+export const IMAGE_MAX_HEIGHT = 1568

// =============================================================================
// PDF LIMITS
@@ -2,8 +2,11 @@ import { afterEach, expect, test } from 'bun:test'

import { getSystemPrompt, DEFAULT_AGENT_PROMPT } from './prompts.js'
import { CLI_SYSPROMPT_PREFIXES, getCLISyspromptPrefix } from './system.js'
import { CLAUDE_CODE_GUIDE_AGENT } from '../tools/AgentTool/built-in/claudeCodeGuideAgent.js'
import { GENERAL_PURPOSE_AGENT } from '../tools/AgentTool/built-in/generalPurposeAgent.js'
import { EXPLORE_AGENT } from '../tools/AgentTool/built-in/exploreAgent.js'
import { PLAN_AGENT } from '../tools/AgentTool/built-in/planAgent.js'
import { STATUSLINE_SETUP_AGENT } from '../tools/AgentTool/built-in/statuslineSetup.js'

const originalSimpleEnv = process.env.CLAUDE_CODE_SIMPLE

@@ -13,10 +16,12 @@ afterEach(() => {

test('CLI identity prefixes describe OpenClaude instead of Claude Code', () => {
  expect(getCLISyspromptPrefix()).toContain('OpenClaude')
  expect(getCLISyspromptPrefix()).not.toContain('Claude Code')
  expect(getCLISyspromptPrefix()).not.toContain("Anthropic's official CLI for Claude")

  for (const prefix of CLI_SYSPROMPT_PREFIXES) {
    expect(prefix).toContain('OpenClaude')
    expect(prefix).not.toContain('Claude Code')
    expect(prefix).not.toContain("Anthropic's official CLI for Claude")
  }
})
@@ -27,22 +32,53 @@ test('simple mode identity describes OpenClaude instead of Claude Code', async (
  const prompt = await getSystemPrompt([], 'gpt-4o')

  expect(prompt[0]).toContain('OpenClaude')
  expect(prompt[0]).not.toContain('Claude Code')
  expect(prompt[0]).not.toContain("Anthropic's official CLI for Claude")
})

test('built-in agent prompts describe OpenClaude instead of Claude Code', () => {
  expect(DEFAULT_AGENT_PROMPT).toContain('OpenClaude')
  expect(DEFAULT_AGENT_PROMPT).not.toContain('Claude Code')
  expect(DEFAULT_AGENT_PROMPT).not.toContain("Anthropic's official CLI for Claude")

  const generalPrompt = GENERAL_PURPOSE_AGENT.getSystemPrompt({
    toolUseContext: { options: {} as never },
  })
  expect(generalPrompt).toContain('OpenClaude')
  expect(generalPrompt).not.toContain('Claude Code')
  expect(generalPrompt).not.toContain("Anthropic's official CLI for Claude")

  const explorePrompt = EXPLORE_AGENT.getSystemPrompt({
    toolUseContext: { options: {} as never },
  })
  expect(explorePrompt).toContain('OpenClaude')
  expect(explorePrompt).not.toContain('Claude Code')
  expect(explorePrompt).not.toContain("Anthropic's official CLI for Claude")

  const planPrompt = PLAN_AGENT.getSystemPrompt({
    toolUseContext: { options: {} as never },
  })
  expect(planPrompt).toContain('OpenClaude')
  expect(planPrompt).not.toContain('Claude Code')

  const statuslinePrompt = STATUSLINE_SETUP_AGENT.getSystemPrompt({
    toolUseContext: { options: {} as never },
  })
  expect(statuslinePrompt).toContain('OpenClaude')
  expect(statuslinePrompt).not.toContain('Claude Code')

  const guidePrompt = CLAUDE_CODE_GUIDE_AGENT.getSystemPrompt({
    toolUseContext: {
      options: {
        commands: [],
        agentDefinitions: { activeAgents: [] },
        mcpClients: [],
      } as never,
    },
  })
  expect(guidePrompt).toContain('OpenClaude')
  expect(guidePrompt).toContain('You are the OpenClaude guide agent.')
  expect(guidePrompt).toContain('**OpenClaude** (the CLI tool)')
  expect(guidePrompt).not.toContain('You are the Claude guide agent.')
  expect(guidePrompt).not.toContain('**Claude Code** (the CLI tool)')
})
@@ -214,7 +214,7 @@ function getSimpleDoingTasksSection(): string {
  ]

  const userHelpSubitems = [
-    `/help: Get help with using Claude Code`,
+    `/help: Get help with using OpenClaude`,
    `To give feedback, users should ${MACRO.ISSUES_EXPLAINER}`,
  ]

@@ -242,7 +242,7 @@ function getSimpleDoingTasksSection(): string {
      : []),
    ...(process.env.USER_TYPE === 'ant'
      ? [
-          `If the user reports a bug, slowness, or unexpected behavior with Claude Code itself (as opposed to asking you to fix their own code), recommend the appropriate slash command: /issue for model-related problems (odd outputs, wrong tool choices, hallucinations, refusals), or /share to upload the full session transcript for product bugs, crashes, slowness, or general issues. Only recommend these when the user is describing a problem with Claude Code.`,
+          `If the user reports a bug, slowness, or unexpected behavior with OpenClaude itself (as opposed to asking you to fix their own code), recommend the appropriate slash command: /issue for model-related problems (odd outputs, wrong tool choices, hallucinations, refusals), or /share to upload the full session transcript for product bugs, crashes, slowness, or general issues. Only recommend these when the user is describing a problem with OpenClaude.`,
        ]
      : []),
    `If the user asks for help or wants to give feedback inform them of the following:`,
@@ -449,7 +449,7 @@ export async function getSystemPrompt(
): Promise<string[]> {
  if (isEnvTruthy(process.env.CLAUDE_CODE_SIMPLE)) {
    return [
-      `You are OpenClaude, an open-source fork of Claude Code.\n\nCWD: ${getCwd()}\nDate: ${getSessionStartDate()}`,
+      `You are OpenClaude, an open-source coding agent and CLI.\n\nCWD: ${getCwd()}\nDate: ${getSessionStartDate()}`,
    ]
  }
@@ -696,10 +696,10 @@ export async function computeSimpleEnvInfo(
      : `The most recent Claude model family is Claude 4.5/4.6. Model IDs — Opus 4.6: '${CLAUDE_4_5_OR_4_6_MODEL_IDS.opus}', Sonnet 4.6: '${CLAUDE_4_5_OR_4_6_MODEL_IDS.sonnet}', Haiku 4.5: '${CLAUDE_4_5_OR_4_6_MODEL_IDS.haiku}'. When building AI applications, default to the latest and most capable Claude models.`,
    process.env.USER_TYPE === 'ant' && isUndercover()
      ? null
-      : `Claude Code is available as a CLI in the terminal, desktop app (Mac/Windows), web app (claude.ai/code), and IDE extensions (VS Code, JetBrains).`,
+      : `OpenClaude is available as a CLI in the terminal and can be used across local development environments and IDE workflows.`,
    process.env.USER_TYPE === 'ant' && isUndercover()
      ? null
-      : `Fast mode for Claude Code uses the same ${FRONTIER_MODEL_NAME} model with faster output. It does NOT switch to a different model. It can be toggled with /fast.`,
+      : `Fast mode for OpenClaude uses the same ${FRONTIER_MODEL_NAME} model with faster output. It does NOT switch to a different model. It can be toggled with /fast.`,
  ].filter(item => item !== null)

  return [
@@ -755,7 +755,7 @@ export function getUnameSR(): string {
  return `${osType()} ${osRelease()}`
}

-export const DEFAULT_AGENT_PROMPT = `You are an agent for OpenClaude, an open-source fork of Claude Code. Given the user's message, you should use the tools available to complete the task. Complete the task fully—don't gold-plate, but don't leave it half-done. When you complete the task, respond with a concise report covering what was done and any key findings — the caller will relay this to the user, so it only needs the essentials.`
+export const DEFAULT_AGENT_PROMPT = `You are an agent for OpenClaude, an open-source coding agent and CLI. Given the user's message, you should use the tools available to complete the task. Complete the task fully—don't gold-plate, but don't leave it half-done. When you complete the task, respond with a concise report covering what was done and any key findings — the caller will relay this to the user, so it only needs the essentials.`

export async function enhanceSystemPromptWithEnvDetails(
  existingSystemPrompt: string[],
@@ -8,11 +8,11 @@ import { getAPIProvider } from '../utils/model/providers.js'
import { getWorkload } from '../utils/workloadContext.js'

const DEFAULT_PREFIX =
-  `You are OpenClaude, an open-source fork of Claude Code.`
+  `You are OpenClaude, an open-source coding agent and CLI.`
const AGENT_SDK_CLAUDE_CODE_PRESET_PREFIX =
-  `You are OpenClaude, an open-source fork of Claude Code, running within the Claude Agent SDK.`
+  `You are OpenClaude, an open-source coding agent and CLI running within the Claude Agent SDK.`
const AGENT_SDK_PREFIX =
-  `You are a Claude agent running in OpenClaude, built on the Claude Agent SDK.`
+  `You are OpenClaude, built on the Claude Agent SDK.`

const CLI_SYSPROMPT_PREFIX_VALUES = [
  DEFAULT_PREFIX,
@@ -181,7 +181,7 @@ function formatCost(cost: number, maxDecimalPlaces: number = 4): string {
function formatModelUsage(): string {
  const modelUsageMap = getModelUsage()
  if (Object.keys(modelUsageMap).length === 0) {
-    return 'Usage: 0 input, 0 output, 0 cache read, 0 cache write'
+    return 'Usage: 0 input, 0 output'
  }

  // Accumulate usage by short name
@@ -211,15 +211,19 @@ function formatModelUsage(): string {

  let result = 'Usage by model:'
  for (const [shortName, usage] of Object.entries(usageByShortName)) {
-    const usageString =
+    let usageString =
      ` ${formatNumber(usage.inputTokens)} input, ` +
-      `${formatNumber(usage.outputTokens)} output, ` +
-      `${formatNumber(usage.cacheReadInputTokens)} cache read, ` +
-      `${formatNumber(usage.cacheCreationInputTokens)} cache write` +
-      (usage.webSearchRequests > 0
-        ? `, ${formatNumber(usage.webSearchRequests)} web search`
-        : '') +
-      ` (${formatCost(usage.costUSD)})`
+      `${formatNumber(usage.outputTokens)} output`
+    if (usage.cacheReadInputTokens > 0) {
+      usageString += `, ${formatNumber(usage.cacheReadInputTokens)} cache read`
+    }
+    if (usage.cacheCreationInputTokens > 0) {
+      usageString += `, ${formatNumber(usage.cacheCreationInputTokens)} cache write`
+    }
+    if (usage.webSearchRequests > 0) {
+      usageString += `, ${formatNumber(usage.webSearchRequests)} web search`
+    }
+    usageString += ` (${formatCost(usage.costUSD)})`
    result += `\n` + `${shortName}:`.padStart(21) + usageString
  }
  return result
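
With zero-valued segments now suppressed, a model that used no cache and no web search gets a shorter line. Roughly, with illustrative token counts and assuming formatNumber renders them verbatim:

    // Hypothetical usage: 1500 input, 200 output, no cache, no web search.
    // Old format: "  sonnet: 1500 input, 200 output, 0 cache read, 0 cache write ($0.0123)"
    // New format: "  sonnet: 1500 input, 200 output ($0.0123)"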
@@ -96,15 +96,16 @@ async function main(): Promise<void> {
    }
  }

  // Enable configs first so we can read settings
  {
    const { enableConfigs } = await import('../utils/config.js')
    enableConfigs()
  }

  // Apply settings.env from user settings (includes GitHub provider settings from /onboard-github)
  {
    const { applySafeConfigEnvironmentVariables } = await import('../utils/managedEnv.js')
    applySafeConfigEnvironmentVariables()
    const { hydrateGeminiAccessTokenFromSecureStorage } = await import('../utils/geminiCredentials.js')
    hydrateGeminiAccessTokenFromSecureStorage()
-    const { hydrateGithubModelsTokenFromSecureStorage } = await import('../utils/githubModelsCredentials.js')
-    hydrateGithubModelsTokenFromSecureStorage()
  }

  const startupEnv = await buildStartupEnvFromProfile({
@@ -121,6 +122,16 @@ async function main(): Promise<void> {
    }
  }

+  // Hydrate GitHub credentials after profile is applied so CLAUDE_CODE_USE_GITHUB from profile is available
+  {
+    const {
+      hydrateGithubModelsTokenFromSecureStorage,
+      refreshGithubModelsTokenIfNeeded,
+    } = await import('../utils/githubModelsCredentials.js')
+    await refreshGithubModelsTokenIfNeeded()
+    hydrateGithubModelsTokenFromSecureStorage()
+  }
+
  await validateProviderEnvOrExit()

  // Print the gradient startup screen before the Ink UI loads
@@ -1,4 +1,4 @@
-import { useCallback, useEffect } from 'react'
+import { useCallback, useEffect, useSyncExternalStore } from 'react'
import type { Command } from '../commands.js'
import { useNotifications } from '../context/notifications.js'
import {
@@ -7,6 +7,11 @@ import {
} from '../services/analytics/index.js'
import { reinitializeLspServerManager } from '../services/lsp/manager.js'
import { useAppState, useSetAppState } from '../state/AppState.js'
+import {
+  getPluginCommandsState,
+  setPluginCommandsState,
+  subscribePluginCommands,
+} from '../state/pluginCommandsStore.js'
import type { AgentDefinition } from '../tools/AgentTool/loadAgentsDir.js'
import { count } from '../utils/array.js'
import { logForDebugging } from '../utils/debug.js'
@@ -39,6 +44,11 @@ export function useManagePlugins({
}: {
  enabled?: boolean
} = {}) {
+  const pluginCommands = useSyncExternalStore(
+    subscribePluginCommands,
+    getPluginCommandsState,
+    getPluginCommandsState,
+  )
  const setAppState = useSetAppState()
  const needsRefresh = useAppState(s => s.plugins.needsRefresh)
  const { addNotification } = useNotifications()
@@ -74,6 +84,7 @@ export function useManagePlugins({

      try {
        commands = await getPluginCommands()
+        setPluginCommandsState(commands)
      } catch (error) {
        const errorMessage =
          error instanceof Error ? error.message : String(error)
@@ -82,6 +93,7 @@ export function useManagePlugins({
          source: 'plugin-commands',
          error: `Failed to load plugin commands: ${errorMessage}`,
        })
+        setPluginCommandsState([])
      }

      try {
@@ -173,7 +185,7 @@ export function useManagePlugins({
            ...prevState.plugins,
            enabled,
            disabled,
-            commands,
+            commands: [],
            errors: mergedErrors,
          },
        }
@@ -226,6 +238,7 @@ export function useManagePlugins({
      logError(errorObj)
      logForDebugging(`Error loading plugins: ${error}`)
      // Set empty state on error, but preserve LSP errors and add the new error
+      setPluginCommandsState([])
      setAppState(prevState => {
        // Keep existing LSP/non-plugin-loading errors
        const existingLspErrors = prevState.plugins.errors.filter(
@@ -284,6 +297,11 @@ export function useManagePlugins({
    })
  }, [initialPluginLoad, enabled])

+  useEffect(() => {
+    if (enabled) return
+    setPluginCommandsState([])
+  }, [enabled])
+
  // Plugin state changed on disk (background reconcile, /plugin menu,
  // external settings edit). Show a notification; user runs /reload-plugins
  // to apply. The previous auto-refresh here had a stale-cache bug (only
@@ -301,4 +319,6 @@ export function useManagePlugins({
    // Do NOT auto-refresh. Do NOT reset needsRefresh — /reload-plugins
    // consumes it via refreshActivePlugins().
  }, [enabled, needsRefresh, addNotification])

  return enabled ? pluginCommands : []
}
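
pluginCommandsStore's implementation is not shown in this diff, but the subscribe/get/set trio imported above is the usual useSyncExternalStore contract. A plausible minimal sketch of its shape (an assumption, not the repo's actual file):

    import type { Command } from '../commands.js'

    // Module-level state plus change listeners: the minimal external-store
    // shape that useSyncExternalStore consumes.
    let state: Command[] = []
    const listeners = new Set<() => void>()

    export function getPluginCommandsState(): Command[] {
      return state
    }

    export function setPluginCommandsState(next: Command[]): void {
      state = next
      for (const listener of listeners) listener()
    }

    export function subscribePluginCommands(listener: () => void): () => void {
      listeners.add(listener)
      return () => listeners.delete(listener)
    }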
@@ -1,3 +1,4 @@
|
||||
import { useLayoutEffect, useRef, useState } from 'react'
|
||||
import { isInputModeCharacter } from 'src/components/PromptInput/inputModes.js'
|
||||
import { useNotifications } from 'src/context/notifications.js'
|
||||
import stripAnsi from 'strip-ansi'
|
||||
@@ -100,9 +101,74 @@ export function useTextInput({
|
||||
prewarmModifiers()
|
||||
}
|
||||
|
||||
const offset = externalOffset
|
||||
const setOffset = onOffsetChange
|
||||
const cursor = Cursor.fromText(originalValue, columns, offset)
|
||||
// Keep a local text/cursor mirror so consecutive keystrokes can advance
|
||||
// immediately even if the controlled parent value hasn't committed yet.
|
||||
const [renderState, setRenderState] = useState(() => ({
|
||||
value: originalValue,
|
||||
offset: externalOffset,
|
||||
}))
|
||||
const liveValueRef = useRef(originalValue)
|
||||
const liveOffsetRef = useRef(externalOffset)
|
||||
const lastSeenPropsRef = useRef({
|
||||
value: originalValue,
|
||||
offset: externalOffset,
|
||||
})
|
||||
const updateRenderedInput = (nextValue: string, nextOffset: number): void => {
|
||||
liveValueRef.current = nextValue
|
||||
liveOffsetRef.current = nextOffset
|
||||
setRenderState(prev =>
|
||||
prev.value === nextValue && prev.offset === nextOffset
|
||||
? prev
|
||||
: { value: nextValue, offset: nextOffset },
|
||||
)
|
||||
}
|
||||
useLayoutEffect(() => {
|
||||
if (
|
||||
lastSeenPropsRef.current.value === originalValue &&
|
||||
lastSeenPropsRef.current.offset === externalOffset
|
||||
) {
|
||||
return
|
||||
}
|
||||
|
||||
lastSeenPropsRef.current = {
|
||||
value: originalValue,
|
||||
offset: externalOffset,
|
||||
}
|
||||
updateRenderedInput(originalValue, externalOffset)
|
||||
}, [originalValue, externalOffset])
|
||||
|
||||
const value = renderState.value
|
||||
const offset = renderState.offset
|
||||
const getLiveValue = (): string => liveValueRef.current
|
||||
const getLiveCursor = (): Cursor =>
|
||||
Cursor.fromText(liveValueRef.current, columns, liveOffsetRef.current)
|
||||
const setValue = (nextValue: string, nextOffset = liveOffsetRef.current): void => {
|
||||
const previousValue = liveValueRef.current
|
||||
const previousOffset = liveOffsetRef.current
|
||||
|
||||
if (previousValue === nextValue && previousOffset === nextOffset) {
|
||||
return
|
||||
}
|
||||
|
||||
updateRenderedInput(nextValue, nextOffset)
|
||||
|
||||
if (previousValue !== nextValue) {
|
||||
onChange(nextValue)
|
||||
}
|
||||
|
||||
if (previousOffset !== nextOffset) {
|
||||
onOffsetChange(nextOffset)
|
||||
}
|
||||
}
|
||||
const setOffset = (nextOffset: number): void => {
|
||||
if (nextOffset === liveOffsetRef.current) {
|
||||
return
|
||||
}
|
||||
|
||||
updateRenderedInput(liveValueRef.current, nextOffset)
|
||||
onOffsetChange(nextOffset)
|
||||
}
|
||||
const cursor = Cursor.fromText(value, columns, offset)
|
||||
const { addNotification, removeNotification } = useNotifications()
|
||||
|
||||
const handleCtrlC = useDoublePress(
|
||||
@@ -111,9 +177,11 @@ export function useTextInput({
    },
    () => onExit?.(),
    () => {
      if (originalValue) {
      const currentValue = getLiveValue()
      if (currentValue) {
        updateRenderedInput('', 0)
        onChange('')
        setOffset(0)
        onOffsetChange(0)
        onHistoryReset?.()
      }
    },
@@ -125,7 +193,8 @@ export function useTextInput({
  // not dialog dismissal, and needs the double-press safety mechanism.
  const handleEscape = useDoublePress(
    (show: boolean) => {
      if (!originalValue || !show) {
      const currentValue = getLiveValue()
      if (!currentValue || !show) {
        return
      }
      addNotification({
@@ -136,17 +205,19 @@ export function useTextInput({
      })
    },
    () => {
      const currentValue = getLiveValue()
      // Remove the "Esc again to clear" notification immediately
      removeNotification('escape-again-to-clear')
      onClearInput?.()
      if (originalValue) {
      if (currentValue) {
        // Track double-escape usage for feature discovery
        // Save to history before clearing
        if (originalValue.trim() !== '') {
          addToHistory(originalValue)
        if (currentValue.trim() !== '') {
          addToHistory(currentValue)
        }
        updateRenderedInput('', 0)
        onChange('')
        setOffset(0)
        onOffsetChange(0)
        onHistoryReset?.()
      }
    },
@@ -154,13 +225,13 @@ export function useTextInput({

  const handleEmptyCtrlD = useDoublePress(
    show => {
      if (originalValue !== '') {
      if (getLiveValue() !== '') {
        return
      }
      onExitMessage?.(show, 'Ctrl-D')
    },
    () => {
      if (originalValue !== '') {
      if (getLiveValue() !== '') {
        return
      }
      onExit?.()
@@ -168,6 +239,7 @@ export function useTextInput({
  )

  function handleCtrlD(): MaybeCursor {
    const cursor = getLiveCursor()
    if (cursor.text === '') {
      // When input is empty, handle double-press
      handleEmptyCtrlD()
@@ -178,24 +250,28 @@ export function useTextInput({
  }

  function killToLineEnd(): Cursor {
    const cursor = getLiveCursor()
    const { cursor: newCursor, killed } = cursor.deleteToLineEnd()
    pushToKillRing(killed, 'append')
    return newCursor
  }

  function killToLineStart(): Cursor {
    const cursor = getLiveCursor()
    const { cursor: newCursor, killed } = cursor.deleteToLineStart()
    pushToKillRing(killed, 'prepend')
    return newCursor
  }

  function killWordBefore(): Cursor {
    const cursor = getLiveCursor()
    const { cursor: newCursor, killed } = cursor.deleteWordBefore()
    pushToKillRing(killed, 'prepend')
    return newCursor
  }

  function yank(): Cursor {
    const cursor = getLiveCursor()
    const text = getLastKill()
    if (text.length > 0) {
      const startOffset = cursor.offset
@@ -207,6 +283,7 @@ export function useTextInput({
  }

  function handleYankPop(): Cursor {
    const cursor = getLiveCursor()
    const popResult = yankPop()
    if (!popResult) {
      return cursor
@@ -222,13 +299,16 @@ export function useTextInput({
  }

  const handleCtrl = mapInput([
    ['a', () => cursor.startOfLine()],
    ['b', () => cursor.left()],
    ['a', () => getLiveCursor().startOfLine()],
    ['b', () => getLiveCursor().left()],
    ['c', handleCtrlC],
    ['d', handleCtrlD],
    ['e', () => cursor.endOfLine()],
    ['f', () => cursor.right()],
    ['h', () => cursor.deleteTokenBefore() ?? cursor.backspace()],
    ['e', () => getLiveCursor().endOfLine()],
    ['f', () => getLiveCursor().right()],
    ['h', () => {
      const cursor = getLiveCursor()
      return cursor.deleteTokenBefore() ?? cursor.backspace()
    }],
    ['k', killToLineEnd],
    ['n', () => downOrHistoryDown()],
    ['p', () => upOrHistoryUp()],
@@ -238,13 +318,15 @@ export function useTextInput({
  ])

  const handleMeta = mapInput([
    ['b', () => cursor.prevWord()],
    ['f', () => cursor.nextWord()],
    ['d', () => cursor.deleteWordAfter()],
    ['b', () => getLiveCursor().prevWord()],
    ['f', () => getLiveCursor().nextWord()],
    ['d', () => getLiveCursor().deleteWordAfter()],
    ['y', handleYankPop],
  ])

  function handleEnter(key: Key) {
    const cursor = getLiveCursor()
    const currentValue = getLiveValue()
    if (
      multiline &&
      cursor.offset > 0 &&
@@ -263,10 +345,11 @@ export function useTextInput({
    if (env.terminal === 'Apple_Terminal' && isModifierPressed('shift')) {
      return cursor.insert('\n')
    }
    onSubmit?.(originalValue)
    onSubmit?.(currentValue)
  }

  function upOrHistoryUp() {
    const cursor = getLiveCursor()
    if (disableCursorMovementForUpDownKeys) {
      onHistoryUp?.()
      return cursor
@@ -291,6 +374,7 @@ export function useTextInput({
    return cursor
  }
  function downOrHistoryDown() {
    const cursor = getLiveCursor()
    if (disableCursorMovementForUpDownKeys) {
      onHistoryDown?.()
      return cursor
@@ -315,7 +399,7 @@ export function useTextInput({
    return cursor
  }

  function mapKey(key: Key): InputMapper {
  function mapKey(key: Key, cursor: Cursor): InputMapper {
    switch (true) {
      case key.escape:
        return () => {
@@ -429,6 +513,7 @@ export function useTextInput({
  }

  function onInput(input: string, key: Key): void {
    const currentCursor = getLiveCursor()
    // Note: Image paste shortcut (chat:imagePaste) is handled via useKeybindings in PromptInput

    // Apply filter if provided
@@ -446,18 +531,15 @@ export function useTextInput({

    // Apply all DEL characters as backspace operations synchronously
    // Try to delete tokens first, fall back to character backspace
    let currentCursor = cursor
    let nextCursor = currentCursor
    for (let i = 0; i < delCount; i++) {
      currentCursor =
        currentCursor.deleteTokenBefore() ?? currentCursor.backspace()
      nextCursor =
        nextCursor.deleteTokenBefore() ?? nextCursor.backspace()
    }

    // Update state once with the final result
    if (!cursor.equals(currentCursor)) {
      if (cursor.text !== currentCursor.text) {
        onChange(currentCursor.text)
      }
      setOffset(currentCursor.offset)
    if (!currentCursor.equals(nextCursor)) {
      setValue(nextCursor.text, nextCursor.offset)
    }
    resetKillAccumulation()
    resetYankState()
@@ -474,13 +556,10 @@ export function useTextInput({
      resetYankState()
    }

    const nextCursor = mapKey(key)(filteredInput)
    const nextCursor = mapKey(key, currentCursor)(filteredInput)
    if (nextCursor) {
      if (!cursor.equals(nextCursor)) {
        if (cursor.text !== nextCursor.text) {
          onChange(nextCursor.text)
        }
        setOffset(nextCursor.offset)
      if (!currentCursor.equals(nextCursor)) {
        setValue(nextCursor.text, nextCursor.offset)
      }
      // SSH-coalesced Enter: on slow links, "o" + Enter can arrive as one
      // chunk "o\r". parseKeypress only matches s === '\r', so it hit the
@@ -512,6 +591,7 @@ export function useTextInput({

  return {
    onInput,
    value,
    renderedValue: cursor.render(
      cursorChar,
      mask,
@@ -520,6 +600,7 @@ export function useTextInput({
      maxVisibleLines,
    ),
    offset,
    setValue,
    setOffset,
    cursorLine: cursorPos.line - cursor.getViewportStartLine(maxVisibleLines),
    cursorColumn: cursorPos.column,
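Editor's note on the useTextInput change above: the live-mirror pattern is the heart of it. The hook keeps the freshest text and offset in refs (so key handlers can read synchronously, mid-burst) plus a deduplicated render state, while still notifying the controlled parent via onChange/onOffsetChange. A minimal sketch of the same idea, assuming a plain controlled string input; the hook name and shape here are illustrative, not part of the diff:

import { useLayoutEffect, useRef, useState } from 'react'

// Sketch: local live mirror for a controlled input. Consecutive keystrokes
// read liveRef synchronously instead of waiting for the parent to commit.
function useLiveMirror(value: string, onChange: (next: string) => void) {
  const liveRef = useRef(value)
  const [rendered, setRendered] = useState(value)

  // Resync when the parent commits a new value (history recall, external set).
  // The real hook additionally tracks last-seen props so fast local edits
  // are not clobbered by a late parent commit.
  useLayoutEffect(() => {
    liveRef.current = value
    setRendered(prev => (prev === value ? prev : value))
  }, [value])

  const update = (next: string) => {
    liveRef.current = next // synchronous read path for key handlers
    setRendered(prev => (prev === next ? prev : next)) // deduped repaint
    if (next !== value) onChange(next) // notify the controlled parent
  }

  return { rendered, getLive: () => liveRef.current, update }
}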
@@ -70,14 +70,14 @@ export function useVimInput(props: UseVimInputProps): VimInputState {
    // Vim behavior: move cursor left by 1 when exiting insert mode
    // (unless at beginning of line or at offset 0)
    const offset = textInput.offset
    if (offset > 0 && props.value[offset - 1] !== '\n') {
    if (offset > 0 && textInput.value[offset - 1] !== '\n') {
      textInput.setOffset(offset - 1)
    }

    vimStateRef.current = { mode: 'NORMAL', command: { type: 'idle' } }
    setMode('NORMAL')
    onModeChange?.('NORMAL')
  }, [onModeChange, textInput, props.value])
  }, [onModeChange, textInput])

  function createOperatorContext(
    cursor: Cursor,
@@ -85,8 +85,8 @@ export function useVimInput(props: UseVimInputProps): VimInputState {
  ): OperatorContext {
    return {
      cursor,
      text: props.value,
      setText: (newText: string) => props.onChange(newText),
      text: textInput.value,
      setText: (newText: string) => textInput.setValue(newText),
      setOffset: (offset: number) => textInput.setOffset(offset),
      enterInsert: (offset: number) => switchToInsertMode(offset),
      getRegister: () => persistentRef.current.register,
@@ -110,15 +110,18 @@ export function useVimInput(props: UseVimInputProps): VimInputState {
    const change = persistentRef.current.lastChange
    if (!change) return

    const cursor = Cursor.fromText(props.value, props.columns, textInput.offset)
    const cursor = Cursor.fromText(
      textInput.value,
      props.columns,
      textInput.offset,
    )
    const ctx = createOperatorContext(cursor, true)

    switch (change.type) {
      case 'insert':
        if (change.text) {
          const newCursor = cursor.insert(change.text)
          props.onChange(newCursor.text)
          textInput.setOffset(newCursor.offset)
          textInput.setValue(newCursor.text, newCursor.offset)
        }
        break

@@ -179,7 +182,11 @@ export function useVimInput(props: UseVimInputProps): VimInputState {
    // lookups expect single chars and a prepended space would break them.
    const filtered = inputFilter ? inputFilter(rawInput, key) : rawInput
    const input = state.mode === 'INSERT' ? filtered : rawInput
    const cursor = Cursor.fromText(props.value, props.columns, textInput.offset)
    const cursor = Cursor.fromText(
      textInput.value,
      props.columns,
      textInput.offset,
    )

    if (key.ctrl) {
      textInput.onInput(input, key)
@@ -115,7 +115,10 @@ export default class App extends PureComponent<Props, State> {
  keyParseState = INITIAL_STATE;
  // Timer for flushing incomplete escape sequences
  incompleteEscapeTimer: NodeJS.Timeout | null = null;
  stdinMode: 'readable' | 'data' = process.env.OPENCLAUDE_USE_READABLE_STDIN === '1' ? 'readable' : 'data';
  // Default to readable-mode stdin (legacy Ink behavior). The data-mode path
  // is kept as an explicit opt-in because some terminals can enter a state
  // where startup input appears frozen when data mode is the default.
  stdinMode: 'readable' | 'data' = process.env.OPENCLAUDE_USE_DATA_STDIN === '1' || process.env.OPENCLAUDE_USE_READABLE_STDIN === '0' ? 'data' : 'readable';
  // Timeout durations for incomplete sequences (ms)
  readonly NORMAL_TIMEOUT = 50; // Short timeout for regular esc sequences
  readonly PASTE_TIMEOUT = 500; // Longer timeout for paste operations
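A note on the default flip above: readable mode is now the default and data mode is opt-in. The resolution logic, pulled out as a pure function for clarity (the helper name is illustrative; the env variables are the ones in the diff):

type StdinMode = 'readable' | 'data'

// Mirrors the initializer above: data mode only when explicitly requested.
function resolveStdinMode(env: NodeJS.ProcessEnv = process.env): StdinMode {
  const optIntoData =
    env.OPENCLAUDE_USE_DATA_STDIN === '1' ||
    env.OPENCLAUDE_USE_READABLE_STDIN === '0'
  return optIntoData ? 'data' : 'readable'
}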
@@ -33,7 +33,7 @@ import createRenderer, { type Renderer } from './renderer.js';
import { CellWidth, CharPool, cellAt, createScreen, HyperlinkPool, isEmptyCellAt, migrateScreenPools, StylePool } from './screen.js';
import { applySearchHighlight } from './searchHighlight.js';
import { applySelectionOverlay, captureScrolledRows, clearSelection, createSelectionState, extendSelection, type FocusMove, findPlainTextUrlAt, getSelectedText, hasSelection, moveFocus, type SelectionState, selectLineAt, selectWordAt, shiftAnchor, shiftSelection, shiftSelectionForFollow, startSelection, updateSelection } from './selection.js';
import { SYNC_OUTPUT_SUPPORTED, supportsExtendedKeys, type Terminal, writeDiffToTerminal } from './terminal.js';
import { shouldSkipMainScreenSyncMarkers, shouldUseMainScreenRewrite, SYNC_OUTPUT_SUPPORTED, supportsExtendedKeys, type Terminal, writeDiffToTerminal } from './terminal.js';
import { CURSOR_HOME, cursorMove, cursorPosition, DISABLE_KITTY_KEYBOARD, DISABLE_MODIFY_OTHER_KEYS, ENABLE_KITTY_KEYBOARD, ENABLE_MODIFY_OTHER_KEYS, ERASE_SCREEN } from './termio/csi.js';
import { DBP, DFE, DISABLE_MOUSE_TRACKING, ENABLE_MOUSE_TRACKING, ENTER_ALT_SCREEN, EXIT_ALT_SCREEN, SHOW_CURSOR } from './termio/dec.js';
import { CLEAR_ITERM2_PROGRESS, CLEAR_TAB_STATUS, setClipboard, supportsTabStatus, wrapForMultiplexer } from './termio/osc.js';
@@ -609,12 +609,13 @@ export default class Ink {
      };
    }
    const tDiff = performance.now();
    const rewriteMainScreen = !this.altScreenActive && shouldUseMainScreenRewrite();
    const diff = this.log.render(prevFrame, frame, this.altScreenActive,
      // DECSTBM needs BSU/ESU atomicity — without it the outer terminal
      // renders the scrolled-but-not-yet-repainted intermediate state.
      // tmux is the main case (re-emits DECSTBM with its own timing and
      // doesn't implement DEC 2026, so SYNC_OUTPUT_SUPPORTED is false).
      SYNC_OUTPUT_SUPPORTED);
      SYNC_OUTPUT_SUPPORTED, rewriteMainScreen);
    const diffMs = performance.now() - tDiff;
    // Swap buffers
    this.backFrame = this.frontFrame;
@@ -759,7 +760,8 @@ export default class Ink {
      }
    }
    const tWrite = performance.now();
    writeDiffToTerminal(this.terminal, optimized, this.altScreenActive && !SYNC_OUTPUT_SUPPORTED);
    const skipSyncMarkers = this.altScreenActive ? !SYNC_OUTPUT_SUPPORTED : rewriteMainScreen || shouldSkipMainScreenSyncMarkers();
    writeDiffToTerminal(this.terminal, optimized, skipSyncMarkers);
    const writeMs = performance.now() - tWrite;

    // Update blit safety for the NEXT frame. The frame just rendered
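Background for the skipSyncMarkers logic: DEC private mode 2026 ("synchronized output") lets an application bracket a burst of writes so the terminal repaints once, atomically; BSU (begin synchronized update) is CSI ?2026h and ESU (end) is CSI ?2026l. A sketch of the emit-or-skip decision, assuming a writeFrame helper that is not in this diff:

// DEC 2026 synchronized-output markers.
const BSU = '\x1b[?2026h' // begin synchronized update
const ESU = '\x1b[?2026l' // end synchronized update

// Illustrative: bracket a frame's writes unless asked to skip the markers
// (ghostty main-screen rewrites, or terminals without DEC 2026 support).
function writeFrame(out: NodeJS.WriteStream, payload: string, skipSyncMarkers: boolean): void {
  out.write(skipSyncMarkers ? payload : BSU + payload + ESU)
}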
125 src/ink/log-update.test.ts (new file)
@@ -0,0 +1,125 @@
import { expect, test } from 'bun:test'

import type { Frame } from './frame.ts'
import { LogUpdate } from './log-update.ts'
import {
  CellWidth,
  CharPool,
  createScreen,
  HyperlinkPool,
  setCellAt,
  StylePool,
} from './screen.ts'

function collectStdout(diff: ReturnType<LogUpdate['render']>): string {
  return diff
    .filter((patch): patch is Extract<(typeof diff)[number], { type: 'stdout' }> => patch.type === 'stdout')
    .map(patch => patch.content)
    .join('')
}

function createHarness() {
  const stylePool = new StylePool()
  const charPool = new CharPool()
  const hyperlinkPool = new HyperlinkPool()

  return {
    stylePool,
    charPool,
    hyperlinkPool,
    log: new LogUpdate({ isTTY: true, stylePool }),
  }
}

function frameFromLines(
  stylePool: StylePool,
  charPool: CharPool,
  hyperlinkPool: HyperlinkPool,
  lines: string[],
  cursor = { x: 0, y: lines.length, visible: true },
): Frame {
  const width = lines.reduce((max, line) => Math.max(max, line.length), 0)
  const screen = createScreen(width, lines.length, stylePool, charPool, hyperlinkPool)

  for (const [y, line] of lines.entries()) {
    for (const [x, char] of [...line].entries()) {
      setCellAt(screen, x, y, {
        char,
        styleId: stylePool.none,
        width: CellWidth.Narrow,
      })
    }
  }

  return {
    screen,
    viewport: {
      width: Math.max(width, 1),
      height: 10,
    },
    cursor,
  }
}

test('ghostty main-screen rewrite paints prompt content without full terminal reset when width is stable', () => {
  const { stylePool, charPool, hyperlinkPool, log } = createHarness()
  const prev = frameFromLines(stylePool, charPool, hyperlinkPool, [' '])
  const next = frameFromLines(stylePool, charPool, hyperlinkPool, ['prompt'])

  const diff = log.render(prev, next, false, true, true)
  const stdout = collectStdout(diff)

  expect(diff.some(patch => patch.type === 'clearTerminal')).toBe(false)
  expect(diff.some(patch => patch.type === 'clear' && patch.count === 1)).toBe(
    true,
  )
  expect(stdout).toContain('prompt')
})

test('ghostty main-screen rewrite clears only the changed prompt tail before repainting', () => {
  const { stylePool, charPool, hyperlinkPool, log } = createHarness()
  const prev = frameFromLines(
    stylePool,
    charPool,
    hyperlinkPool,
    ['status', '> abc'],
  )
  const next = frameFromLines(
    stylePool,
    charPool,
    hyperlinkPool,
    ['status', '> abcd'],
  )

  const diff = log.render(prev, next, false, true, true)
  const stdout = collectStdout(diff)

  expect(diff.some(patch => patch.type === 'clearTerminal')).toBe(false)
  expect(diff.some(patch => patch.type === 'clear' && patch.count === 1)).toBe(
    true,
  )
  expect(stdout).toContain('abcd')
})

test('ghostty main-screen rewrite falls back to incremental diff for larger changes', () => {
  const { stylePool, charPool, hyperlinkPool, log } = createHarness()
  const prev = frameFromLines(
    stylePool,
    charPool,
    hyperlinkPool,
    ['row 0', 'row 1', 'row 2', 'row 3', 'row 4', '> abc'],
  )
  const next = frameFromLines(
    stylePool,
    charPool,
    hyperlinkPool,
    ['row 0 updated', 'row 1', 'row 2', 'row 3', 'row 4', '> abcd'],
  )

  const diff = log.render(prev, next, false, true, true)
  const stdout = collectStdout(diff)

  expect(diff.some(patch => patch.type === 'clear')).toBe(false)
  expect(stdout).toContain('updated')
  expect(stdout).toContain('abcd')
})
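Reading the three tests above: per the signature change in the next hunk, log.render takes (prev, next, altScreen, decstbmSafe, rewriteMainScreen), so the `false, true, true` calls exercise the main-screen rewrite path with DECSTBM treated as safe.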
@@ -125,6 +125,7 @@ export class LogUpdate {
    next: Frame,
    altScreen = false,
    decstbmSafe = true,
    rewriteMainScreen = false,
  ): Diff {
    if (!this.options.isTTY) {
      return this.renderFullFrame(next)
@@ -146,6 +147,13 @@ export class LogUpdate {
      return fullResetSequence_CAUSES_FLICKER(next, 'resize', stylePool)
    }

    if (!altScreen && rewriteMainScreen) {
      const rewriteStartY = findMainScreenRewriteStart(prev.screen, next.screen)
      if (rewriteStartY !== null) {
        return rewriteMainScreenFrame(prev, next, stylePool, rewriteStartY)
      }
    }

    // DECSTBM scroll optimization: when a ScrollBox's scrollTop changed,
    // shift content with a hardware scroll (CSI top;bot r + CSI n S/T)
    // instead of rewriting the whole scroll region. The shiftRows on
@@ -420,34 +428,8 @@ export class LogUpdate {
    // Main screen: if cursor needs to be past the last line of content
    // (typical: cursor.y = screen.height), emit \n to create that line
    // since cursor movement can't create new lines.
    if (altScreen) {
      // no-op; next frame's CSI H anchors cursor
    } else if (next.cursor.y >= next.screen.height) {
      // Move to column 0 of current line, then emit newlines to reach target row
      screen.txn(prev => {
        const rowsToCreate = next.cursor.y - prev.y
        if (rowsToCreate > 0) {
          // Use CR to resolve pending wrap (if any) without advancing
          // to the next line, then LF to create each new row.
          const patches: Diff = new Array<Diff[number]>(1 + rowsToCreate)
          patches[0] = CARRIAGE_RETURN
          for (let i = 0; i < rowsToCreate; i++) {
            patches[1 + i] = NEWLINE
          }
          return [patches, { dx: -prev.x, dy: rowsToCreate }]
        }
        // At or past target row - need to move cursor to correct position
        const dy = next.cursor.y - prev.y
        if (dy !== 0 || prev.x !== next.cursor.x) {
          // Use CR to clear pending wrap (if any), then cursor move
          const patches: Diff = [CARRIAGE_RETURN]
          patches.push({ type: 'cursorMove', x: next.cursor.x, y: dy })
          return [patches, { dx: next.cursor.x - prev.x, dy }]
        }
        return [[], { dx: 0, dy: 0 }]
      })
    } else {
      moveCursorTo(screen, next.cursor.x, next.cursor.y)
    if (!altScreen) {
      restoreMainScreenCursor(screen, next)
    }

    const elapsed = performance.now() - startTime
@@ -467,6 +449,77 @@ export class LogUpdate {
  }
}

function rewriteMainScreenFrame(
  prev: Frame,
  next: Frame,
  stylePool: StylePool,
  startY: number,
): Diff {
  const diff: Diff = []
  const clearCount = prev.screen.height - startY

  if (clearCount > 0) {
    const clearStartY = prev.screen.height - 1
    const clearCursor = new VirtualScreen(prev.cursor, next.viewport.width)
    moveCursorTo(clearCursor, 0, clearStartY)
    diff.push(...clearCursor.diff)
    diff.push({ type: 'clear', count: clearCount })
  }

  const screen = new VirtualScreen(
    clearCount > 0 ? { x: 0, y: startY } : prev.cursor,
    next.viewport.width,
  )
  renderFrameSlice(screen, next, startY, next.screen.height, stylePool)
  restoreMainScreenCursor(screen, next)

  return [...diff, ...screen.diff]
}

const MAX_MAIN_SCREEN_REWRITE_ROWS = 6

function findMainScreenRewriteStart(prev: Screen, next: Screen): number | null {
  const commonHeight = Math.min(prev.height, next.height)
  let firstChangedY = commonHeight

  for (let y = 0; y < commonHeight; y += 1) {
    if (!rowsEqual(prev, next, y)) {
      firstChangedY = y
      break
    }
  }

  const rewriteRows = Math.max(prev.height, next.height) - firstChangedY
  if (rewriteRows <= 0) {
    return null
  }

  return rewriteRows <= MAX_MAIN_SCREEN_REWRITE_ROWS ? firstChangedY : null
}

function rowsEqual(prev: Screen, next: Screen, y: number): boolean {
  if (prev.width !== next.width) {
    return false
  }

  if (prev.softWrap[y] !== next.softWrap[y]) {
    return false
  }

  const rowStart = y * prev.width
  const rowEnd = rowStart + prev.width
  for (let index = rowStart; index < rowEnd; index += 1) {
    if (
      prev.cells64[index] !== next.cells64[index] ||
      prev.noSelect[index] !== next.noSelect[index]
    ) {
      return false
    }
  }

  return true
}
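The rewrite gate above is easy to check by hand. With the shipped cap MAX_MAIN_SCREEN_REWRITE_ROWS = 6 and equal-height frames: a first difference at row 5 of an 8-row screen leaves a 3-row tail, so the rewrite path runs from row 5; a first difference at row 0 leaves an 8-row tail, so it falls back to the incremental diff. As a tiny sketch (the frames are hypothetical):

// Rows to repaint = tail from the first changed row to the bottom,
// assuming prev and next have the same height.
const tailRows = (height: number, firstChangedY: number) => height - firstChangedY
console.log(tailRows(8, 5) <= 6) // true: rewriteMainScreenFrame from row 5
console.log(tailRows(8, 0) <= 6) // false: incremental diff path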

function transitionHyperlink(
  diff: Diff,
  current: Hyperlink,
@@ -622,6 +675,37 @@ function renderFrameSlice(
  return screen
}

function restoreMainScreenCursor(screen: VirtualScreen, next: Frame): void {
  if (next.cursor.y >= next.screen.height) {
    // Move to column 0 of current line, then emit newlines to reach target row
    screen.txn(prev => {
      const rowsToCreate = next.cursor.y - prev.y
      if (rowsToCreate > 0) {
        // Use CR to resolve pending wrap (if any) without advancing
        // to the next line, then LF to create each new row.
        const patches: Diff = new Array<Diff[number]>(1 + rowsToCreate)
        patches[0] = CARRIAGE_RETURN
        for (let i = 0; i < rowsToCreate; i++) {
          patches[1 + i] = NEWLINE
        }
        return [patches, { dx: -prev.x, dy: rowsToCreate }]
      }
      // At or past target row - need to move cursor to correct position
      const dy = next.cursor.y - prev.y
      if (dy !== 0 || prev.x !== next.cursor.x) {
        // Use CR to clear pending wrap (if any), then cursor move
        const patches: Diff = [CARRIAGE_RETURN]
        patches.push({ type: 'cursorMove', x: next.cursor.x, y: dy })
        return [patches, { dx: next.cursor.x - prev.x, dy }]
      }
      return [[], { dx: 0, dy: 0 }]
    })
    return
  }

  moveCursorTo(screen, next.cursor.x, next.cursor.y)
}

type Delta = { dx: number; dy: number }

/**

369 src/ink/reconciler.test.ts (new file)
@@ -0,0 +1,369 @@
import { PassThrough } from 'node:stream'

import { expect, test } from 'bun:test'
import React from 'react'

import type { DOMElement, ElementNames } from './dom.ts'
import instances from './instances.ts'
import { LayoutEdge } from './layout/node.ts'
import type { ParsedKey } from './parse-keypress.ts'
import { createRoot } from './root.ts'

type TestStdin = PassThrough & {
  isTTY: boolean
  setRawMode: (mode: boolean) => void
  ref: () => void
  unref: () => void
}

const RAW_TEXT_STYLE = {
  flexDirection: 'row',
  flexGrow: 0,
  flexShrink: 1,
  textWrap: 'wrap',
} as const

function createTestStreams(): {
  stdout: PassThrough
  stdin: TestStdin
} {
  const stdout = new PassThrough()
  const stdin = new PassThrough() as TestStdin

  stdin.isTTY = true
  stdin.setRawMode = () => {}
  stdin.ref = () => {}
  stdin.unref = () => {}

  ;(stdout as unknown as { columns: number }).columns = 120
  ;(stdout as unknown as { rows: number }).rows = 24
  ;(stdout as unknown as { isTTY: boolean }).isTTY = true

  return { stdout, stdin }
}

async function waitForCondition(
  predicate: () => boolean,
  errorMessage: string,
  timeoutMs = 2000,
): Promise<void> {
  const startedAt = Date.now()

  while (Date.now() - startedAt < timeoutMs) {
    if (predicate()) {
      return
    }

    await Bun.sleep(10)
  }

  throw new Error(errorMessage)
}

function getRootNode(stdout: PassThrough): DOMElement {
  const instance = getInkInstance(stdout)

  if (!instance.rootNode) {
    throw new Error('Ink instance root node not found')
  }

  return instance.rootNode
}

function getInkInstance(stdout: PassThrough): {
  rootNode?: DOMElement
  dispatchKeyboardEvent: (parsedKey: ParsedKey) => void
} {
  const instance = instances.get(
    stdout as unknown as NodeJS.WriteStream,
  ) as
    | {
        rootNode?: DOMElement
        dispatchKeyboardEvent: (parsedKey: ParsedKey) => void
      }
    | undefined

  if (!instance) {
    throw new Error('Ink instance not found')
  }

  return instance
}

function findElement(
  node: DOMElement,
  nodeName: ElementNames,
): DOMElement | undefined {
  if (node.nodeName === nodeName) {
    return node
  }

  for (const child of node.childNodes) {
    if (child.nodeName === '#text') {
      continue
    }

    const found = findElement(child, nodeName)
    if (found) {
      return found
    }
  }

  return undefined
}

function requireElement(stdout: PassThrough, nodeName: ElementNames): DOMElement {
  const found = findElement(getRootNode(stdout), nodeName)

  if (!found) {
    throw new Error(`Expected to find ${nodeName} in Ink root tree`)
  }

  return found
}

async function createHarness(): Promise<{
  stdout: PassThrough
  stdin: TestStdin
  root: Awaited<ReturnType<typeof createRoot>>
  dispose: () => Promise<void>
}> {
  const { stdout, stdin } = createTestStreams()
  const root = await createRoot({
    stdout: stdout as unknown as NodeJS.WriteStream,
    stdin: stdin as unknown as NodeJS.ReadStream,
    patchConsole: false,
  })

  return {
    stdout,
    stdin,
    root,
    dispose: async () => {
      root.unmount()
      stdin.end()
      stdout.end()
      await Bun.sleep(25)
    },
  }
}

test('raw ink-box updates keyboard handlers and attributes in place across rerenders', async () => {
  const calls: string[] = []
  const firstHandler = () => calls.push('first')
  const secondHandler = () => calls.push('second')
  const harness = await createHarness()

  try {
    harness.root.render(
      React.createElement(
        'ink-box',
        {
          autoFocus: true,
          onKeyDown: firstHandler,
          tabIndex: 0,
        },
        'first render',
      ),
    )

    await Bun.sleep(25)

    const firstBox = requireElement(harness.stdout, 'ink-box')
    expect(firstBox.attributes.tabIndex).toBe(0)
    expect(firstBox._eventHandlers?.onKeyDown).toBe(firstHandler)

    harness.root.render(
      React.createElement(
        'ink-box',
        {
          autoFocus: true,
          onKeyDown: secondHandler,
          tabIndex: 1,
        },
        'second render',
      ),
    )

    await Bun.sleep(25)

    const secondBox = requireElement(harness.stdout, 'ink-box')
    expect(secondBox).toBe(firstBox)
    expect(secondBox.attributes.tabIndex).toBe(1)
    expect(secondBox._eventHandlers?.onKeyDown).toBe(secondHandler)

    getInkInstance(harness.stdout).dispatchKeyboardEvent({
      kind: 'key',
      name: 'a',
      fn: false,
      ctrl: false,
      meta: false,
      shift: false,
      option: false,
      super: false,
      sequence: 'a',
      raw: 'a',
      isPasted: false,
    })

    await waitForCondition(
      () => calls.length === 1,
      'Timed out waiting for rerendered onKeyDown handler to fire',
    )

    expect(calls).toEqual(['second'])
  } finally {
    await harness.dispose()
  }
})

test('raw ink-text updates textStyles in place across rerenders', async () => {
  const harness = await createHarness()

  try {
    harness.root.render(
      React.createElement(
        'ink-text',
        {
          style: RAW_TEXT_STYLE,
          textStyles: { color: 'ansi:red' },
        },
        'host text',
      ),
    )

    await Bun.sleep(25)

    const firstText = requireElement(harness.stdout, 'ink-text')
    expect(firstText.textStyles).toEqual({ color: 'ansi:red' })

    harness.root.render(
      React.createElement(
        'ink-text',
        {
          style: RAW_TEXT_STYLE,
          textStyles: { color: 'ansi:blue' },
        },
        'host text',
      ),
    )

    await Bun.sleep(25)

    const secondText = requireElement(harness.stdout, 'ink-text')
    expect(secondText).toBe(firstText)
    expect(secondText.textStyles).toEqual({ color: 'ansi:blue' })
  } finally {
    await harness.dispose()
  }
})

test('raw ink-box removes event handler when set to undefined', async () => {
  const calls: string[] = []
  const handler = () => calls.push('fired')
  const harness = await createHarness()

  try {
    harness.root.render(
      React.createElement(
        'ink-box',
        {
          autoFocus: true,
          onKeyDown: handler,
          tabIndex: 0,
        },
        'with handler',
      ),
    )

    await Bun.sleep(25)

    const box = requireElement(harness.stdout, 'ink-box')
    expect(box._eventHandlers?.onKeyDown).toBe(handler)

    // Remove the handler
    harness.root.render(
      React.createElement(
        'ink-box',
        {
          autoFocus: true,
          tabIndex: 0,
        },
        'without handler',
      ),
    )

    await Bun.sleep(25)

    const sameBox = requireElement(harness.stdout, 'ink-box')
    expect(sameBox).toBe(box)
    expect(sameBox._eventHandlers?.onKeyDown).toBeUndefined()

    // Dispatch a key event and verify the removed handler is NOT called
    getInkInstance(harness.stdout).dispatchKeyboardEvent({
      kind: 'key',
      name: 'a',
      fn: false,
      ctrl: false,
      meta: false,
      shift: false,
      option: false,
      super: false,
      sequence: 'a',
      raw: 'a',
      isPasted: false,
    })

    await Bun.sleep(50)
    expect(calls).toEqual([])
  } finally {
    await harness.dispose()
  }
})

test('raw ink-box updates layout style in place across rerenders', async () => {
  const harness = await createHarness()

  try {
    harness.root.render(
      React.createElement(
        'ink-box',
        {
          style: { flexDirection: 'row', paddingLeft: 1 },
        },
        'styled box',
      ),
    )

    await Bun.sleep(25)

    const box = requireElement(harness.stdout, 'ink-box')
    expect(box.style.flexDirection).toBe('row')
    expect(box.style.paddingLeft).toBe(1)

    harness.root.render(
      React.createElement(
        'ink-box',
        {
          style: { flexDirection: 'column', paddingLeft: 2 },
        },
        'styled box',
      ),
    )

    await Bun.sleep(25)

    const sameBox = requireElement(harness.stdout, 'ink-box')
    expect(sameBox).toBe(box)
    expect(sameBox.style.flexDirection).toBe('column')
    expect(sameBox.style.paddingLeft).toBe(2)

    // Verify the update reached the layout engine, not just the style object
    const yogaNode = sameBox.yogaNode!
    expect(yogaNode).toBeDefined()
    yogaNode.calculateLayout(120)
    expect(yogaNode.getComputedPadding(LayoutEdge.Left)).toBe(2)
  } finally {
    await harness.dispose()
  }
})
@@ -449,17 +449,25 @@ const reconciler = createReconciler<
  },
  commitUpdate(
    node: DOMElement,
    updatePayload: UpdatePayload | null,
    _type: ElementNames,
    _oldProps: Props,
    _newProps: Props,
    oldProps: Props,
    newProps: Props,
  ): void {
    if (!updatePayload) {
    // React 19 mutation mode calls commitUpdate as
    // (instance, type, oldProps, newProps, fiber) and does not pass the
    // prepareUpdate() payload here. This renderer used to treat the second
    // argument as updatePayload, which left mounted ink-* nodes with stale
    // attributes, event handlers, and textStyles until something forced a
    // remount. Recompute the prop/style diff here so host nodes update
    // correctly in place on rerender.
    const props = diff(oldProps, newProps)
    const style = diff(oldProps['style'] as Styles, newProps['style'] as Styles)
    const nextStyle = newProps['style'] as Styles | undefined

    if (!props && !style) {
      return
    }

    const { props, style, nextStyle } = updatePayload

    if (props) {
      for (const [key, value] of Object.entries(props)) {
        if (key === 'style') {
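The comment in this hunk is the crux: in React 19's react-reconciler mutation mode, prepareUpdate payloads are no longer threaded into commitUpdate; the host instance, type, and old/new props arrive directly. A hedged sketch of the two call shapes this renderer has to straddle (type names are illustrative, not the library's published ones):

// Pre-React-19 shape: the payload computed by prepareUpdate() is passed through.
type LegacyCommitUpdate<I, P> = (
  instance: I,
  updatePayload: unknown,
  type: string,
  oldProps: P,
  newProps: P,
) => void

// React 19 mutation mode: no payload; the renderer re-diffs props itself,
// which is exactly what the `if (!updatePayload)` branch above does.
type React19CommitUpdate<I, P> = (
  instance: I,
  type: string,
  oldProps: P,
  newProps: P,
) => void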
@@ -135,6 +135,13 @@ export function setXtversionName(name: string): void {
  if (xtversionName === undefined) xtversionName = name
}

export function isGhosttyTerminal(): boolean {
  if (process.env.NODE_ENV === 'test') return false
  if (process.env.TERM_PROGRAM === 'ghostty') return true
  if (process.env.TERM === 'xterm-ghostty') return true
  return xtversionName?.toLowerCase().startsWith('ghostty') ?? false
}

/** True if running in an xterm.js-based terminal (VS Code, Cursor, Windsurf
 * integrated terminals). Combines TERM_PROGRAM env check (fast, sync, but
 * not forwarded over SSH) with the XTVERSION probe result (async, survives
@@ -145,6 +152,20 @@ export function isXtermJs(): boolean {
  return xtversionName?.startsWith('xterm.js') ?? false
}

/** Ghostty currently repaints main-screen prompt updates more reliably
 * without DEC 2026 synchronized output. Prefer explicit terminal identity
 * (TERM_PROGRAM/TERM or XTVERSION) in real sessions, but keep tests
 * deterministic by disabling the env-based detection under NODE_ENV=test. */
export function shouldSkipMainScreenSyncMarkers(): boolean {
  return isGhosttyTerminal()
}

/** Ghostty's main-screen prompt updates are currently more reliable when we
 * bypass the incremental diff path and rewrite the visible prompt block. */
export function shouldUseMainScreenRewrite(): boolean {
  return isGhosttyTerminal()
}

// Terminals known to correctly implement the Kitty keyboard protocol
// (CSI >1u) and/or xterm modifyOtherKeys (CSI >4;2m) for ctrl+shift+<letter>
// disambiguation. We previously enabled unconditionally (#23350), assuming
@@ -13,6 +13,7 @@ const execFileNoThrowMock = mock(

mock.module('../../utils/execFileNoThrow.js', () => ({
  execFileNoThrow: execFileNoThrowMock,
  execFileNoThrowWithCwd: execFileNoThrowMock,
}))

mock.module('../../utils/tempfile.js', () => ({

62 src/projectOnboardingState.test.ts (new file)
@@ -0,0 +1,62 @@
import { afterEach, describe, expect, test } from 'bun:test'
import { mkdir, mkdtemp, rm, writeFile } from 'node:fs/promises'
import { tmpdir } from 'node:os'
import { join } from 'node:path'

import {
  getSteps,
  isProjectOnboardingComplete,
} from './projectOnboardingSteps.js'
import { runWithCwdOverride } from './utils/cwd.js'

let tempDir: string | undefined

afterEach(async () => {
  if (tempDir) {
    await rm(tempDir, { recursive: true, force: true })
    tempDir = undefined
  }
})

describe('project onboarding completion', () => {
  test('is incomplete when neither AGENTS.md nor CLAUDE.md exists', async () => {
    tempDir = await mkdtemp(join(tmpdir(), 'project-onboarding-'))

    await runWithCwdOverride(tempDir, async () => {
      expect(isProjectOnboardingComplete()).toBe(false)
      expect(getSteps()[1]?.text).toContain('/init')
      expect(getSteps()[1]?.text).toContain('AGENTS.md')
      expect(getSteps()[1]?.text).toContain('CLAUDE.md')
    })
  })

  test('is complete when only CLAUDE.md exists', async () => {
    tempDir = await mkdtemp(join(tmpdir(), 'project-onboarding-'))
    await writeFile(join(tempDir, 'CLAUDE.md'), '# CLAUDE.md\n')

    await runWithCwdOverride(tempDir, async () => {
      expect(isProjectOnboardingComplete()).toBe(true)
    })
  })

  test('is complete when only AGENTS.md exists', async () => {
    tempDir = await mkdtemp(join(tmpdir(), 'project-onboarding-'))
    await writeFile(join(tempDir, 'AGENTS.md'), '# AGENTS.md\n')

    await runWithCwdOverride(tempDir, async () => {
      expect(isProjectOnboardingComplete()).toBe(true)
    })
  })

  test('is complete from a nested cwd when repo instructions exist in an ancestor directory', async () => {
    tempDir = await mkdtemp(join(tmpdir(), 'project-onboarding-'))
    const nestedDir = join(tempDir, 'packages', 'app')
    await writeFile(join(tempDir, 'AGENTS.md'), '# AGENTS.md\n')
    await mkdir(nestedDir, { recursive: true })
    await writeFile(join(nestedDir, 'index.ts'), 'export {}\n')

    await runWithCwdOverride(nestedDir, async () => {
      expect(isProjectOnboardingComplete()).toBe(true)
    })
  })
})
@@ -1,50 +1,14 @@
import memoize from 'lodash-es/memoize.js'
import { join } from 'path'
import {
  getCurrentProjectConfig,
  saveCurrentProjectConfig,
} from './utils/config.js'
import { getCwd } from './utils/cwd.js'
import { isDirEmpty } from './utils/file.js'
import { getFsImplementation } from './utils/fsOperations.js'

export type Step = {
  key: string
  text: string
  isComplete: boolean
  isCompletable: boolean
  isEnabled: boolean
}

export function getSteps(): Step[] {
  const hasClaudeMd = getFsImplementation().existsSync(
    join(getCwd(), 'CLAUDE.md'),
  )
  const isWorkspaceDirEmpty = isDirEmpty(getCwd())

  return [
    {
      key: 'workspace',
      text: 'Ask Claude to create a new app or clone a repository',
      isComplete: false,
      isCompletable: true,
      isEnabled: isWorkspaceDirEmpty,
    },
    {
      key: 'claudemd',
      text: 'Run /init to create a CLAUDE.md file with instructions for Claude',
      isComplete: hasClaudeMd,
      isCompletable: true,
      isEnabled: !isWorkspaceDirEmpty,
    },
  ]
}

export function isProjectOnboardingComplete(): boolean {
  return getSteps()
    .filter(({ isCompletable, isEnabled }) => isCompletable && isEnabled)
    .every(({ isComplete }) => isComplete)
}
export {
  getSteps,
  isProjectOnboardingComplete,
  type Step,
} from './projectOnboardingSteps.js'
import { isProjectOnboardingComplete } from './projectOnboardingSteps.js'

export function maybeMarkProjectOnboardingComplete(): void {
  // Short-circuit on cached config — isProjectOnboardingComplete() hits
44 src/projectOnboardingSteps.ts (new file)
@@ -0,0 +1,44 @@
import { getCwd } from './utils/cwd.js'
import { isDirEmpty } from './utils/file.js'
import { getFsImplementation } from './utils/fsOperations.js'
import { findProjectInstructionFilePathInAncestors } from './utils/projectInstructions.js'

export type Step = {
  key: string
  text: string
  isComplete: boolean
  isCompletable: boolean
  isEnabled: boolean
}

export function getSteps(): Step[] {
  const hasRepoInstructions =
    findProjectInstructionFilePathInAncestors(
      getCwd(),
      getFsImplementation().existsSync,
    ) !== null
  const isWorkspaceDirEmpty = isDirEmpty(getCwd())

  return [
    {
      key: 'workspace',
      text: 'Ask Claude to create a new app or clone a repository',
      isComplete: false,
      isCompletable: true,
      isEnabled: isWorkspaceDirEmpty,
    },
    {
      key: 'claudemd',
      text: 'Set up repo instructions (/init creates AGENTS.md or updates existing CLAUDE.md; either file counts)',
      isComplete: hasRepoInstructions,
      isCompletable: true,
      isEnabled: !isWorkspaceDirEmpty,
    },
  ]
}

export function isProjectOnboardingComplete(): boolean {
  return getSteps()
    .filter(({ isCompletable, isEnabled }) => isCompletable && isEnabled)
    .every(({ isComplete }) => isComplete)
}
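findProjectInstructionFilePathInAncestors is imported from src/utils/projectInstructions.js and its body is not part of this diff; based on the tests (AGENTS.md or CLAUDE.md in the cwd or any ancestor), a plausible sketch follows. Treat the whole function as an assumption:

import { dirname, join } from 'node:path'

// Hypothetical reconstruction: walk from startDir up to the filesystem root,
// returning the first AGENTS.md or CLAUDE.md found, else null.
function findInstructionFileUpwards(
  startDir: string,
  exists: (path: string) => boolean,
): string | null {
  let dir = startDir
  while (true) {
    for (const name of ['AGENTS.md', 'CLAUDE.md']) {
      const candidate = join(dir, name)
      if (exists(candidate)) return candidate
    }
    const parent = dirname(dir)
    if (parent === dir) return null // reached the root
    dir = parent
  }
}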
@@ -238,6 +238,7 @@ import { usePromptsFromClaudeInChrome } from 'src/hooks/usePromptsFromClaudeInCh
import { getTipToShowOnSpinner, recordShownTip } from 'src/services/tips/tipScheduler.js';
import type { Theme } from 'src/utils/theme.js';
import { isPromptTypingSuppressionActive } from './replInputSuppression.js';
import { shouldRunStartupChecks } from './replStartupGates.js';
import { checkAndDisableBypassPermissionsIfNeeded, checkAndDisableAutoModeIfNeeded, useKickOffCheckAndDisableBypassPermissionsIfNeeded, useKickOffCheckAndDisableAutoModeIfNeeded } from 'src/utils/permissions/bypassPermissionsKillswitch.js';
import { SandboxManager } from 'src/utils/sandbox/sandbox-adapter.js';
import { SANDBOX_NETWORK_ACCESS_TOOL_NAME } from 'src/cli/structuredIO.js';
@@ -616,7 +617,6 @@ export function REPL({
  const toolPermissionContext = useAppState(s => s.toolPermissionContext);
  const verbose = useAppState(s => s.verbose);
  const mcp = useAppState(s => s.mcp);
  const plugins = useAppState(s => s.plugins);
  const agentDefinitions = useAppState(s => s.agentDefinitions);
  const fileHistory = useAppState(s => s.fileHistory);
  const initialMessage = useAppState(s => s.initialMessage);
@@ -779,7 +779,7 @@ export function REPL({
  }, [localTools, initialTools]);

  // Initialize plugin management
  useManagePlugins({
  const pluginCommands = useManagePlugins({
    enabled: !isRemoteSession
  });
  const tasksV2 = useTasksV2WithCollapseEffect();
@@ -792,10 +792,8 @@ export function REPL({
  // accepts, and only then is the REPL component mounted and this effect runs.
  // This ensures that plugin installations from repository and user settings only
  // happen after explicit user consent to trust the current working directory.
  useEffect(() => {
    if (isRemoteSession) return;
    void performStartupChecks(setAppState);
  }, [setAppState, isRemoteSession]);
  // Deferring startup checks is handled below (after promptTypingSuppressionActive
  // is declared) to avoid temporal dead zone issues.

  // Allow Claude in Chrome MCP to send prompts through MCP notifications
  // and sync permission mode changes to the Chrome extension
@@ -827,10 +825,16 @@ export function REPL({
  }, [mainThreadAgentDefinition, mergedTools]);

  // Merge commands from local state, plugins, and MCP
  const commandsWithPlugins = useMergedCommands(localCommands, plugins.commands as Command[]);
  const commandsWithPlugins = useMergedCommands(localCommands, pluginCommands as Command[]);
  const mergedCommands = useMergedCommands(commandsWithPlugins, mcp.commands as Command[]);
  // Keep plugin commands out of render-time command props. Feeding the full
  // execution set into PromptInput/Messages reintroduced the startup repaint
  // freeze, while transcript rendering still round-trips plugin skills via the
  // SkillTool's `skill` payload without needing plugin command objects here.
  const renderMergedCommands = useMergedCommands(localCommands, mcp.commands as Command[]);
  // Filter out all commands if disableSlashCommands is true
  const commands = useMemo(() => disableSlashCommands ? [] : mergedCommands, [disableSlashCommands, mergedCommands]);
  const renderCommands = useMemo(() => disableSlashCommands ? [] : renderMergedCommands, [disableSlashCommands, renderMergedCommands]);
  useIdeLogging(isRemoteSession ? EMPTY_MCP_CLIENTS : mcp.clients);
  useIdeSelection(isRemoteSession ? EMPTY_MCP_CLIENTS : mcp.clients, setIDESelection);
  const [streamMode, setStreamMode] = useState<SpinnerMode>('responding');
@@ -1429,6 +1433,25 @@ export function REPL({
  const activeRemote = sshRemote.isRemoteMode ? sshRemote : directConnect.isRemoteMode ? directConnect : remoteSession;
  const [pastedContents, setPastedContents] = useState<Record<number, PastedContent>>({});
  const [submitCount, setSubmitCount] = useState(0);

  // Defer startup checks until the user has submitted their first message.
  // A timeout or grace period is insufficient (issue #363): if the user pauses
  // before typing, startup checks can still fire and recommendation dialogs
  // steal focus. Only the user's first submission guarantees the prompt was
  // the first thing they interacted with.
  const startupChecksStartedRef = React.useRef(false);
  const hasHadFirstSubmission = (submitCount ?? 0) > 0;
  useEffect(() => {
    if (isRemoteSession) return;
    if (startupChecksStartedRef.current) return;
    if (!shouldRunStartupChecks({
      isRemoteSession,
      hasStarted: startupChecksStartedRef.current,
      hasHadFirstSubmission,
    })) return;
    startupChecksStartedRef.current = true;
    void performStartupChecks(setAppState);
  }, [setAppState, isRemoteSession, hasHadFirstSubmission]);
  // Ref instead of state to avoid triggering React re-renders on every
  // streaming text_delta. The spinner reads this via its animation timer.
  const responseLengthRef = useRef(0);
@@ -2061,13 +2084,14 @@ export function REPL({
    if (allowDialogsWithAnimation && showRemoteCallout) return 'remote-callout';

    // LSP plugin recommendation (lowest priority - non-blocking suggestion)
    if (allowDialogsWithAnimation && lspRecommendation) return 'lsp-recommendation';
    // Suppress during startup window to prevent stealing focus from the prompt (issue #363)
    if (allowDialogsWithAnimation && lspRecommendation && startupChecksStartedRef.current) return 'lsp-recommendation';

    // Plugin hint from CLI/SDK stderr (same priority band as LSP rec)
    if (allowDialogsWithAnimation && hintRecommendation) return 'plugin-hint';
    if (allowDialogsWithAnimation && hintRecommendation && startupChecksStartedRef.current) return 'plugin-hint';

    // Desktop app upsell (max 3 launches, lowest priority)
    if (allowDialogsWithAnimation && showDesktopUpsellStartup) return 'desktop-upsell';
    if (allowDialogsWithAnimation && showDesktopUpsellStartup && startupChecksStartedRef.current) return 'desktop-upsell';
    return undefined;
  }
  const focusedInputDialog = getFocusedInputDialog();
@@ -4408,7 +4432,7 @@ export function REPL({
  // and transcript-mode are mutually exclusive (this early return), so
  // only one ScrollBox is ever mounted at a time.
  const transcriptScrollRef = isFullscreenEnvEnabled() && !disableVirtualScroll && !dumpMode ? scrollRef : undefined;
  const transcriptMessagesElement = <Messages messages={transcriptMessages} tools={tools} commands={commands} verbose={true} toolJSX={null} toolUseConfirmQueue={[]} inProgressToolUseIDs={inProgressToolUseIDs} isMessageSelectorVisible={false} conversationId={conversationId} screen={screen} agentDefinitions={agentDefinitions} streamingToolUses={transcriptStreamingToolUses} showAllInTranscript={showAllInTranscript} onOpenRateLimitOptions={handleOpenRateLimitOptions} isLoading={isLoading} hidePastThinking={true} streamingThinking={streamingThinking} scrollRef={transcriptScrollRef} jumpRef={jumpRef} onSearchMatchesChange={onSearchMatchesChange} scanElement={scanElement} setPositions={setPositions} disableRenderCap={dumpMode} />;
  const transcriptMessagesElement = <Messages messages={transcriptMessages} tools={tools} commands={renderCommands} verbose={true} toolJSX={null} toolUseConfirmQueue={[]} inProgressToolUseIDs={inProgressToolUseIDs} isMessageSelectorVisible={false} conversationId={conversationId} screen={screen} agentDefinitions={agentDefinitions} streamingToolUses={transcriptStreamingToolUses} showAllInTranscript={showAllInTranscript} onOpenRateLimitOptions={handleOpenRateLimitOptions} isLoading={isLoading} hidePastThinking={true} streamingThinking={streamingThinking} scrollRef={transcriptScrollRef} jumpRef={jumpRef} onSearchMatchesChange={onSearchMatchesChange} scanElement={scanElement} setPositions={setPositions} disableRenderCap={dumpMode} />;
  const transcriptToolJSX = toolJSX && <Box flexDirection="column" width="100%">
    {toolJSX.jsx}
  </Box>;
@@ -4576,7 +4600,7 @@ export function REPL({
      jumpToNew(scrollRef.current);
    }} scrollable={<>
      <TeammateViewHeader />
      <Messages messages={displayedMessages} tools={tools} commands={commands} verbose={verbose} toolJSX={toolJSX} toolUseConfirmQueue={toolUseConfirmQueue} inProgressToolUseIDs={viewedTeammateTask ? viewedTeammateTask.inProgressToolUseIDs ?? new Set() : inProgressToolUseIDs} isMessageSelectorVisible={isMessageSelectorVisible} conversationId={conversationId} screen={screen} streamingToolUses={streamingToolUses} showAllInTranscript={showAllInTranscript} agentDefinitions={agentDefinitions} onOpenRateLimitOptions={handleOpenRateLimitOptions} isLoading={isLoading} streamingText={isLoading && !viewedAgentTask ? visibleStreamingText : null} isBriefOnly={viewedAgentTask ? false : isBriefOnly} unseenDivider={viewedAgentTask ? undefined : unseenDivider} scrollRef={isFullscreenEnvEnabled() ? scrollRef : undefined} trackStickyPrompt={isFullscreenEnvEnabled() ? true : undefined} cursor={cursor} setCursor={setCursor} cursorNavRef={cursorNavRef} />
      <Messages messages={displayedMessages} tools={tools} commands={renderCommands} verbose={verbose} toolJSX={toolJSX} toolUseConfirmQueue={toolUseConfirmQueue} inProgressToolUseIDs={viewedTeammateTask ? viewedTeammateTask.inProgressToolUseIDs ?? new Set() : inProgressToolUseIDs} isMessageSelectorVisible={isMessageSelectorVisible} conversationId={conversationId} screen={screen} streamingToolUses={streamingToolUses} showAllInTranscript={showAllInTranscript} agentDefinitions={agentDefinitions} onOpenRateLimitOptions={handleOpenRateLimitOptions} isLoading={isLoading} streamingText={isLoading && !viewedAgentTask ? visibleStreamingText : null} isBriefOnly={viewedAgentTask ? false : isBriefOnly} unseenDivider={viewedAgentTask ? undefined : unseenDivider} scrollRef={isFullscreenEnvEnabled() ? scrollRef : undefined} trackStickyPrompt={isFullscreenEnvEnabled() ? true : undefined} cursor={cursor} setCursor={setCursor} cursorNavRef={cursorNavRef} />
      <AwsAuthStatusBox />
      {/* Hide the processing placeholder while a modal is showing —
          it would sit at the last visible transcript row right above
@@ -4909,7 +4933,7 @@ export function REPL({
      {"external" === 'ant' && skillImprovementSurvey.suggestion && <SkillImprovementSurvey isOpen={skillImprovementSurvey.isOpen} skillName={skillImprovementSurvey.suggestion.skillName} updates={skillImprovementSurvey.suggestion.updates} handleSelect={skillImprovementSurvey.handleSelect} inputValue={inputValue} setInputValue={setInputValue} />}
      {showIssueFlagBanner && <IssueFlagBanner />}
      { }
      <PromptInput debug={debug} ideSelection={ideSelection} hasSuppressedDialogs={!!hasSuppressedDialogs} isLocalJSXCommandActive={isShowingLocalJSXCommand} getToolUseContext={getToolUseContext} toolPermissionContext={toolPermissionContext} setToolPermissionContext={setToolPermissionContext} apiKeyStatus={apiKeyStatus} commands={commands} agents={agentDefinitions.activeAgents} isLoading={isLoading} onExit={handleExit} verbose={verbose} messages={messages} onAutoUpdaterResult={setAutoUpdaterResult} autoUpdaterResult={autoUpdaterResult} input={inputValue} onInputChange={setInputValue} mode={inputMode} onModeChange={setInputMode} stashedPrompt={stashedPrompt} setStashedPrompt={setStashedPrompt} submitCount={submitCount} onShowMessageSelector={handleShowMessageSelector} onMessageActionsEnter={
      <PromptInput debug={debug} ideSelection={ideSelection} hasSuppressedDialogs={!!hasSuppressedDialogs} isLocalJSXCommandActive={isShowingLocalJSXCommand} getToolUseContext={getToolUseContext} toolPermissionContext={toolPermissionContext} setToolPermissionContext={setToolPermissionContext} apiKeyStatus={apiKeyStatus} commands={renderCommands} agents={agentDefinitions.activeAgents} isLoading={isLoading} onExit={handleExit} verbose={verbose} messages={messages} onAutoUpdaterResult={setAutoUpdaterResult} autoUpdaterResult={autoUpdaterResult} input={inputValue} onInputChange={setInputValue} mode={inputMode} onModeChange={setInputMode} stashedPrompt={stashedPrompt} setStashedPrompt={setStashedPrompt} submitCount={submitCount} onShowMessageSelector={handleShowMessageSelector} onMessageActionsEnter={
        // Works during isLoading — edit cancels first; uuid selection survives appends.
        feature('MESSAGE_ACTIONS') && isFullscreenEnvEnabled() && !disableMessageActions ? enterMessageActions : undefined} mcpClients={mcpClients} pastedContents={pastedContents} setPastedContents={setPastedContents} vimMode={vimMode} setVimMode={setVimMode} showBashesDialog={showBashesDialog} setShowBashesDialog={setShowBashesDialog} onSubmit={onSubmit} onAgentSubmit={onAgentSubmit} isSearchingHistory={isSearchingHistory} setIsSearchingHistory={setIsSearchingHistory} helpOpen={isHelpOpen} setHelpOpen={setIsHelpOpen} insertTextRef={feature('VOICE_MODE') ? insertTextRef : undefined} voiceInterimRange={voice.interimRange} />
      <SessionBackgroundHint onBackgroundSession={handleBackgroundSession} isLoading={isLoading} />
53
src/screens/replStartupGates.test.ts
Normal file
@@ -0,0 +1,53 @@
import { describe, expect, test } from 'bun:test'

import { shouldRunStartupChecks } from './replStartupGates.js'

describe('shouldRunStartupChecks', () => {
  test('runs checks after first message submission', () => {
    expect(shouldRunStartupChecks({
      isRemoteSession: false,
      hasStarted: false,
      hasHadFirstSubmission: true,
    })).toBe(true)
  })

  test('skips checks in remote sessions even after submission', () => {
    expect(shouldRunStartupChecks({
      isRemoteSession: true,
      hasStarted: false,
      hasHadFirstSubmission: true,
    })).toBe(false)
  })

  test('skips checks if already started', () => {
    expect(shouldRunStartupChecks({
      isRemoteSession: false,
      hasStarted: true,
      hasHadFirstSubmission: true,
    })).toBe(false)
  })

  test('does not run checks before first submission', () => {
    expect(shouldRunStartupChecks({
      isRemoteSession: false,
      hasStarted: false,
      hasHadFirstSubmission: false,
    })).toBe(false)
  })

  test('does not run checks when idle before first submission', () => {
    expect(shouldRunStartupChecks({
      isRemoteSession: false,
      hasStarted: false,
      hasHadFirstSubmission: false,
    })).toBe(false)
  })

  test('skips checks in remote session regardless of other conditions', () => {
    expect(shouldRunStartupChecks({
      isRemoteSession: true,
      hasStarted: false,
      hasHadFirstSubmission: false,
    })).toBe(false)
  })
})
35
src/screens/replStartupGates.ts
Normal file
@@ -0,0 +1,35 @@
/**
 * Startup gates for the REPL.
 *
 * Prevents startup plugin checks and recommendation dialogs from stealing
 * focus before the user has interacted with the prompt.
 *
 * This addresses the root cause of issue #363: on mount, performStartupChecks
 * triggers plugin loading, which populates trackedFiles, which triggers
 * useLspPluginRecommendation to surface an LSP recommendation dialog. Since
 * promptTypingSuppressionActive is false before the user has typed anything,
 * getFocusedInputDialog() returns the dialog, unmounting PromptInput entirely.
 *
 * The fix gates startup checks on actual prompt interaction. A pure timeout
 * or grace period is insufficient because pausing before typing would still
 * allow dialogs to steal focus. Only the user's first submission guarantees
 * the prompt is no longer in the vulnerable pre-interaction window.
 */

/**
 * Determines whether startup checks should run.
 *
 * Startup checks are deferred until the user has submitted their first
 * message. This guarantees the prompt was the first thing the user interacted
 * with, so no recommendation dialog can steal focus before the first keystroke.
 */
export function shouldRunStartupChecks(options: {
  isRemoteSession: boolean;
  hasStarted: boolean;
  hasHadFirstSubmission: boolean;
}): boolean {
  if (options.isRemoteSession) return false;
  if (options.hasStarted) return false;
  if (!options.hasHadFirstSubmission) return false;
  return true;
}
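Editor's note: a minimal sketch of how a caller might consume this gate, assuming a hypothetical hasHadFirstSubmission state flag, a hasStartedRef, and a performStartupChecks effect — those names are illustrative; only shouldRunStartupChecks above is real:

// Sketch only — hypothetical surrounding wiring; the actual REPL hookup
// is not part of this diff.
useEffect(() => {
  if (!shouldRunStartupChecks({
    isRemoteSession,
    hasStarted: hasStartedRef.current,
    hasHadFirstSubmission,
  })) return
  hasStartedRef.current = true   // never re-run for this session
  void performStartupChecks()    // safe now: the prompt already has focus
}, [isRemoteSession, hasHadFirstSubmission])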
@@ -14,16 +14,27 @@ type ShimClient = {
const originalFetch = globalThis.fetch
const originalMacro = (globalThis as Record<string, unknown>).MACRO
const originalEnv = {
  CLAUDE_CODE_USE_OPENAI: process.env.CLAUDE_CODE_USE_OPENAI,
  CLAUDE_CODE_USE_GEMINI: process.env.CLAUDE_CODE_USE_GEMINI,
  GEMINI_API_KEY: process.env.GEMINI_API_KEY,
  GEMINI_MODEL: process.env.GEMINI_MODEL,
  GEMINI_BASE_URL: process.env.GEMINI_BASE_URL,
  GEMINI_AUTH_MODE: process.env.GEMINI_AUTH_MODE,
  GOOGLE_API_KEY: process.env.GOOGLE_API_KEY,
  OPENAI_API_KEY: process.env.OPENAI_API_KEY,
  OPENAI_BASE_URL: process.env.OPENAI_BASE_URL,
  OPENAI_MODEL: process.env.OPENAI_MODEL,
  ANTHROPIC_API_KEY: process.env.ANTHROPIC_API_KEY,
  ANTHROPIC_AUTH_TOKEN: process.env.ANTHROPIC_AUTH_TOKEN,
  ANTHROPIC_CUSTOM_HEADERS: process.env.ANTHROPIC_CUSTOM_HEADERS,
}

function restoreEnv(key: string, value: string | undefined): void {
  if (value === undefined) {
    delete process.env[key]
  } else {
    process.env[key] = value
  }
}

beforeEach(() => {
@@ -32,27 +43,33 @@ beforeEach(() => {
  process.env.GEMINI_API_KEY = 'gemini-test-key'
  process.env.GEMINI_MODEL = 'gemini-2.0-flash'
  process.env.GEMINI_BASE_URL = 'https://gemini.example/v1beta/openai'
  process.env.GEMINI_AUTH_MODE = 'api-key'

  delete process.env.CLAUDE_CODE_USE_OPENAI
  delete process.env.GOOGLE_API_KEY
  delete process.env.OPENAI_API_KEY
  delete process.env.OPENAI_BASE_URL
  delete process.env.OPENAI_MODEL
  delete process.env.ANTHROPIC_API_KEY
  delete process.env.ANTHROPIC_AUTH_TOKEN
  delete process.env.ANTHROPIC_CUSTOM_HEADERS
})

afterEach(() => {
  ;(globalThis as Record<string, unknown>).MACRO = originalMacro
-   process.env.CLAUDE_CODE_USE_GEMINI = originalEnv.CLAUDE_CODE_USE_GEMINI
-   process.env.GEMINI_API_KEY = originalEnv.GEMINI_API_KEY
-   process.env.GEMINI_MODEL = originalEnv.GEMINI_MODEL
-   process.env.GEMINI_BASE_URL = originalEnv.GEMINI_BASE_URL
-   process.env.GOOGLE_API_KEY = originalEnv.GOOGLE_API_KEY
-   process.env.OPENAI_API_KEY = originalEnv.OPENAI_API_KEY
-   process.env.OPENAI_BASE_URL = originalEnv.OPENAI_BASE_URL
-   process.env.OPENAI_MODEL = originalEnv.OPENAI_MODEL
-   process.env.ANTHROPIC_API_KEY = originalEnv.ANTHROPIC_API_KEY
-   process.env.ANTHROPIC_AUTH_TOKEN = originalEnv.ANTHROPIC_AUTH_TOKEN
+   restoreEnv('CLAUDE_CODE_USE_OPENAI', originalEnv.CLAUDE_CODE_USE_OPENAI)
+   restoreEnv('CLAUDE_CODE_USE_GEMINI', originalEnv.CLAUDE_CODE_USE_GEMINI)
+   restoreEnv('GEMINI_API_KEY', originalEnv.GEMINI_API_KEY)
+   restoreEnv('GEMINI_MODEL', originalEnv.GEMINI_MODEL)
+   restoreEnv('GEMINI_BASE_URL', originalEnv.GEMINI_BASE_URL)
+   restoreEnv('GEMINI_AUTH_MODE', originalEnv.GEMINI_AUTH_MODE)
+   restoreEnv('GOOGLE_API_KEY', originalEnv.GOOGLE_API_KEY)
+   restoreEnv('OPENAI_API_KEY', originalEnv.OPENAI_API_KEY)
+   restoreEnv('OPENAI_BASE_URL', originalEnv.OPENAI_BASE_URL)
+   restoreEnv('OPENAI_MODEL', originalEnv.OPENAI_MODEL)
+   restoreEnv('ANTHROPIC_API_KEY', originalEnv.ANTHROPIC_API_KEY)
+   restoreEnv('ANTHROPIC_AUTH_TOKEN', originalEnv.ANTHROPIC_AUTH_TOKEN)
+   restoreEnv('ANTHROPIC_CUSTOM_HEADERS', originalEnv.ANTHROPIC_CUSTOM_HEADERS)
  globalThis.fetch = originalFetch
})
@@ -119,3 +136,135 @@ test('routes Gemini provider requests through the OpenAI-compatible shim', async
    model: 'gemini-2.0-flash',
  })
})

test('strips Anthropic-specific custom headers before sending OpenAI-compatible shim requests', async () => {
  let capturedHeaders: Headers | undefined

  process.env.CLAUDE_CODE_USE_OPENAI = '1'
  process.env.OPENAI_API_KEY = 'openai-test-key'
  process.env.OPENAI_BASE_URL = 'http://example.test/v1'
  process.env.OPENAI_MODEL = 'gpt-4o'
  process.env.ANTHROPIC_CUSTOM_HEADERS = [
    'anthropic-version: 2023-06-01',
    'anthropic-beta: prompt-caching-2024-07-31',
    'x-anthropic-additional-protection: true',
    'x-claude-remote-session-id: remote-123',
    'x-app: cli',
    'x-safe-header: keep-me',
  ].join('\n')

  globalThis.fetch = (async (_input, init) => {
    capturedHeaders = new Headers(init?.headers)

    return new Response(
      JSON.stringify({
        id: 'chatcmpl-openai',
        model: 'gpt-4o',
        choices: [
          {
            message: {
              role: 'assistant',
              content: 'ok',
            },
            finish_reason: 'stop',
          },
        ],
        usage: {
          prompt_tokens: 8,
          completion_tokens: 3,
          total_tokens: 11,
        },
      }),
      {
        headers: {
          'Content-Type': 'application/json',
        },
      },
    )
  }) as FetchType

  const client = (await getAnthropicClient({
    maxRetries: 0,
    model: 'gpt-4o',
  })) as unknown as ShimClient

  await client.beta.messages.create({
    model: 'gpt-4o',
    system: 'test system',
    messages: [{ role: 'user', content: 'hello' }],
    max_tokens: 64,
    stream: false,
  })

  expect(capturedHeaders?.get('anthropic-version')).toBeNull()
  expect(capturedHeaders?.get('anthropic-beta')).toBeNull()
  expect(capturedHeaders?.get('x-anthropic-additional-protection')).toBeNull()
  expect(capturedHeaders?.get('x-claude-remote-session-id')).toBeNull()
  expect(capturedHeaders?.get('x-app')).toBeNull()
  expect(capturedHeaders?.get('x-safe-header')).toBe('keep-me')
  expect(capturedHeaders?.get('authorization')).toBe('Bearer openai-test-key')
})
test('strips Anthropic-specific custom headers on providerOverride shim requests too', async () => {
  let capturedHeaders: Headers | undefined

  process.env.ANTHROPIC_CUSTOM_HEADERS = [
    'anthropic-version: 2023-06-01',
    'anthropic-beta: prompt-caching-2024-07-31',
    'x-claude-remote-session-id: remote-123',
    'x-safe-header: keep-me',
  ].join('\n')

  globalThis.fetch = (async (_input, init) => {
    capturedHeaders = new Headers(init?.headers)

    return new Response(
      JSON.stringify({
        id: 'chatcmpl-provider-override',
        model: 'gpt-4o',
        choices: [
          {
            message: {
              role: 'assistant',
              content: 'ok',
            },
            finish_reason: 'stop',
          },
        ],
        usage: {
          prompt_tokens: 8,
          completion_tokens: 3,
          total_tokens: 11,
        },
      }),
      {
        headers: {
          'Content-Type': 'application/json',
        },
      },
    )
  }) as FetchType

  const client = (await getAnthropicClient({
    maxRetries: 0,
    providerOverride: {
      model: 'gpt-4o',
      baseURL: 'http://example.test/v1',
      apiKey: 'provider-test-key',
    },
  })) as unknown as ShimClient

  await client.beta.messages.create({
    model: 'unused',
    system: 'test system',
    messages: [{ role: 'user', content: 'hello' }],
    max_tokens: 64,
    stream: false,
  })

  expect(capturedHeaders?.get('anthropic-version')).toBeNull()
  expect(capturedHeaders?.get('anthropic-beta')).toBeNull()
  expect(capturedHeaders?.get('x-claude-remote-session-id')).toBeNull()
  expect(capturedHeaders?.get('x-safe-header')).toBe('keep-me')
  expect(capturedHeaders?.get('authorization')).toBe('Bearer provider-test-key')
})
@@ -177,7 +177,8 @@ export async function getAnthropicClient({
  if (
    isEnvTruthy(process.env.CLAUDE_CODE_USE_OPENAI) ||
    isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB) ||
-     isEnvTruthy(process.env.CLAUDE_CODE_USE_GEMINI)
+     isEnvTruthy(process.env.CLAUDE_CODE_USE_GEMINI) ||
+     isEnvTruthy(process.env.CLAUDE_CODE_USE_MISTRAL)
  ) {
    const { createOpenAIShimClient } = await import('./openaiShim.js')
    return createOpenAIShimClient({
@@ -17,16 +17,23 @@ const tempDirs: string[] = []
const originalEnv = {
  OPENAI_BASE_URL: process.env.OPENAI_BASE_URL,
  OPENAI_API_BASE: process.env.OPENAI_API_BASE,
  CLAUDE_CODE_USE_GITHUB: process.env.CLAUDE_CODE_USE_GITHUB,
}

afterEach(() => {
  if (originalEnv.OPENAI_BASE_URL === undefined) delete process.env.OPENAI_BASE_URL
  else process.env.OPENAI_BASE_URL = originalEnv.OPENAI_BASE_URL

  if (originalEnv.OPENAI_API_BASE === undefined) delete process.env.OPENAI_API_BASE
  else process.env.OPENAI_API_BASE = originalEnv.OPENAI_API_BASE

  if (originalEnv.CLAUDE_CODE_USE_GITHUB === undefined) delete process.env.CLAUDE_CODE_USE_GITHUB
  else process.env.CLAUDE_CODE_USE_GITHUB = originalEnv.CLAUDE_CODE_USE_GITHUB

  while (tempDirs.length > 0) {
    const dir = tempDirs.pop()
    if (dir) rmSync(dir, { recursive: true, force: true })
  }

-   process.env.OPENAI_BASE_URL = originalEnv.OPENAI_BASE_URL
-   process.env.OPENAI_API_BASE = originalEnv.OPENAI_API_BASE
})

function createTempAuthJson(payload: Record<string, unknown>): string {
@@ -71,6 +78,7 @@ describe('Codex provider config', () => {
  test('resolves codexplan alias to Codex transport with reasoning', () => {
    delete process.env.OPENAI_BASE_URL
    delete process.env.OPENAI_API_BASE
    delete process.env.CLAUDE_CODE_USE_GITHUB

    const resolved = resolveProviderRequest({ model: 'codexplan' })
    expect(resolved.transport).toBe('codex_responses')
@@ -457,6 +465,37 @@ describe('Codex request translation', () => {
  ])
})
test('strips leaked reasoning preamble from completed Codex text responses', () => {
  const message = convertCodexResponseToAnthropicMessage(
    {
      id: 'resp_1',
      model: 'gpt-5.4',
      output: [
        {
          type: 'message',
          role: 'assistant',
          content: [
            {
              type: 'output_text',
              text:
                'The user just said "hey" - a simple greeting. I should respond briefly and friendly.\n\nHey! How can I help you today?',
            },
          ],
        },
      ],
      usage: { input_tokens: 12, output_tokens: 4 },
    },
    'gpt-5.4',
  )

  expect(message.content).toEqual([
    {
      type: 'text',
      text: 'Hey! How can I help you today?',
    },
  ])
})

test('translates Codex SSE text stream into Anthropic events', async () => {
  const responseText = [
    'event: response.output_item.added',
@@ -487,4 +526,44 @@ describe('Codex request translation', () => {
    'message_stop',
  ])
})

test('strips leaked reasoning preamble from Codex SSE text stream', async () => {
  const responseText = [
    'event: response.output_item.added',
    'data: {"type":"response.output_item.added","item":{"id":"msg_1","type":"message","status":"in_progress","content":[],"role":"assistant"},"output_index":0,"sequence_number":0}',
    '',
    'event: response.content_part.added',
    'data: {"type":"response.content_part.added","content_index":0,"item_id":"msg_1","output_index":0,"part":{"type":"output_text","text":""},"sequence_number":1}',
    '',
    'event: response.output_text.delta',
    'data: {"type":"response.output_text.delta","content_index":0,"delta":"The user just said \\"hey\\" - a simple greeting. I should respond briefly and friendly.\\n\\nHey! How can I help you today?","item_id":"msg_1","output_index":0,"sequence_number":2}',
    '',
    'event: response.output_item.done',
    'data: {"type":"response.output_item.done","item":{"id":"msg_1","type":"message","status":"completed","content":[{"type":"output_text","text":"The user just said \\"hey\\" - a simple greeting. I should respond briefly and friendly.\\n\\nHey! How can I help you today?"}],"role":"assistant"},"output_index":0,"sequence_number":3}',
    '',
    'event: response.completed',
    'data: {"type":"response.completed","response":{"id":"resp_1","status":"completed","model":"gpt-5.4","output":[{"type":"message","role":"assistant","content":[{"type":"output_text","text":"The user just said \\"hey\\" - a simple greeting. I should respond briefly and friendly.\\n\\nHey! How can I help you today?"}]}],"usage":{"input_tokens":2,"output_tokens":1}},"sequence_number":4}',
    '',
  ].join('\n')

  const stream = new ReadableStream({
    start(controller) {
      controller.enqueue(new TextEncoder().encode(responseText))
      controller.close()
    },
  })

  const textDeltas: string[] = []
  for await (const event of codexStreamToAnthropic(
    new Response(stream),
    'gpt-5.4',
  )) {
    const delta = (event as { delta?: { type?: string; text?: string } }).delta
    if (delta?.type === 'text_delta' && typeof delta.text === 'string') {
      textDeltas.push(delta.text)
    }
  }

  expect(textDeltas).toEqual(['Hey! How can I help you today?'])
})
})
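Editor's note: taken together, these tests pin the observable contract of the reasoningLeakSanitizer helpers imported later in this diff. A sketch of that contract, using only inputs and outputs the tests actually assert (the heuristic's internals are not shown here):

// Given the leaked-preamble sample used throughout these tests:
const leaked =
  'The user just said "hey" - a simple greeting. I should respond briefly and friendly.\n\nHey! How can I help you today?'
stripLeakedReasoningPreamble(leaked)  // => 'Hey! How can I help you today?'
// Presumably (assumption — not asserted above): text without a reasoning
// prefix passes through unchanged.
stripLeakedReasoningPreamble('Hey! How can I help you today?')  // => unchanged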
@@ -4,6 +4,11 @@ import type {
  ResolvedProviderRequest,
} from './providerConfig.js'
import { sanitizeSchemaForOpenAICompat } from './openaiSchemaSanitizer.js'
import {
  looksLikeLeakedReasoningPrefix,
  shouldBufferPotentialReasoningPrefix,
  stripLeakedReasoningPreamble,
} from './reasoningLeakSanitizer.js'

export interface AnthropicUsage {
  input_tokens: number
@@ -75,12 +80,17 @@ type CodexSseEvent = {
function makeUsage(usage?: {
  input_tokens?: number
  output_tokens?: number
+   input_tokens_details?: { cached_tokens?: number }
+   prompt_tokens_details?: { cached_tokens?: number }
}): AnthropicUsage {
  return {
    input_tokens: usage?.input_tokens ?? 0,
    output_tokens: usage?.output_tokens ?? 0,
    cache_creation_input_tokens: 0,
-     cache_read_input_tokens: 0,
+     cache_read_input_tokens:
+       usage?.input_tokens_details?.cached_tokens ??
+       usage?.prompt_tokens_details?.cached_tokens ??
+       0,
  }
}
@@ -678,17 +688,34 @@ export async function* codexStreamToAnthropic(
    { index: number; toolUseId: string }
  >()
  let activeTextBlockIndex: number | null = null
+   let activeTextBuffer = ''
+   let textBufferMode: 'none' | 'pending' | 'strip' = 'none'
  let nextContentBlockIndex = 0
  let sawToolUse = false
  let finalResponse: Record<string, any> | undefined

  const closeActiveTextBlock = async function* () {
    if (activeTextBlockIndex === null) return
+     if (textBufferMode !== 'none') {
+       const sanitized = stripLeakedReasoningPreamble(activeTextBuffer)
+       if (sanitized) {
+         yield {
+           type: 'content_block_delta',
+           index: activeTextBlockIndex,
+           delta: {
+             type: 'text_delta',
+             text: sanitized,
+           },
+         }
+       }
+     }
    yield {
      type: 'content_block_stop',
      index: activeTextBlockIndex,
    }
    activeTextBlockIndex = null
+     activeTextBuffer = ''
+     textBufferMode = 'none'
  }
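Editor's note: the delta handling that follows drives a small state machine around activeTextBuffer. A summary sketch — the transitions are read off the code above and below, and the helper function is hypothetical:

// 'none'    — stream text through normally
// 'pending' — the prefix might still turn out to be leaked reasoning, so
//             deltas are withheld until the heuristic decides either way
// 'strip'   — the prefix matched the leak heuristic; only the sanitized
//             remainder is emitted when the block closes (closeActiveTextBlock)
type TextBufferMode = 'none' | 'pending' | 'strip'
function nextMode(mode: TextBufferMode, buf: string): TextBufferMode {
  if (mode === 'strip' || looksLikeLeakedReasoningPrefix(buf)) return 'strip'
  if (shouldBufferPotentialReasoningPrefix(buf)) return 'pending'
  return 'none'  // buffered text is flushed as a normal delta at this point
}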
  const startTextBlockIfNeeded = async function* () {
@@ -764,7 +791,36 @@ export async function* codexStreamToAnthropic(

    if (event.event === 'response.output_text.delta') {
      yield* startTextBlockIfNeeded()
+       activeTextBuffer += payload.delta ?? ''
+       if (activeTextBlockIndex !== null) {
+         if (
+           textBufferMode === 'strip' ||
+           looksLikeLeakedReasoningPrefix(activeTextBuffer)
+         ) {
+           textBufferMode = 'strip'
+           continue
+         }
+
+         if (textBufferMode === 'pending') {
+           if (shouldBufferPotentialReasoningPrefix(activeTextBuffer)) {
+             continue
+           }
+           yield {
+             type: 'content_block_delta',
+             index: activeTextBlockIndex,
+             delta: {
+               type: 'text_delta',
+               text: activeTextBuffer,
+             },
+           }
+           textBufferMode = 'none'
+           continue
+         }
+
+         if (shouldBufferPotentialReasoningPrefix(activeTextBuffer)) {
+           textBufferMode = 'pending'
+           continue
+         }
      yield {
        type: 'content_block_delta',
        index: activeTextBlockIndex,
@@ -839,8 +895,16 @@
      stop_sequence: null,
    },
    usage: {
-       input_tokens: finalResponse?.usage?.input_tokens ?? 0,
+       // Subtract cached tokens: OpenAI includes them in input_tokens,
+       // but Anthropic convention treats input_tokens as non-cached only.
+       input_tokens: (finalResponse?.usage?.input_tokens ?? 0) -
+         (finalResponse?.usage?.input_tokens_details?.cached_tokens ??
+           finalResponse?.usage?.prompt_tokens_details?.cached_tokens ?? 0),
      output_tokens: finalResponse?.usage?.output_tokens ?? 0,
+       cache_read_input_tokens:
+         finalResponse?.usage?.input_tokens_details?.cached_tokens ??
+         finalResponse?.usage?.prompt_tokens_details?.cached_tokens ??
+         0,
    },
  }
  yield { type: 'message_stop' }
@@ -859,7 +923,7 @@ export function convertCodexResponseToAnthropicMessage(
      if (part?.type === 'output_text') {
        content.push({
          type: 'text',
-           text: part.text ?? '',
+           text: stripLeakedReasoningPreamble(part.text ?? ''),
        })
      }
    }
@@ -7,6 +7,10 @@ const originalEnv = {
  OPENAI_BASE_URL: process.env.OPENAI_BASE_URL,
  OPENAI_API_KEY: process.env.OPENAI_API_KEY,
  OPENAI_MODEL: process.env.OPENAI_MODEL,
  CLAUDE_CODE_USE_GITHUB: process.env.CLAUDE_CODE_USE_GITHUB,
  GITHUB_TOKEN: process.env.GITHUB_TOKEN,
  GH_TOKEN: process.env.GH_TOKEN,
  CLAUDE_CODE_USE_OPENAI: process.env.CLAUDE_CODE_USE_OPENAI,
  CLAUDE_CODE_USE_GEMINI: process.env.CLAUDE_CODE_USE_GEMINI,
  GEMINI_API_KEY: process.env.GEMINI_API_KEY,
  GOOGLE_API_KEY: process.env.GOOGLE_API_KEY,
@@ -15,6 +19,7 @@ const originalEnv = {
  GEMINI_BASE_URL: process.env.GEMINI_BASE_URL,
  GEMINI_MODEL: process.env.GEMINI_MODEL,
  GOOGLE_CLOUD_PROJECT: process.env.GOOGLE_CLOUD_PROJECT,
  ANTHROPIC_CUSTOM_HEADERS: process.env.ANTHROPIC_CUSTOM_HEADERS,
}

const originalFetch = globalThis.fetch
@@ -70,6 +75,10 @@ beforeEach(() => {
  process.env.OPENAI_BASE_URL = 'http://example.test/v1'
  process.env.OPENAI_API_KEY = 'test-key'
  delete process.env.OPENAI_MODEL
  delete process.env.CLAUDE_CODE_USE_GITHUB
  delete process.env.GITHUB_TOKEN
  delete process.env.GH_TOKEN
  delete process.env.CLAUDE_CODE_USE_OPENAI
  delete process.env.CLAUDE_CODE_USE_GEMINI
  delete process.env.GEMINI_API_KEY
  delete process.env.GOOGLE_API_KEY
@@ -78,12 +87,17 @@ beforeEach(() => {
  delete process.env.GEMINI_BASE_URL
  delete process.env.GEMINI_MODEL
  delete process.env.GOOGLE_CLOUD_PROJECT
  delete process.env.ANTHROPIC_CUSTOM_HEADERS
})

afterEach(() => {
  restoreEnv('OPENAI_BASE_URL', originalEnv.OPENAI_BASE_URL)
  restoreEnv('OPENAI_API_KEY', originalEnv.OPENAI_API_KEY)
  restoreEnv('OPENAI_MODEL', originalEnv.OPENAI_MODEL)
  restoreEnv('CLAUDE_CODE_USE_GITHUB', originalEnv.CLAUDE_CODE_USE_GITHUB)
  restoreEnv('GITHUB_TOKEN', originalEnv.GITHUB_TOKEN)
  restoreEnv('GH_TOKEN', originalEnv.GH_TOKEN)
  restoreEnv('CLAUDE_CODE_USE_OPENAI', originalEnv.CLAUDE_CODE_USE_OPENAI)
  restoreEnv('CLAUDE_CODE_USE_GEMINI', originalEnv.CLAUDE_CODE_USE_GEMINI)
  restoreEnv('GEMINI_API_KEY', originalEnv.GEMINI_API_KEY)
  restoreEnv('GOOGLE_API_KEY', originalEnv.GOOGLE_API_KEY)
@@ -92,9 +106,227 @@ afterEach(() => {
  restoreEnv('GEMINI_BASE_URL', originalEnv.GEMINI_BASE_URL)
  restoreEnv('GEMINI_MODEL', originalEnv.GEMINI_MODEL)
  restoreEnv('GOOGLE_CLOUD_PROJECT', originalEnv.GOOGLE_CLOUD_PROJECT)
  restoreEnv('ANTHROPIC_CUSTOM_HEADERS', originalEnv.ANTHROPIC_CUSTOM_HEADERS)
  globalThis.fetch = originalFetch
})
test('strips canonical Anthropic headers from direct shim defaultHeaders', async () => {
  let capturedHeaders: Headers | undefined

  globalThis.fetch = (async (_input, init) => {
    capturedHeaders = new Headers(init?.headers)

    return new Response(
      JSON.stringify({
        id: 'chatcmpl-1',
        model: 'gpt-4o',
        choices: [
          {
            message: {
              role: 'assistant',
              content: 'ok',
            },
            finish_reason: 'stop',
          },
        ],
        usage: {
          prompt_tokens: 8,
          completion_tokens: 3,
          total_tokens: 11,
        },
      }),
      {
        headers: {
          'Content-Type': 'application/json',
        },
      },
    )
  }) as FetchType

  const client = createOpenAIShimClient({
    defaultHeaders: {
      'anthropic-version': '2023-06-01',
      'anthropic-beta': 'prompt-caching-2024-07-31',
      'x-anthropic-additional-protection': 'true',
      'x-claude-remote-session-id': 'remote-123',
      'x-app': 'cli',
      'x-client-app': 'sdk',
      'x-safe-header': 'keep-me',
    },
  }) as OpenAIShimClient

  await client.beta.messages.create({
    model: 'gpt-4o',
    system: 'test system',
    messages: [{ role: 'user', content: 'hello' }],
    max_tokens: 64,
    stream: false,
  })

  expect(capturedHeaders?.get('anthropic-version')).toBeNull()
  expect(capturedHeaders?.get('anthropic-beta')).toBeNull()
  expect(capturedHeaders?.get('x-anthropic-additional-protection')).toBeNull()
  expect(capturedHeaders?.get('x-claude-remote-session-id')).toBeNull()
  expect(capturedHeaders?.get('x-app')).toBeNull()
  expect(capturedHeaders?.get('x-client-app')).toBeNull()
  expect(capturedHeaders?.get('x-safe-header')).toBe('keep-me')
})

test('strips canonical Anthropic headers from per-request shim headers too', async () => {
  let capturedHeaders: Headers | undefined

  globalThis.fetch = (async (_input, init) => {
    capturedHeaders = new Headers(init?.headers)

    return new Response(
      JSON.stringify({
        id: 'chatcmpl-1',
        model: 'gpt-4o',
        choices: [
          {
            message: {
              role: 'assistant',
              content: 'ok',
            },
            finish_reason: 'stop',
          },
        ],
        usage: {
          prompt_tokens: 8,
          completion_tokens: 3,
          total_tokens: 11,
        },
      }),
      {
        headers: {
          'Content-Type': 'application/json',
        },
      },
    )
  }) as FetchType

  const client = createOpenAIShimClient({}) as OpenAIShimClient

  await client.beta.messages.create(
    {
      model: 'gpt-4o',
      system: 'test system',
      messages: [{ role: 'user', content: 'hello' }],
      max_tokens: 64,
      stream: false,
    },
    {
      headers: {
        'anthropic-version': '2023-06-01',
        'anthropic-beta': 'prompt-caching-2024-07-31',
        'x-safe-header': 'keep-me',
      },
    },
  )

  expect(capturedHeaders?.get('anthropic-version')).toBeNull()
  expect(capturedHeaders?.get('anthropic-beta')).toBeNull()
  expect(capturedHeaders?.get('x-safe-header')).toBe('keep-me')
})

test('strips Anthropic-specific headers on GitHub Codex transport requests', async () => {
  let capturedHeaders: Headers | undefined

  process.env.CLAUDE_CODE_USE_GITHUB = '1'
  process.env.OPENAI_API_KEY = 'github-test-key'
  delete process.env.OPENAI_BASE_URL
  delete process.env.OPENAI_MODEL

  globalThis.fetch = (async (_input, init) => {
    capturedHeaders = new Headers(init?.headers)

    return new Response('', {
      status: 200,
      headers: {
        'Content-Type': 'text/event-stream',
      },
    })
  }) as FetchType

  const client = createOpenAIShimClient({}) as OpenAIShimClient

  await client.beta.messages.create(
    {
      model: 'github:gpt-5-codex',
      system: 'test system',
      messages: [{ role: 'user', content: 'hello' }],
      max_tokens: 64,
      stream: true,
    },
    {
      headers: {
        'anthropic-version': '2023-06-01',
        'anthropic-beta': 'prompt-caching-2024-07-31',
        'x-anthropic-additional-protection': 'true',
        'x-safe-header': 'keep-me',
      },
    },
  )

  expect(capturedHeaders?.get('anthropic-version')).toBeNull()
  expect(capturedHeaders?.get('anthropic-beta')).toBeNull()
  expect(capturedHeaders?.get('x-anthropic-additional-protection')).toBeNull()
  expect(capturedHeaders?.get('x-safe-header')).toBe('keep-me')
  expect(capturedHeaders?.get('authorization')).toBe('Bearer github-test-key')
  expect(capturedHeaders?.get('editor-plugin-version')).toBe('copilot-chat/0.26.7')
})

test('strips Anthropic-specific headers on GitHub Codex transport with providerOverride API key', async () => {
  let capturedHeaders: Headers | undefined

  process.env.CLAUDE_CODE_USE_GITHUB = '1'
  process.env.OPENAI_API_KEY = 'env-should-not-win'
  delete process.env.OPENAI_BASE_URL
  delete process.env.OPENAI_MODEL

  globalThis.fetch = (async (_input, init) => {
    capturedHeaders = new Headers(init?.headers)

    return new Response('', {
      status: 200,
      headers: {
        'Content-Type': 'text/event-stream',
      },
    })
  }) as FetchType

  const client = createOpenAIShimClient({
    providerOverride: {
      model: 'github:gpt-5-codex',
      baseURL: 'https://api.githubcopilot.com',
      apiKey: 'provider-override-key',
    },
  }) as OpenAIShimClient

  await client.beta.messages.create(
    {
      model: 'ignored',
      system: 'test system',
      messages: [{ role: 'user', content: 'hello' }],
      max_tokens: 64,
      stream: true,
    },
    {
      headers: {
        'anthropic-version': '2023-06-01',
        'x-claude-remote-session-id': 'remote-123',
        'x-safe-header': 'keep-me',
      },
    },
  )

  expect(capturedHeaders?.get('anthropic-version')).toBeNull()
  expect(capturedHeaders?.get('x-claude-remote-session-id')).toBeNull()
  expect(capturedHeaders?.get('x-safe-header')).toBe('keep-me')
  expect(capturedHeaders?.get('authorization')).toBe('Bearer provider-override-key')
  expect(capturedHeaders?.get('editor-plugin-version')).toBe('copilot-chat/0.26.7')
})
test('preserves usage from final OpenAI stream chunk with empty choices', async () => {
  globalThis.fetch = (async (_input, init) => {
    const url = typeof _input === 'string' ? _input : _input.url
@@ -1806,12 +2038,70 @@ test('sanitizes malformed MCP tool schemas before sending them to OpenAI', async
    | undefined

  expect(parameters?.additionalProperties).toBe(false)
-   expect(parameters?.required).toEqual(['priority'])
+   // No required[] in the original schema → none added (optional properties must not be forced required)
+   expect(parameters?.required).toEqual([])
  expect(properties?.priority?.type).toBe('integer')
  expect(properties?.priority?.enum).toEqual([0, 1, 2, 3])
  expect(properties?.priority).not.toHaveProperty('default')
})

test('optional tool properties are not added to required[] — fixes Groq/Azure 400 tool_use_failed', async () => {
  // Regression test for: all optional properties being sent as required in strict mode,
  // causing providers like Groq to reject valid tool calls where the model omits optional args.
  let requestBody: Record<string, unknown> | undefined

  globalThis.fetch = (async (_input, init) => {
    requestBody = JSON.parse(String(init?.body))

    return new Response(
      JSON.stringify({
        id: 'chatcmpl-4',
        model: 'gpt-4o',
        choices: [{ message: { role: 'assistant', content: 'ok' }, finish_reason: 'stop' }],
        usage: { prompt_tokens: 5, completion_tokens: 2, total_tokens: 7 },
      }),
      { headers: { 'Content-Type': 'application/json' } },
    )
  }) as FetchType

  const client = createOpenAIShimClient({}) as OpenAIShimClient

  await client.beta.messages.create({
    model: 'gpt-4o',
    messages: [{ role: 'user', content: 'read a file' }],
    tools: [
      {
        name: 'Read',
        description: 'Read a file',
        input_schema: {
          type: 'object',
          properties: {
            file_path: { type: 'string', description: 'Absolute path to file' },
            offset: { type: 'number', description: 'Line to start from' },
            limit: { type: 'number', description: 'Max lines to read' },
            pages: { type: 'string', description: 'Page range for PDFs' },
          },
          required: ['file_path'],
        },
      },
    ],
    max_tokens: 16,
    stream: false,
  })

  const parameters = (
    requestBody?.tools as Array<{ function?: { parameters?: Record<string, unknown> } }>
  )?.[0]?.function?.parameters

  expect(parameters?.required).toEqual(['file_path'])

  const required = parameters?.required as string[] | undefined
  expect(required).not.toContain('offset')
  expect(required).not.toContain('limit')
  expect(required).not.toContain('pages')
  expect(parameters?.additionalProperties).toBe(false)
})
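Editor's note: per the assertions above, the "Read" tool reaching the OpenAI wire format now looks roughly like this — an editorial reconstruction from the expectations, not captured output:

// tools[0].function.parameters as serialized by the shim:
// {
//   "type": "object",
//   "properties": { "file_path": ..., "offset": ..., "limit": ..., "pages": ... },
//   "required": ["file_path"],        // optional args are no longer forced in
//   "additionalProperties": false     // still required by strict-mode providers
// }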
// ---------------------------------------------------------------------------
// Issue #202 — consecutive role coalescing (Devstral, Mistral strict templates)
// ---------------------------------------------------------------------------
@@ -1849,7 +2139,7 @@ test('coalesces consecutive user messages to avoid alternation errors (issue #20
    stream: false,
  })

-   expect(sentMessages?.length).toBe(2) // system + 1 merged user
+   expect(sentMessages?.length).toBe(2)
  expect(sentMessages?.[0]?.role).toBe('system')
  expect(sentMessages?.[1]?.role).toBe('user')
  const userContent = sentMessages?.[1]?.content as string
@@ -1883,13 +2173,12 @@ test('coalesces consecutive assistant messages preserving tool_calls (issue #202
    stream: false,
  })

  // system + user + merged assistant + tool
  const assistantMsgs = sentMessages?.filter(m => m.role === 'assistant')
-   expect(assistantMsgs?.length).toBe(1) // two assistant turns merged into one
+   expect(assistantMsgs?.length).toBe(1)
  expect(assistantMsgs?.[0]?.tool_calls?.length).toBeGreaterThan(0)
})

- test('non-streaming: reasoning_content emitted as thinking block, used as text when content is null', async () => {
+ test('non-streaming: reasoning_content emitted as thinking block only when content is null', async () => {
  globalThis.fetch = (async (_input, _init) => {
    return new Response(
      JSON.stringify({
@@ -1931,7 +2220,6 @@ test('non-streaming: reasoning_content emitted as thinking block, used as text w

  expect(result.content).toEqual([
    { type: 'thinking', thinking: 'Let me think about this step by step.' },
-     { type: 'text', text: 'Let me think about this step by step.' },
  ])
})

@@ -1975,11 +2263,8 @@ test('non-streaming: empty string content does not fall through to reasoning_con
    stream: false,
  })) as { content: Array<Record<string, unknown>> }

-   // reasoning_content should be a thinking block, and also used as text
-   // since content is empty string (treated as absent)
  expect(result.content).toEqual([
    { type: 'thinking', thinking: 'Chain of thought here.' },
-     { type: 'text', text: 'Chain of thought here.' },
  ])
})

@@ -2029,6 +2314,46 @@ test('non-streaming: real content takes precedence over reasoning_content', asyn
  ])
})

test('non-streaming: strips leaked reasoning preamble from assistant content', async () => {
  globalThis.fetch = (async () => {
    return new Response(
      JSON.stringify({
        id: 'chatcmpl-1',
        model: 'gpt-5-mini',
        choices: [
          {
            message: {
              role: 'assistant',
              content:
                'The user just said "hey" - a simple greeting. I should respond briefly and friendly.\n\nHey! How can I help you today?',
            },
            finish_reason: 'stop',
          },
        ],
        usage: {
          prompt_tokens: 10,
          completion_tokens: 20,
          total_tokens: 30,
        },
      }),
      { headers: { 'Content-Type': 'application/json' } },
    )
  }) as FetchType

  const client = createOpenAIShimClient({}) as OpenAIShimClient
  const result = (await client.beta.messages.create({
    model: 'gpt-5-mini',
    system: 'test system',
    messages: [{ role: 'user', content: 'hey' }],
    max_tokens: 64,
    stream: false,
  })) as { content: Array<Record<string, unknown>> }

  expect(result.content).toEqual([
    { type: 'text', text: 'Hey! How can I help you today?' },
  ])
})

test('streaming: thinking block closed before tool call', async () => {
  globalThis.fetch = (async (_input, _init) => {
    const chunks = makeStreamChunks([
@@ -2104,7 +2429,6 @@ test('streaming: thinking block closed before tool call', async () => {

  const types = events.map(e => e.type)

-   // Verify thinking block is started, then closed, then tool call starts
  const thinkingStartIdx = types.indexOf('content_block_start')
  const firstStopIdx = types.indexOf('content_block_stop')
  const toolStartIdx = types.indexOf(
@@ -2116,9 +2440,139 @@ test('streaming: thinking block closed before tool call', async () => {
  expect(firstStopIdx).toBeGreaterThan(thinkingStartIdx)
  expect(toolStartIdx).toBeGreaterThan(firstStopIdx)

  // Verify thinking block start content
  const thinkingStart = events[thinkingStartIdx] as {
    content_block?: Record<string, unknown>
  }
  expect(thinkingStart?.content_block?.type).toBe('thinking')
})

test('streaming: strips leaked reasoning preamble from assistant content deltas', async () => {
  globalThis.fetch = (async () => {
    const chunks = makeStreamChunks([
      {
        id: 'chatcmpl-1',
        object: 'chat.completion.chunk',
        model: 'gpt-5-mini',
        choices: [
          {
            index: 0,
            delta: {
              role: 'assistant',
              content:
                'The user just said "hey" - a simple greeting. I should respond briefly and friendly.\n\nHey! How can I help you today?',
            },
            finish_reason: null,
          },
        ],
      },
      {
        id: 'chatcmpl-1',
        object: 'chat.completion.chunk',
        model: 'gpt-5-mini',
        choices: [
          {
            index: 0,
            delta: {},
            finish_reason: 'stop',
          },
        ],
      },
    ])

    return makeSseResponse(chunks)
  }) as FetchType

  const client = createOpenAIShimClient({}) as OpenAIShimClient
  const result = await client.beta.messages
    .create({
      model: 'gpt-5-mini',
      system: 'test system',
      messages: [{ role: 'user', content: 'hey' }],
      max_tokens: 64,
      stream: true,
    })
    .withResponse()

  const textDeltas: string[] = []
  for await (const event of result.data) {
    const delta = (event as { delta?: { type?: string; text?: string } }).delta
    if (delta?.type === 'text_delta' && typeof delta.text === 'string') {
      textDeltas.push(delta.text)
    }
  }

  expect(textDeltas).toEqual(['Hey! How can I help you today?'])
})

test('streaming: strips leaked reasoning preamble when split across multiple content chunks', async () => {
  globalThis.fetch = (async () => {
    const chunks = makeStreamChunks([
      {
        id: 'chatcmpl-1',
        object: 'chat.completion.chunk',
        model: 'gpt-5-mini',
        choices: [
          {
            index: 0,
            delta: {
              role: 'assistant',
              content: 'The user said "hey" - this is a simple greeting. ',
            },
            finish_reason: null,
          },
        ],
      },
      {
        id: 'chatcmpl-1',
        object: 'chat.completion.chunk',
        model: 'gpt-5-mini',
        choices: [
          {
            index: 0,
            delta: {
              content:
                'I should respond in a friendly, concise way.\n\nHey! How can I help you today?',
            },
            finish_reason: null,
          },
        ],
      },
      {
        id: 'chatcmpl-1',
        object: 'chat.completion.chunk',
        model: 'gpt-5-mini',
        choices: [
          {
            index: 0,
            delta: {},
            finish_reason: 'stop',
          },
        ],
      },
    ])

    return makeSseResponse(chunks)
  }) as FetchType

  const client = createOpenAIShimClient({}) as OpenAIShimClient

  const result = await client.beta.messages
    .create({
      model: 'gpt-5-mini',
      system: 'test system',
      messages: [{ role: 'user', content: 'hey' }],
      max_tokens: 64,
      stream: true,
    })
    .withResponse()

  const textDeltas: string[] = []
  for await (const event of result.data) {
    const delta = (event as { delta?: { type?: string; text?: string } }).delta
    if (delta?.type === 'text_delta' && typeof delta.text === 'string') {
      textDeltas.push(delta.text)
    }
  }

  expect(textDeltas).toEqual(['Hey! How can I help you today?'])
})
@@ -15,9 +15,9 @@
 * OPENAI_MODEL=gpt-4o — default model override
 * CODEX_API_KEY / ~/.codex/auth.json — Codex auth for codexplan/codexspark
 *
- * GitHub Models (models.github.ai), OpenAI-compatible:
+ * GitHub Copilot API (api.githubcopilot.com), OpenAI-compatible:
 * CLAUDE_CODE_USE_GITHUB=1 — enable GitHub inference (no need for USE_OPENAI)
- * GITHUB_TOKEN or GH_TOKEN — PAT with models access (mapped to Bearer auth)
+ * GITHUB_TOKEN or GH_TOKEN — Copilot API token (mapped to Bearer auth)
 * OPENAI_MODEL — optional; use github:copilot or openai/gpt-4.1 style IDs
 */

@@ -26,10 +26,17 @@ import { isEnvTruthy } from '../../utils/envUtils.js'
import { resolveGeminiCredential } from '../../utils/geminiAuth.js'
import { hydrateGeminiAccessTokenFromSecureStorage } from '../../utils/geminiCredentials.js'
import { hydrateGithubModelsTokenFromSecureStorage } from '../../utils/githubModelsCredentials.js'
import {
  looksLikeLeakedReasoningPrefix,
  shouldBufferPotentialReasoningPrefix,
  stripLeakedReasoningPreamble,
} from './reasoningLeakSanitizer.js'
import {
  codexStreamToAnthropic,
  collectCodexCompletedResponse,
  convertAnthropicMessagesToResponsesInput,
  convertCodexResponseToAnthropicMessage,
  convertToolsToResponsesTools,
  performCodexRequest,
  type AnthropicStreamEvent,
  type AnthropicUsage,
@@ -39,6 +46,7 @@ import {
  isLocalProviderUrl,
  resolveCodexApiCredentials,
  resolveProviderRequest,
  getGithubEndpointType,
} from './providerConfig.js'
import { sanitizeSchemaForOpenAICompat } from '../../utils/schemaSanitizer.js'
import { redactSecretValueForDisplay } from '../../utils/providerProfile.js'
@@ -53,19 +61,56 @@ type SecretValueSource = Partial<{
  GEMINI_API_KEY: string
  GOOGLE_API_KEY: string
  GEMINI_ACCESS_TOKEN: string
  MISTRAL_API_KEY: string
}>

const GITHUB_MODELS_DEFAULT_BASE = 'https://models.github.ai/inference'
const GITHUB_API_VERSION = '2022-11-28'
const GITHUB_COPILOT_BASE = 'https://api.githubcopilot.com'
const GITHUB_429_MAX_RETRIES = 3
const GITHUB_429_BASE_DELAY_SEC = 1
const GITHUB_429_MAX_DELAY_SEC = 32
const GEMINI_API_HOST = 'generativelanguage.googleapis.com'

const COPILOT_HEADERS: Record<string, string> = {
  'User-Agent': 'GitHubCopilotChat/0.26.7',
  'Editor-Version': 'vscode/1.99.3',
  'Editor-Plugin-Version': 'copilot-chat/0.26.7',
  'Copilot-Integration-Id': 'vscode-chat',
}

function isGithubModelsMode(): boolean {
  return isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB)
}

function isMistralMode(): boolean {
  return isEnvTruthy(process.env.CLAUDE_CODE_USE_MISTRAL)
}
function filterAnthropicHeaders(
  headers: Record<string, string> | undefined,
): Record<string, string> {
  if (!headers) return {}

  const filtered: Record<string, string> = {}
  for (const [key, value] of Object.entries(headers)) {
    const lower = key.toLowerCase()
    if (
      lower.startsWith('x-anthropic') ||
      lower.startsWith('anthropic-') ||
      lower.startsWith('x-claude') ||
      lower === 'x-app' ||
      lower === 'x-client-app' ||
      lower === 'authorization' ||
      lower === 'x-api-key' ||
      lower === 'api-key'
    ) {
      continue
    }
    filtered[key] = value
  }

  return filtered
}
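Editor's note: a quick illustration of the filter's behavior, consistent with the header-stripping tests earlier in this diff (values are illustrative):

filterAnthropicHeaders({
  'anthropic-version': '2023-06-01',      // dropped: 'anthropic-' prefix
  'x-claude-remote-session-id': 'r-123',  // dropped: 'x-claude' prefix
  Authorization: 'Bearer secret',         // dropped: credential header
  'x-safe-header': 'keep-me',             // kept
})
// => { 'x-safe-header': 'keep-me' }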

function hasGeminiApiHost(baseUrl: string | undefined): boolean {
  if (!baseUrl) return false

@@ -412,11 +457,13 @@ function normalizeSchemaForOpenAI(
  record.properties = normalizedProps

  if (strict) {
-     // OpenAI strict mode requires every property to be listed in required[]
-     const allKeys = Object.keys(normalizedProps)
-     record.required = Array.from(new Set([...existingRequired, ...allKeys]))
-     // OpenAI strict mode requires additionalProperties: false on all object
-     // schemas — override unconditionally to ensure nested objects comply.
+     // Keep only the properties that were originally marked required in the schema.
+     // Adding every property to required[] (the previous behaviour) caused strict
+     // OpenAI-compatible providers (Groq, Azure, etc.) to reject tool calls because
+     // the model correctly omits optional arguments — but the provider treats them
+     // as missing required fields and returns a 400 / tool_use_failed error.
+     record.required = existingRequired.filter(k => k in normalizedProps)
+     // additionalProperties: false is still required by strict-mode providers.
    record.additionalProperties = false
  } else {
    // For Gemini: keep only existing required keys that are present in properties
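Editor's note: a before/after sketch of the strict-mode change above, using the Read-tool schema from the regression test earlier in this diff (an editorial reconstruction):

// previous behaviour:  required = ['file_path', 'offset', 'limit', 'pages']
//                      → Groq/Azure reject calls that omit optional args
// new behaviour:       required = existingRequired.filter(k => k in properties)
//                      → required = ['file_path']; additionalProperties stays false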
@@ -522,11 +569,14 @@
): Partial<AnthropicUsage> | undefined {
  if (!usage) return undefined

+   const cached = usage.prompt_tokens_details?.cached_tokens ?? 0
  return {
-     input_tokens: usage.prompt_tokens ?? 0,
+     // Subtract cached tokens: OpenAI includes them in prompt_tokens,
+     // but Anthropic convention treats input_tokens as non-cached only.
+     input_tokens: (usage.prompt_tokens ?? 0) - cached,
    output_tokens: usage.completion_tokens ?? 0,
    cache_creation_input_tokens: 0,
-     cache_read_input_tokens: usage.prompt_tokens_details?.cached_tokens ?? 0,
+     cache_read_input_tokens: cached,
  }
}

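Editor's note: a worked example of the cached-token accounting in convertChunkUsage above (values are illustrative):

// convertChunkUsage({ prompt_tokens: 1000, completion_tokens: 50,
//                     prompt_tokens_details: { cached_tokens: 600 } })
// => { input_tokens: 400,               // 1000 - 600: the non-cached share only
//      output_tokens: 50,
//      cache_creation_input_tokens: 0,
//      cache_read_input_tokens: 600 }   // cached share reported separately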
@@ -577,6 +627,8 @@ async function* openaiStreamToAnthropic(
  let hasEmittedContentStart = false
  let hasEmittedThinkingStart = false
  let hasClosedThinking = false
+   let activeTextBuffer = ''
+   let textBufferMode: 'none' | 'pending' | 'strip' = 'none'
  let lastStopReason: 'tool_use' | 'max_tokens' | 'end_turn' | null = null
  let hasEmittedFinalUsage = false
  let hasProcessedFinishReason = false
@@ -607,6 +659,30 @@
  const decoder = new TextDecoder()
  let buffer = ''

+   const closeActiveContentBlock = async function* () {
+     if (!hasEmittedContentStart) return
+
+     if (textBufferMode !== 'none') {
+       const sanitized = stripLeakedReasoningPreamble(activeTextBuffer)
+       if (sanitized) {
+         yield {
+           type: 'content_block_delta',
+           index: contentBlockIndex,
+           delta: { type: 'text_delta', text: sanitized },
+         }
+       }
+     }
+
+     yield {
+       type: 'content_block_stop',
+       index: contentBlockIndex,
+     }
+     contentBlockIndex++
+     hasEmittedContentStart = false
+     activeTextBuffer = ''
+     textBufferMode = 'none'
+   }

  try {
    while (true) {
      const { done, value } = await reader.read()
@@ -661,6 +737,7 @@
          contentBlockIndex++
          hasClosedThinking = true
        }
+         activeTextBuffer += delta.content
        if (!hasEmittedContentStart) {
          yield {
            type: 'content_block_start',
@@ -669,6 +746,35 @@
          }
          hasEmittedContentStart = true
        }

+         if (
+           textBufferMode === 'strip' ||
+           looksLikeLeakedReasoningPrefix(activeTextBuffer)
+         ) {
+           textBufferMode = 'strip'
+           continue
+         }
+
+         if (textBufferMode === 'pending') {
+           if (shouldBufferPotentialReasoningPrefix(activeTextBuffer)) {
+             continue
+           }
+           yield {
+             type: 'content_block_delta',
+             index: contentBlockIndex,
+             delta: {
+               type: 'text_delta',
+               text: activeTextBuffer,
+             },
+           }
+           textBufferMode = 'none'
+           continue
+         }
+
+         if (shouldBufferPotentialReasoningPrefix(activeTextBuffer)) {
+           textBufferMode = 'pending'
+           continue
+         }
        yield {
          type: 'content_block_delta',
          index: contentBlockIndex,
@@ -687,12 +793,7 @@
          hasClosedThinking = true
        }
        if (hasEmittedContentStart) {
-           yield {
-             type: 'content_block_stop',
-             index: contentBlockIndex,
-           }
-           contentBlockIndex++
-           hasEmittedContentStart = false
+           yield* closeActiveContentBlock()
        }

        const toolBlockIndex = contentBlockIndex
@@ -775,10 +876,7 @@
      }
      // Close any open content blocks
      if (hasEmittedContentStart) {
-         yield {
-           type: 'content_block_stop',
-           index: contentBlockIndex,
-         }
+         yield* closeActiveContentBlock()
      }
      // Close active tool calls
      for (const [, tc] of activeToolCalls) {
@@ -925,7 +1023,7 @@ class OpenAIShimMessages {
|
||||
private providerOverride?: { model: string; baseURL: string; apiKey: string }
|
||||
|
||||
constructor(defaultHeaders: Record<string, string>, reasoningEffort?: 'low' | 'medium' | 'high' | 'xhigh', providerOverride?: { model: string; baseURL: string; apiKey: string }) {
|
||||
this.defaultHeaders = defaultHeaders
|
||||
this.defaultHeaders = filterAnthropicHeaders(defaultHeaders)
|
||||
this.reasoningEffort = reasoningEffort
|
||||
this.providerOverride = providerOverride
|
||||
}
|
||||
@@ -944,8 +1042,9 @@ class OpenAIShimMessages {
|
||||
httpResponse = response
|
||||
|
||||
if (params.stream) {
|
||||
const isResponsesStream = response.url?.includes('/responses')
|
||||
return new OpenAIShimStream(
|
||||
request.transport === 'codex_responses'
|
||||
(request.transport === 'codex_responses' || isResponsesStream)
|
||||
? codexStreamToAnthropic(response, request.resolvedModel)
|
||||
: openaiStreamToAnthropic(response, request.resolvedModel),
|
||||
)
|
||||
@@ -959,8 +1058,38 @@ class OpenAIShimMessages {
|
||||
)
|
||||
}
|
||||
|
||||
const data = await response.json()
|
||||
return self._convertNonStreamingResponse(data, request.resolvedModel)
|
||||
const isResponsesNonStream = response.url?.includes('/responses')
|
||||
if (isResponsesNonStream || (request.transport === 'chat_completions' && isGithubModelsMode())) {
|
||||
const contentType = response.headers.get('content-type') ?? ''
|
||||
if (contentType.includes('application/json')) {
|
||||
const parsed = await response.json() as Record<string, unknown>
|
||||
if (
|
||||
parsed &&
|
||||
typeof parsed === 'object' &&
|
||||
('output' in parsed || 'incomplete_details' in parsed)
|
||||
) {
|
||||
return convertCodexResponseToAnthropicMessage(
|
||||
parsed,
|
||||
request.resolvedModel,
|
||||
)
|
||||
}
|
||||
return self._convertNonStreamingResponse(parsed, request.resolvedModel)
|
||||
}
|
||||
}

      const contentType = response.headers.get('content-type') ?? ''
      if (contentType.includes('application/json')) {
        const data = await response.json()
        return self._convertNonStreamingResponse(data, request.resolvedModel)
      }

      const textBody = await response.text().catch(() => '')
      throw APIError.generate(
        response.status,
        undefined,
        `OpenAI API error ${response.status}: unexpected response: ${textBody.slice(0, 500)}`,
        response.headers as unknown as Headers,
      )
    })()

    ; (promise as unknown as Record<string, unknown>).withResponse =

@@ -982,7 +1111,36 @@ class OpenAIShimMessages {
    params: ShimCreateParams,
    options?: { signal?: AbortSignal; headers?: Record<string, string> },
  ): Promise<Response> {
    if (request.transport === 'codex_responses') {
      const githubEndpointType = getGithubEndpointType(request.baseUrl)
      const isGithubMode = isGithubModelsMode()
      const isGithubWithCodexTransport = isGithubMode && request.transport === 'codex_responses'
      const isGithubCopilotEndpoint = isGithubMode && githubEndpointType === 'copilot'

      if (isGithubWithCodexTransport) {
        const apiKey = this.providerOverride?.apiKey ?? process.env.OPENAI_API_KEY ?? ''
        if (!apiKey) {
          throw new Error(
            'GitHub Copilot auth is required. Run /onboard-github to sign in.',
          )
        }

        return performCodexRequest({
          request,
          credentials: {
            apiKey,
            source: 'env',
          },
          params,
          defaultHeaders: {
            ...this.defaultHeaders,
            ...filterAnthropicHeaders(options?.headers),
            ...COPILOT_HEADERS,
          },
          signal: options?.signal,
        })
      }

      if (request.transport === 'codex_responses' && !isGithubMode) {
        const credentials = resolveCodexApiCredentials()
        if (!credentials.apiKey) {
          const authHint = credentials.authPath

@@ -1007,7 +1165,7 @@ class OpenAIShimMessages {
        params,
        defaultHeaders: {
          ...this.defaultHeaders,
          ...(options?.headers ?? {}),
          ...filterAnthropicHeaders(options?.headers),
        },
        signal: options?.signal,
      })

@@ -1034,6 +1192,7 @@ class OpenAIShimMessages {
      model: request.resolvedModel,
      messages: openaiMessages,
      stream: params.stream ?? false,
      store: false,
    }
    // Convert max_tokens to max_completion_tokens for OpenAI API compatibility.
    // Azure OpenAI requires max_completion_tokens and does not accept max_tokens.

@@ -1056,11 +1215,22 @@ class OpenAIShimMessages {
    }

    const isGithub = isGithubModelsMode()
    if (isGithub && body.max_completion_tokens !== undefined) {
    const isMistral = isMistralMode()

    const githubEndpointType = getGithubEndpointType(request.baseUrl)
    const isGithubCopilot = isGithub && githubEndpointType === 'copilot'
    const isGithubModels = isGithub && (githubEndpointType === 'models' || githubEndpointType === 'custom')

    if ((isGithub || isMistral) && body.max_completion_tokens !== undefined) {
      body.max_tokens = body.max_completion_tokens
      delete body.max_completion_tokens
    }

    // mistral also doesn't recognize body.store
    if (isMistral) {
      delete body.store
    }

    if (params.temperature !== undefined) body.temperature = params.temperature
    if (params.top_p !== undefined) body.top_p = params.top_p

@@ -1095,12 +1265,11 @@ class OpenAIShimMessages {
    const headers: Record<string, string> = {
      'Content-Type': 'application/json',
      ...this.defaultHeaders,
      ...(options?.headers ?? {}),
      ...filterAnthropicHeaders(options?.headers),
    }

    const isGemini = isGeminiMode()
    const apiKey =
      this.providerOverride?.apiKey ?? process.env.OPENAI_API_KEY ?? ''
    const isGemini = isEnvTruthy(process.env.CLAUDE_CODE_USE_GEMINI)
    const apiKey = this.providerOverride?.apiKey ?? process.env.OPENAI_API_KEY ?? ''
    // Detect Azure endpoints by hostname (not raw URL) to prevent bypass via
    // path segments like https://evil.com/cognitiveservices.azure.com/
    let isAzure = false

@@ -1121,15 +1290,17 @@ class OpenAIShimMessages {
      const geminiCredential = await resolveGeminiCredential(process.env)
      if (geminiCredential.kind !== 'none') {
        headers.Authorization = `Bearer ${geminiCredential.credential}`
        if (geminiCredential.projectId) {
        if (geminiCredential.kind !== 'api-key' && 'projectId' in geminiCredential && geminiCredential.projectId) {
          headers['x-goog-user-project'] = geminiCredential.projectId
        }
      }
    }

    if (isGithub) {
      headers.Accept = 'application/vnd.github.v3+json'
      headers['X-GitHub-Api-Version'] = GITHUB_API_VERSION
      if (isGithubCopilot) {
        Object.assign(headers, COPILOT_HEADERS)
      } else if (isGithubModels) {
        headers['Accept'] = 'application/vnd.github+json'
        headers['X-GitHub-Api-Version'] = '2022-11-28'
      }

    // Build the chat completions URL

@@ -1181,9 +1352,83 @@ class OpenAIShimMessages {
        await sleepMs(delaySec * 1000)
        continue
      }
      // Read body exactly once here — Response body is a stream that can only
      // be consumed a single time.
      const errorBody = await response.text().catch(() => 'unknown error')
      const rateHint =
        isGithub && response.status === 429 ? formatRetryAfterHint(response) : ''

      // If GitHub Copilot returns error about /chat/completions,
      // try the /responses endpoint (needed for GPT-5+ models)
      if (isGithub && response.status === 400) {
        if (errorBody.includes('/chat/completions') || errorBody.includes('not accessible')) {
          const responsesUrl = `${request.baseUrl}/responses`
          const responsesBody: Record<string, unknown> = {
            model: request.resolvedModel,
            input: convertAnthropicMessagesToResponsesInput(
              params.messages as Array<{
                role?: string
                message?: { role?: string; content?: unknown }
                content?: unknown
              }>,
            ),
            stream: params.stream ?? false,
            store: false,
          }

          if (!Array.isArray(responsesBody.input) || responsesBody.input.length === 0) {
            responsesBody.input = [
              {
                type: 'message',
                role: 'user',
                content: [{ type: 'input_text', text: '' }],
              },
            ]
          }

          const systemText = convertSystemPrompt(params.system)
          if (systemText) {
            responsesBody.instructions = systemText
          }

          if (body.max_tokens !== undefined) {
            responsesBody.max_output_tokens = body.max_tokens
          }

          if (params.tools && params.tools.length > 0) {
            const convertedTools = convertToolsToResponsesTools(
              params.tools as Array<{
                name?: string
                description?: string
                input_schema?: Record<string, unknown>
              }>,
            )
            if (convertedTools.length > 0) {
              responsesBody.tools = convertedTools
            }
          }

          const responsesResponse = await fetch(responsesUrl, {
            method: 'POST',
            headers,
            body: JSON.stringify(responsesBody),
            signal: options?.signal,
          })
          if (responsesResponse.ok) {
            return responsesResponse
          }
          const responsesErrorBody = await responsesResponse.text().catch(() => 'unknown error')
          let responsesErrorResponse: object | undefined
          try { responsesErrorResponse = JSON.parse(responsesErrorBody) } catch { /* raw text */ }
          throw APIError.generate(
            responsesResponse.status,
            responsesErrorResponse,
            `OpenAI API error ${responsesResponse.status}: ${responsesErrorBody}`,
            responsesResponse.headers,
          )
        }
      }

      let errorResponse: object | undefined
      try { errorResponse = JSON.parse(errorBody) } catch { /* raw text */ }
      throw APIError.generate(

@@ -1233,9 +1478,9 @@ class OpenAIShimMessages {
    const choice = data.choices?.[0]
    const content: Array<Record<string, unknown>> = []

    // Some reasoning models (e.g. GLM-5) put their reply in reasoning_content
    // while content stays null — emit reasoning as a thinking block, then
    // fall back to it for visible text if content is empty.
    // Some reasoning models (e.g. GLM-5) put their chain-of-thought in
    // reasoning_content while content stays null. Preserve it as a thinking
    // block, but do not surface it as visible assistant text.
    const reasoningText = choice?.message?.reasoning_content
    if (typeof reasoningText === 'string' && reasoningText) {
      content.push({ type: 'thinking', thinking: reasoningText })

@@ -1243,9 +1488,12 @@ class OpenAIShimMessages {
    const rawContent =
      choice?.message?.content !== '' && choice?.message?.content != null
        ? choice?.message?.content
        : choice?.message?.reasoning_content
        : null
    if (typeof rawContent === 'string' && rawContent) {
      content.push({ type: 'text', text: rawContent })
      content.push({
        type: 'text',
        text: stripLeakedReasoningPreamble(rawContent),
      })
    } else if (Array.isArray(rawContent) && rawContent.length > 0) {
      const parts: string[] = []
      for (const part of rawContent) {

@@ -1260,7 +1508,10 @@ class OpenAIShimMessages {
      }
      const joined = parts.join('\n')
      if (joined) {
        content.push({ type: 'text', text: joined })
        content.push({
          type: 'text',
          text: stripLeakedReasoningPreamble(joined),
        })
      }
    }

@@ -1350,8 +1601,15 @@ export function createOpenAIShimClient(options: {
    if (process.env.GEMINI_MODEL && !process.env.OPENAI_MODEL) {
      process.env.OPENAI_MODEL = process.env.GEMINI_MODEL
    }
  } else if (isEnvTruthy(process.env.CLAUDE_CODE_USE_MISTRAL)) {
    process.env.OPENAI_BASE_URL =
      process.env.MISTRAL_BASE_URL ?? 'https://api.mistral.ai/v1'
    process.env.OPENAI_API_KEY = process.env.MISTRAL_API_KEY
    if (process.env.MISTRAL_MODEL) {
      process.env.OPENAI_MODEL = process.env.MISTRAL_MODEL
    }
  } else if (isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB)) {
    process.env.OPENAI_BASE_URL ??= GITHUB_MODELS_DEFAULT_BASE
    process.env.OPENAI_BASE_URL ??= GITHUB_COPILOT_BASE
    process.env.OPENAI_API_KEY ??=
      process.env.GITHUB_TOKEN ?? process.env.GH_TOKEN ?? ''
  }

@@ -23,6 +23,9 @@ test.each([
  ['github:gpt-4o', 'gpt-4o'],
  ['gpt-4o', 'gpt-4o'],
  ['github:copilot?reasoning=high', DEFAULT_GITHUB_MODELS_API_MODEL],
  // normalizeGithubModelsApiModel preserves provider prefix for models.github.ai compatibility
  ['github:openai/gpt-4.1', 'openai/gpt-4.1'],
  ['openai/gpt-4.1', 'openai/gpt-4.1'],
] as const)('normalizeGithubModelsApiModel(%s) -> %s', (input, expected) => {
  expect(normalizeGithubModelsApiModel(input)).toBe(expected)
})

@@ -34,6 +37,20 @@ test('resolveProviderRequest applies GitHub normalization when CLAUDE_CODE_USE_G
  expect(r.transport).toBe('chat_completions')
})

test('resolveProviderRequest routes GitHub GPT-5 codex models to responses transport', () => {
  process.env.CLAUDE_CODE_USE_GITHUB = '1'
  const r = resolveProviderRequest({ model: 'gpt-5.3-codex' })
  expect(r.resolvedModel).toBe('gpt-5.3-codex')
  expect(r.transport).toBe('codex_responses')
})

test('resolveProviderRequest keeps gpt-5-mini on chat_completions for GitHub', () => {
  process.env.CLAUDE_CODE_USE_GITHUB = '1'
  const r = resolveProviderRequest({ model: 'gpt-5-mini' })
  expect(r.resolvedModel).toBe('gpt-5-mini')
  expect(r.transport).toBe('chat_completions')
})

test('resolveProviderRequest leaves model unchanged without GitHub flag', () => {
  delete process.env.CLAUDE_CODE_USE_GITHUB
  const r = resolveProviderRequest({ model: 'github:gpt-4o' })

@@ -7,8 +7,9 @@ import { isEnvTruthy } from '../../utils/envUtils.js'

export const DEFAULT_OPENAI_BASE_URL = 'https://api.openai.com/v1'
export const DEFAULT_CODEX_BASE_URL = 'https://chatgpt.com/backend-api/codex'
/** Default GitHub Models API model when user selects copilot / github:copilot */
export const DEFAULT_GITHUB_MODELS_API_MODEL = 'openai/gpt-4.1'
export const DEFAULT_MISTRAL_BASE_URL = 'https://api.mistral.ai/v1'
/** Default GitHub Copilot API model when user selects copilot / github:copilot */
export const DEFAULT_GITHUB_MODELS_API_MODEL = 'gpt-4o'

const CODEX_ALIAS_MODELS: Record<
  string,

@@ -227,6 +228,21 @@ export function shouldUseCodexTransport(
  return isCodexBaseUrl(explicitBaseUrl) || (!explicitBaseUrl && isCodexAlias(model))
}

function shouldUseGithubResponsesApi(model: string): boolean {
  const normalized = model.trim().toLowerCase()

  // Codex-branded models require /responses.
  if (normalized.includes('codex')) return true

  // GPT-5+ models use /responses, except gpt-5-mini.
  const match = /^gpt-(\d+)/.exec(normalized)
  if (!match) return false
  const major = Number(match[1])
  if (major < 5) return false
  if (normalized.startsWith('gpt-5-mini')) return false
  return true
}

export function isLocalProviderUrl(baseUrl: string | undefined): boolean {
  if (!baseUrl) return false
  try {

@@ -280,19 +296,61 @@ export function isCodexBaseUrl(baseUrl: string | undefined): boolean {
}

/**
 * Normalize user model string for GitHub Models inference (models.github.ai).
 * Mirrors the runtime `github._normalize_model_id` helper.
 * Normalize user model string for GitHub Copilot API inference.
 * Mirrors how Copilot resolves model IDs internally.
 */
export function normalizeGithubModelsApiModel(requestedModel: string): string {
export function normalizeGithubCopilotModel(requestedModel: string): string {
  const noQuery = requestedModel.split('?', 1)[0] ?? requestedModel
  const segment =
    noQuery.includes(':') ? noQuery.split(':', 2)[1]!.trim() : noQuery.trim()
  if (!segment || segment.toLowerCase() === 'copilot') {
    return DEFAULT_GITHUB_MODELS_API_MODEL
  }
  // Strip provider prefix if present (e.g., "openai/gpt-4o" -> "gpt-4o")
  const slashIndex = segment.indexOf('/')
  if (slashIndex !== -1) {
    return segment.slice(slashIndex + 1)
  }
  return segment
}

/**
 * Normalize user model string for GitHub Models API inference.
 * Only normalizes the default alias, preserves provider-qualified models.
 */
export function normalizeGithubModelsApiModel(requestedModel: string): string {
  const noQuery = requestedModel.split('?', 1)[0] ?? requestedModel
  const segment =
    noQuery.includes(':') ? noQuery.split(':', 2)[1]!.trim() : noQuery.trim()
  // Only normalize the default alias for GitHub Models
  if (!segment || segment.toLowerCase() === 'copilot') {
    return DEFAULT_GITHUB_MODELS_API_MODEL
  }
  // Preserve provider prefix for GitHub Models (e.g., "openai/gpt-4.1" stays as-is)
  return segment
}
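
// A quick sketch of how the two normalizers differ (behavior read off the
// function bodies above; the example inputs are hypothetical):
//
//   normalizeGithubCopilotModel('github:copilot')         // -> DEFAULT_GITHUB_MODELS_API_MODEL
//   normalizeGithubCopilotModel('github:openai/gpt-4o')   // -> 'gpt-4o'  (provider prefix stripped)
//   normalizeGithubModelsApiModel('github:copilot')       // -> DEFAULT_GITHUB_MODELS_API_MODEL
//   normalizeGithubModelsApiModel('openai/gpt-4.1')       // -> 'openai/gpt-4.1'  (prefix preserved)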

export const GITHUB_COPILOT_BASE_URL = 'https://api.githubcopilot.com'
export const GITHUB_MODELS_BASE_URL = 'https://models.github.ai/inference'

export function getGithubEndpointType(
  baseUrl: string | undefined,
): 'copilot' | 'models' | 'custom' {
  if (!baseUrl) return 'copilot'
  try {
    const hostname = new URL(baseUrl).hostname.toLowerCase()
    if (hostname === 'api.githubcopilot.com') {
      return 'copilot'
    }
    if (hostname === 'models.github.ai' || hostname.endsWith('.github.ai')) {
      return 'models'
    }
    return 'custom'
  } catch {
    return 'copilot'
  }
}
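
// Endpoint detection keys off the hostname only, so path segments cannot
// spoof it (a sketch of the branches above; the URLs are illustrative):
//
//   getGithubEndpointType(undefined)                              // -> 'copilot' (default)
//   getGithubEndpointType('https://api.githubcopilot.com')       // -> 'copilot'
//   getGithubEndpointType('https://models.github.ai/inference')  // -> 'models'
//   getGithubEndpointType('https://example.com/v1')              // -> 'custom'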

export function resolveProviderRequest(options?: {
  model?: string
  baseUrl?: string

@@ -300,41 +358,64 @@ export function resolveProviderRequest(options?: {
  reasoningEffortOverride?: ReasoningEffort
}): ResolvedProviderRequest {
  const isGithubMode = isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB)
  const isMistralMode = isEnvTruthy(process.env.CLAUDE_CODE_USE_MISTRAL)
  const requestedModel =
    options?.model?.trim() ||
    process.env.OPENAI_MODEL?.trim() ||
    (isMistralMode
      ? process.env.MISTRAL_MODEL?.trim()
      : process.env.OPENAI_MODEL?.trim()) ||
    options?.fallbackModel?.trim() ||
    (isGithubMode ? 'github:copilot' : 'gpt-4o')
  const descriptor = parseModelDescriptor(requestedModel)
  const rawBaseUrl =
    asEnvUrl(options?.baseUrl) ??
    asEnvUrl(process.env.OPENAI_BASE_URL) ??
    asEnvUrl(
      isMistralMode ? (process.env.MISTRAL_BASE_URL ?? DEFAULT_MISTRAL_BASE_URL) : process.env.OPENAI_BASE_URL,
    ) ??
    asEnvUrl(process.env.OPENAI_API_BASE)

  const githubEndpointType = isGithubMode
    ? getGithubEndpointType(rawBaseUrl)
    : 'custom'
  const isGithubCopilot = isGithubMode && githubEndpointType === 'copilot'
  const isGithubModels = isGithubMode && githubEndpointType === 'models'
  const isGithubCustom = isGithubMode && githubEndpointType === 'custom'

  const githubResolvedModel = isGithubMode
    ? normalizeGithubModelsApiModel(requestedModel)
    : requestedModel

  const transport: ProviderTransport =
    shouldUseCodexTransport(requestedModel, rawBaseUrl)
    shouldUseCodexTransport(requestedModel, rawBaseUrl) ||
    (isGithubCopilot && shouldUseGithubResponsesApi(githubResolvedModel))
      ? 'codex_responses'
      : 'chat_completions'

  const resolvedModel =
    transport === 'chat_completions' &&
    isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB)
      ? normalizeGithubModelsApiModel(requestedModel)
      : descriptor.baseModel
  // For GitHub Copilot API, normalize to real model ID (e.g., "github:copilot" -> "gpt-4o")
  // For GitHub Models/custom endpoints:
  // - Normalize default alias (github:copilot -> gpt-4o)
  // - Preserve provider-qualified models (openai/gpt-4.1 stays as-is)
  const resolvedModel = isGithubCopilot
    ? normalizeGithubCopilotModel(descriptor.baseModel)
    : (isGithubModels || isGithubCustom
      ? normalizeGithubModelsApiModel(descriptor.baseModel)
      : descriptor.baseModel)

  const reasoning = options?.reasoningEffortOverride
    ? { effort: options.reasoningEffortOverride }
    : descriptor.reasoning

  return {
    transport,
    requestedModel,
    resolvedModel,
    baseUrl:
      (rawBaseUrl ??
        (transport === 'codex_responses'
          ? DEFAULT_CODEX_BASE_URL
          : DEFAULT_OPENAI_BASE_URL)
        (isGithubCopilot && transport === 'codex_responses'
          ? GITHUB_COPILOT_BASE_URL
          : (isGithubMode
            ? GITHUB_COPILOT_BASE_URL
            : DEFAULT_OPENAI_BASE_URL))
      ).replace(/\/+$/, ''),
    reasoning,
  }

@@ -343,6 +424,7 @@ export function resolveProviderRequest(options?: {
export function getAdditionalModelOptionsCacheScope(): string | null {
  if (!isEnvTruthy(process.env.CLAUDE_CODE_USE_OPENAI)) {
  if (!isEnvTruthy(process.env.CLAUDE_CODE_USE_GEMINI) &&
    !isEnvTruthy(process.env.CLAUDE_CODE_USE_MISTRAL) &&
    !isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB) &&
    !isEnvTruthy(process.env.CLAUDE_CODE_USE_BEDROCK) &&
    !isEnvTruthy(process.env.CLAUDE_CODE_USE_VERTEX) &&

46 src/services/api/reasoningLeakSanitizer.test.ts Normal file
@@ -0,0 +1,46 @@
import { describe, expect, test } from 'bun:test'

import {
  looksLikeLeakedReasoningPrefix,
  shouldBufferPotentialReasoningPrefix,
  stripLeakedReasoningPreamble,
} from './reasoningLeakSanitizer.ts'

describe('reasoning leak sanitizer', () => {
  test('strips explicit internal reasoning preambles', () => {
    const text =
      'The user just said "hey" - a simple greeting. I should respond briefly and friendly.\n\nHey! How can I help you today?'

    expect(looksLikeLeakedReasoningPrefix(text)).toBe(true)
    expect(stripLeakedReasoningPreamble(text)).toBe(
      'Hey! How can I help you today?',
    )
  })

  test('does not strip normal user-facing advice that mentions "the user should"', () => {
    const text =
      'The user should reset their password immediately.\n\nHere are the steps...'

    expect(looksLikeLeakedReasoningPrefix(text)).toBe(false)
    expect(shouldBufferPotentialReasoningPrefix(text)).toBe(false)
    expect(stripLeakedReasoningPreamble(text)).toBe(text)
  })

  test('does not strip legitimate first-person advice about responding to an incident', () => {
    const text =
      'I need to respond to this security incident immediately. The system is compromised.\n\nHere are the remediation steps...'

    expect(looksLikeLeakedReasoningPrefix(text)).toBe(false)
    expect(shouldBufferPotentialReasoningPrefix(text)).toBe(false)
    expect(stripLeakedReasoningPreamble(text)).toBe(text)
  })

  test('does not strip legitimate first-person advice about answering a support ticket', () => {
    const text =
      'I need to answer the support ticket before end of day. The customer is waiting.\n\nHere is the response I drafted...'

    expect(looksLikeLeakedReasoningPrefix(text)).toBe(false)
    expect(shouldBufferPotentialReasoningPrefix(text)).toBe(false)
    expect(stripLeakedReasoningPreamble(text)).toBe(text)
  })
})
54 src/services/api/reasoningLeakSanitizer.ts Normal file
@@ -0,0 +1,54 @@
const EXPLICIT_REASONING_START_RE =
  /^\s*(i should\b|i need to\b|let me think\b|the task\b|the request\b)/i

const EXPLICIT_REASONING_META_RE =
  /\b(user|request|question|prompt|message|task|greeting|small talk|briefly|friendly|concise)\b/i

const USER_META_START_RE =
  /^\s*the user\s+(just\s+)?(said|asked|is asking|wants|wanted|mentioned|seems|appears)\b/i

const USER_REASONING_RE =
  /^\s*the user\s+(just\s+)?(said|asked|is asking|wants|wanted|mentioned|seems|appears)\b[\s\S]*\b(i should|i need to|let me think|respond|reply|answer|greeting|small talk|briefly|friendly|concise)\b/i

export function shouldBufferPotentialReasoningPrefix(text: string): boolean {
  const normalized = text.trim()
  if (!normalized) return false

  if (looksLikeLeakedReasoningPrefix(normalized)) {
    return true
  }

  const hasParagraphBoundary = /\n\s*\n/.test(normalized)
  if (hasParagraphBoundary) {
    return false
  }

  return (
    EXPLICIT_REASONING_START_RE.test(normalized) ||
    USER_META_START_RE.test(normalized)
  )
}

export function looksLikeLeakedReasoningPrefix(text: string): boolean {
  const normalized = text.trim()
  if (!normalized) return false
  return (
    (EXPLICIT_REASONING_START_RE.test(normalized) &&
      EXPLICIT_REASONING_META_RE.test(normalized)) ||
    USER_REASONING_RE.test(normalized)
  )
}

export function stripLeakedReasoningPreamble(text: string): string {
  const normalized = text.replace(/\r\n/g, '\n')
  const parts = normalized.split(/\n\s*\n/)
  if (parts.length < 2) return text

  const first = parts[0]?.trim() ?? ''
  if (!looksLikeLeakedReasoningPrefix(first)) {
    return text
  }

  const remainder = parts.slice(1).join('\n\n').trim()
  return remainder || text
}
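
A minimal usage sketch for the sanitizer (the sample string mirrors the test fixture above; only a leading paragraph that matches the leak heuristics is dropped):

import { stripLeakedReasoningPreamble } from './reasoningLeakSanitizer.ts'

const leaked =
  'The user just said "hey" - a simple greeting. I should respond briefly and friendly.\n\nHey! How can I help you today?'
// The first paragraph looks like leaked chain-of-thought, so it is stripped:
console.log(stripLeakedReasoningPreamble(leaked)) // -> 'Hey! How can I help you today?'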

@@ -1,4 +1,4 @@
import { afterEach, describe, expect, mock, test } from 'bun:test'
import { afterEach, beforeEach, describe, expect, mock, test } from 'bun:test'
import { APIError } from '@anthropic-ai/sdk'

// Helper to build a mock APIError with specific headers

@@ -15,15 +15,27 @@ function makeError(headers: Record<string, string>): APIError {

// Save/restore env vars between tests
const originalEnv = { ...process.env }

const envKeys = [
  'CLAUDE_CODE_USE_OPENAI',
  'CLAUDE_CODE_USE_GEMINI',
  'CLAUDE_CODE_USE_GITHUB',
  'CLAUDE_CODE_USE_BEDROCK',
  'CLAUDE_CODE_USE_VERTEX',
  'CLAUDE_CODE_USE_FOUNDRY',
  'OPENAI_MODEL',
  'OPENAI_BASE_URL',
  'OPENAI_API_BASE',
] as const

beforeEach(() => {
  for (const key of envKeys) {
    delete process.env[key]
  }
})

afterEach(() => {
  for (const key of [
    'CLAUDE_CODE_USE_OPENAI',
    'CLAUDE_CODE_USE_GEMINI',
    'CLAUDE_CODE_USE_GITHUB',
    'CLAUDE_CODE_USE_BEDROCK',
    'CLAUDE_CODE_USE_VERTEX',
    'CLAUDE_CODE_USE_FOUNDRY',
  ]) {
  for (const key of envKeys) {
    if (originalEnv[key] === undefined) delete process.env[key]
    else process.env[key] = originalEnv[key]
  }

106 src/services/autoFix/autoFixConfig.test.ts Normal file
@@ -0,0 +1,106 @@
import { describe, expect, test } from 'bun:test'
import { AutoFixConfigSchema, getAutoFixConfig, type AutoFixConfig } from './autoFixConfig.js'

describe('AutoFixConfigSchema', () => {
  test('parses valid full config', () => {
    const input = {
      enabled: true,
      lint: 'eslint . --fix',
      test: 'bun test',
      maxRetries: 3,
      timeout: 30000,
    }
    const result = AutoFixConfigSchema.safeParse(input)
    expect(result.success).toBe(true)
    if (result.success) {
      expect(result.data.enabled).toBe(true)
      expect(result.data.lint).toBe('eslint . --fix')
      expect(result.data.test).toBe('bun test')
      expect(result.data.maxRetries).toBe(3)
      expect(result.data.timeout).toBe(30000)
    }
  })

  test('parses minimal config with defaults', () => {
    const input = { enabled: true, lint: 'eslint .' }
    const result = AutoFixConfigSchema.safeParse(input)
    expect(result.success).toBe(true)
    if (result.success) {
      expect(result.data.maxRetries).toBe(3)
      expect(result.data.timeout).toBe(30000)
      expect(result.data.test).toBeUndefined()
    }
  })

  test('rejects config with enabled but no lint or test', () => {
    const input = { enabled: true }
    const result = AutoFixConfigSchema.safeParse(input)
    expect(result.success).toBe(false)
  })

  test('accepts disabled config without commands', () => {
    const input = { enabled: false }
    const result = AutoFixConfigSchema.safeParse(input)
    expect(result.success).toBe(true)
  })

  test('rejects negative maxRetries', () => {
    const input = { enabled: true, lint: 'eslint .', maxRetries: -1 }
    const result = AutoFixConfigSchema.safeParse(input)
    expect(result.success).toBe(false)
  })

  test('rejects maxRetries above 10', () => {
    const input = { enabled: true, lint: 'eslint .', maxRetries: 11 }
    const result = AutoFixConfigSchema.safeParse(input)
    expect(result.success).toBe(false)
  })
})

describe('getAutoFixConfig', () => {
  test('returns null when settings have no autoFix', () => {
    const result = getAutoFixConfig(undefined)
    expect(result).toBeNull()
  })

  test('returns null when autoFix is disabled', () => {
    const result = getAutoFixConfig({ enabled: false })
    expect(result).toBeNull()
  })

  test('returns parsed config when valid and enabled', () => {
    const result = getAutoFixConfig({ enabled: true, lint: 'eslint .' })
    expect(result).not.toBeNull()
    expect(result!.enabled).toBe(true)
    expect(result!.lint).toBe('eslint .')
  })
})

describe('SettingsSchema autoFix integration', () => {
  test('SettingsSchema accepts autoFix field', async () => {
    const { SettingsSchema } = await import('../../utils/settings/types.js')
    const settings = {
      autoFix: {
        enabled: true,
        lint: 'eslint .',
        test: 'bun test',
        maxRetries: 3,
        timeout: 30000,
      },
    }
    const result = SettingsSchema().safeParse(settings)
    expect(result.success).toBe(true)
  })

  test('SettingsSchema rejects invalid autoFix', async () => {
    const { SettingsSchema } = await import('../../utils/settings/types.js')
    const settings = {
      autoFix: {
        enabled: true,
        // missing lint and test - should fail refine
      },
    }
    const result = SettingsSchema().safeParse(settings)
    expect(result.success).toBe(false)
  })
})
52 src/services/autoFix/autoFixConfig.ts Normal file
@@ -0,0 +1,52 @@
import { z } from 'zod/v4'

export const AutoFixConfigSchema = z
  .object({
    enabled: z.boolean().describe('Whether auto-fix is enabled'),
    lint: z
      .string()
      .optional()
      .describe('Lint command to run after file edits (e.g. "eslint . --fix")'),
    test: z
      .string()
      .optional()
      .describe('Test command to run after file edits (e.g. "bun test")'),
    maxRetries: z
      .number()
      .int()
      .min(0)
      .max(10)
      .default(3)
      .describe('Maximum number of auto-fix retry attempts (default: 3)'),
    timeout: z
      .number()
      .int()
      .min(1000)
      .max(300000)
      .default(30000)
      .describe('Timeout in ms for each lint/test command (default: 30000)'),
  })
  .refine(
    data => !data.enabled || data.lint !== undefined || data.test !== undefined,
    {
      message: 'At least one of "lint" or "test" must be set when enabled',
    },
  )

export type AutoFixConfig = z.infer<typeof AutoFixConfigSchema>

export function getAutoFixConfig(
  rawConfig: unknown,
): AutoFixConfig | null {
  if (!rawConfig || typeof rawConfig !== 'object') {
    return null
  }
  const parsed = AutoFixConfigSchema.safeParse(rawConfig)
  if (!parsed.success) {
    return null
  }
  if (!parsed.data.enabled) {
    return null
  }
  return parsed.data
}
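
A short usage sketch for the config helper (the input literal is illustrative):

import { getAutoFixConfig } from './autoFixConfig.js'

// Parses and validates; returns null unless the config is valid AND enabled.
const autoFix = getAutoFixConfig({ enabled: true, lint: 'eslint . --fix' })
// Schema defaults apply: autoFix?.maxRetries === 3, autoFix?.timeout === 30000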
63 src/services/autoFix/autoFixHook.test.ts Normal file
@@ -0,0 +1,63 @@
import { describe, expect, test } from 'bun:test'
import {
  shouldRunAutoFix,
  buildAutoFixContext,
} from './autoFixHook.js'

describe('shouldRunAutoFix', () => {
  test('returns true for file_edit tool when autoFix enabled', () => {
    const config = { enabled: true, lint: 'eslint .', maxRetries: 3, timeout: 30000 }
    expect(shouldRunAutoFix('file_edit', config)).toBe(true)
  })

  test('returns true for file_write tool when autoFix enabled', () => {
    const config = { enabled: true, lint: 'eslint .', maxRetries: 3, timeout: 30000 }
    expect(shouldRunAutoFix('file_write', config)).toBe(true)
  })

  test('returns false for bash tool', () => {
    const config = { enabled: true, lint: 'eslint .', maxRetries: 3, timeout: 30000 }
    expect(shouldRunAutoFix('bash', config)).toBe(false)
  })

  test('returns false for file_read tool', () => {
    const config = { enabled: true, lint: 'eslint .', maxRetries: 3, timeout: 30000 }
    expect(shouldRunAutoFix('file_read', config)).toBe(false)
  })

  test('returns false when config is null', () => {
    expect(shouldRunAutoFix('file_edit', null)).toBe(false)
  })
})

describe('buildAutoFixContext', () => {
  test('formats lint errors as AI-readable context', () => {
    const context = buildAutoFixContext({
      hasErrors: true,
      lintOutput: 'src/foo.ts:10:5 error no-unused-vars',
      lintExitCode: 1,
      errorSummary: 'Lint errors (exit code 1):\nsrc/foo.ts:10:5 error no-unused-vars',
    })
    expect(context).toContain('AUTO-FIX')
    expect(context).toContain('no-unused-vars')
    expect(context).toContain('Please fix')
  })

  test('returns null when no errors', () => {
    const context = buildAutoFixContext({
      hasErrors: false,
    })
    expect(context).toBeNull()
  })

  test('formats test failures as AI-readable context', () => {
    const context = buildAutoFixContext({
      hasErrors: true,
      testOutput: 'FAIL src/foo.test.ts\n  expected true, got false',
      testExitCode: 1,
      errorSummary: 'Test failures (exit code 1):\nFAIL src/foo.test.ts',
    })
    expect(context).toContain('AUTO-FIX')
    expect(context).toContain('FAIL')
  })
})
25 src/services/autoFix/autoFixHook.ts Normal file
@@ -0,0 +1,25 @@
import type { AutoFixConfig } from './autoFixConfig.js'
import type { AutoFixResult } from './autoFixRunner.js'

const AUTO_FIX_TOOLS = new Set(['file_edit', 'file_write'])

export function shouldRunAutoFix(
  toolName: string,
  config: AutoFixConfig | null,
): boolean {
  if (!config) return false
  return AUTO_FIX_TOOLS.has(toolName)
}

export function buildAutoFixContext(result: AutoFixResult): string | null {
  if (!result.hasErrors || !result.errorSummary) return null

  return (
    `<auto_fix_feedback>\n` +
    `AUTO-FIX: The file you just edited has errors. Please fix them:\n\n` +
    `${result.errorSummary}\n\n` +
    `Please fix these errors in the files you just edited. ` +
    `Do not ask the user — just apply the fix.\n` +
    `</auto_fix_feedback>`
  )
}
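
A hedged sketch of how the hook helpers combine (the config literal is illustrative):

import { shouldRunAutoFix, buildAutoFixContext } from './autoFixHook.js'

const config = { enabled: true, lint: 'eslint .', maxRetries: 3, timeout: 30000 }
shouldRunAutoFix('file_edit', config) // true (only file-mutating tools qualify)
shouldRunAutoFix('bash', config)      // false
// buildAutoFixContext wraps errorSummary in <auto_fix_feedback> tags,
// or returns null when the check passed.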
48 src/services/autoFix/autoFixIntegration.test.ts Normal file
@@ -0,0 +1,48 @@
import { describe, expect, test } from 'bun:test'
import { getAutoFixConfig } from './autoFixConfig.js'
import { shouldRunAutoFix, buildAutoFixContext } from './autoFixHook.js'
import { runAutoFixCheck } from './autoFixRunner.js'

describe('autoFix end-to-end flow', () => {
  test('full flow: config → shouldRun → check → context', async () => {
    const config = getAutoFixConfig({
      enabled: true,
      lint: 'echo "error: unused" && exit 1',
      maxRetries: 2,
      timeout: 5000,
    })
    expect(config).not.toBeNull()
    expect(shouldRunAutoFix('file_edit', config)).toBe(true)

    const result = await runAutoFixCheck({
      lint: config!.lint,
      test: config!.test,
      timeout: config!.timeout,
      cwd: '/tmp',
    })
    expect(result.hasErrors).toBe(true)

    const context = buildAutoFixContext(result)
    expect(context).not.toBeNull()
    expect(context).toContain('AUTO-FIX')
    expect(context).toContain('unused')
  })

  test('full flow: no errors = no context', async () => {
    const config = getAutoFixConfig({
      enabled: true,
      lint: 'echo "all clean"',
      timeout: 5000,
    })
    const result = await runAutoFixCheck({
      lint: config!.lint,
      timeout: config!.timeout,
      cwd: '/tmp',
    })
    expect(result.hasErrors).toBe(false)
    const context = buildAutoFixContext(result)
    expect(context).toBeNull()
  })
})
103 src/services/autoFix/autoFixRunner.test.ts Normal file
@@ -0,0 +1,103 @@
import { describe, expect, test } from 'bun:test'
import {
  runAutoFixCheck,
  type AutoFixResult,
  type AutoFixCheckOptions,
} from './autoFixRunner.js'

describe('runAutoFixCheck', () => {
  test('returns success when lint command exits 0', async () => {
    const result = await runAutoFixCheck({
      lint: 'echo "all clean"',
      timeout: 5000,
      cwd: '/tmp',
    })
    expect(result.hasErrors).toBe(false)
    expect(result.lintOutput).toContain('all clean')
    expect(result.testOutput).toBeUndefined()
  })

  test('returns errors when lint command exits non-zero', async () => {
    const result = await runAutoFixCheck({
      lint: 'echo "error: unused var" && exit 1',
      timeout: 5000,
      cwd: '/tmp',
    })
    expect(result.hasErrors).toBe(true)
    expect(result.lintOutput).toContain('unused var')
    expect(result.lintExitCode).toBe(1)
  })

  test('returns errors when test command exits non-zero', async () => {
    const result = await runAutoFixCheck({
      test: 'echo "FAIL test_foo" && exit 1',
      timeout: 5000,
      cwd: '/tmp',
    })
    expect(result.hasErrors).toBe(true)
    expect(result.testOutput).toContain('FAIL test_foo')
    expect(result.testExitCode).toBe(1)
  })

  test('runs both lint and test commands', async () => {
    const result = await runAutoFixCheck({
      lint: 'echo "lint ok"',
      test: 'echo "test ok"',
      timeout: 5000,
      cwd: '/tmp',
    })
    expect(result.hasErrors).toBe(false)
    expect(result.lintOutput).toContain('lint ok')
    expect(result.testOutput).toContain('test ok')
  })

  test('skips test if lint fails', async () => {
    const result = await runAutoFixCheck({
      lint: 'echo "lint error" && exit 1',
      test: 'echo "should not run"',
      timeout: 5000,
      cwd: '/tmp',
    })
    expect(result.hasErrors).toBe(true)
    expect(result.lintOutput).toContain('lint error')
    expect(result.testOutput).toBeUndefined()
  })

  test('handles timeout gracefully', async () => {
    const result = await runAutoFixCheck({
      lint: 'sleep 10',
      timeout: 100,
      cwd: '/tmp',
    })
    expect(result.hasErrors).toBe(true)
    expect(result.timedOut).toBe(true)
  })

  test('returns success with no commands configured', async () => {
    const result = await runAutoFixCheck({
      timeout: 5000,
      cwd: '/tmp',
    })
    expect(result.hasErrors).toBe(false)
  })

  test('formats error summary for AI consumption', async () => {
    const result = await runAutoFixCheck({
      lint: 'echo "src/foo.ts:10:5 error no-unused-vars" && exit 1',
      timeout: 5000,
      cwd: '/tmp',
    })
    expect(result.hasErrors).toBe(true)
    const summary = result.errorSummary
    expect(summary).toContain('Lint errors')
    expect(summary).toContain('no-unused-vars')
  })
})
169 src/services/autoFix/autoFixRunner.ts Normal file
@@ -0,0 +1,169 @@
import { spawn } from 'child_process'

export interface AutoFixCheckOptions {
  lint?: string
  test?: string
  timeout: number
  cwd: string
  signal?: AbortSignal
}

export interface AutoFixResult {
  hasErrors: boolean
  lintOutput?: string
  lintExitCode?: number
  testOutput?: string
  testExitCode?: number
  timedOut?: boolean
  errorSummary?: string
}

async function runCommand(
  command: string,
  cwd: string,
  timeout: number,
  signal?: AbortSignal,
): Promise<{ stdout: string; stderr: string; exitCode: number; timedOut: boolean }> {
  return new Promise((resolve) => {
    if (signal?.aborted) {
      resolve({ stdout: '', stderr: 'Aborted', exitCode: 1, timedOut: false })
      return
    }

    let timedOut = false
    let stdout = ''
    let stderr = ''

    const isWindows = process.platform === 'win32'
    const proc = spawn(command, [], {
      cwd,
      env: { ...process.env },
      shell: true,
      windowsHide: true,
      // On Unix, create a process group so we can kill child processes on timeout/abort
      detached: !isWindows,
    })

    const killTree = () => {
      try {
        if (!isWindows && proc.pid) {
          // Kill the entire process group
          process.kill(-proc.pid, 'SIGTERM')
        } else {
          proc.kill('SIGTERM')
        }
      } catch {
        // Process may have already exited
      }
    }

    const onAbort = () => {
      killTree()
    }
    signal?.addEventListener('abort', onAbort, { once: true })

    proc.stdout?.on('data', (data: Buffer) => {
      stdout += data.toString()
    })
    proc.stderr?.on('data', (data: Buffer) => {
      stderr += data.toString()
    })

    const timer = setTimeout(() => {
      timedOut = true
      killTree()
    }, timeout)

    proc.on('close', (code) => {
      clearTimeout(timer)
      signal?.removeEventListener('abort', onAbort)
      resolve({
        stdout: stdout.slice(0, 10000),
        stderr: stderr.slice(0, 10000),
        exitCode: code ?? 1,
        timedOut,
      })
    })

    proc.on('error', () => {
      clearTimeout(timer)
      signal?.removeEventListener('abort', onAbort)
      resolve({
        stdout,
        stderr: stderr || 'Command failed to start',
        exitCode: 1,
        timedOut: false,
      })
    })
  })
}

function buildErrorSummary(result: AutoFixResult): string | undefined {
  if (!result.hasErrors) return undefined
  const parts: string[] = []

  if (result.timedOut) {
    parts.push('Command timed out.')
  }
  if (result.lintExitCode !== undefined && result.lintExitCode !== 0) {
    parts.push(`Lint errors (exit code ${result.lintExitCode}):\n${result.lintOutput ?? ''}`)
  }
  if (result.testExitCode !== undefined && result.testExitCode !== 0) {
    parts.push(`Test failures (exit code ${result.testExitCode}):\n${result.testOutput ?? ''}`)
  }

  return parts.join('\n\n')
}

export async function runAutoFixCheck(
  options: AutoFixCheckOptions,
): Promise<AutoFixResult> {
  const { lint, test, timeout, cwd, signal } = options

  if (!lint && !test) {
    return { hasErrors: false }
  }

  if (signal?.aborted) {
    return { hasErrors: false }
  }

  const result: AutoFixResult = { hasErrors: false }

  // Run lint first
  if (lint) {
    const lintResult = await runCommand(lint, cwd, timeout, signal)
    result.lintOutput = (lintResult.stdout + '\n' + lintResult.stderr).trim()
    result.lintExitCode = lintResult.exitCode

    if (lintResult.timedOut) {
      result.hasErrors = true
      result.timedOut = true
      result.errorSummary = buildErrorSummary(result)
      return result
    }

    if (lintResult.exitCode !== 0) {
      result.hasErrors = true
      result.errorSummary = buildErrorSummary(result)
      return result
    }
  }

  // Run tests only if lint passed (or no lint configured)
  if (test) {
    const testResult = await runCommand(test, cwd, timeout, signal)
    result.testOutput = (testResult.stdout + '\n' + testResult.stderr).trim()
    result.testExitCode = testResult.exitCode

    if (testResult.timedOut) {
      result.hasErrors = true
      result.timedOut = true
    } else if (testResult.exitCode !== 0) {
      result.hasErrors = true
    }
  }

  result.errorSummary = buildErrorSummary(result)
  return result
}
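
A minimal invocation sketch (commands and cwd are illustrative; lint runs first, and tests are skipped when it fails, per the early returns above):

import { runAutoFixCheck } from './autoFixRunner.js'

const result = await runAutoFixCheck({
  lint: 'eslint .',
  test: 'bun test',
  timeout: 30_000,
  cwd: process.cwd(),
})
if (result.hasErrors) console.error(result.errorSummary)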

@@ -9,7 +9,10 @@ const sessionTranscriptModule = feature('KAIROS')

import { APIUserAbortError } from '@anthropic-ai/sdk'
import { markPostCompaction } from 'src/bootstrap/state.js'
import { getInvokedSkillsForAgent } from '../../bootstrap/state.js'
import {
  getInvokedSkillsForAgent,
  getOriginalCwd,
} from '../../bootstrap/state.js'
import type { QuerySource } from '../../constants/querySource.js'
import type { CanUseToolFn } from '../../hooks/useCanUseTool.js'
import type { Tool, ToolUseContext } from '../../Tool.js'

@@ -68,6 +71,7 @@ import {
} from '../../utils/messages.js'
import { expandPath } from '../../utils/path.js'
import { getPlan, getPlanFilePath } from '../../utils/plans.js'
import { getProjectInstructionFilePaths } from '../../utils/projectInstructions.js'
import {
  isSessionActivityTrackingActive,
  sendSessionActivitySignal,

@@ -1689,8 +1693,13 @@ function shouldExcludeFromPostCompactRestore(
  // and to also match child directory memory files (.claude/rules/*.md, etc.)
  try {
    const normalizedMemoryPaths = new Set(
      MEMORY_TYPE_VALUES.map(type => expandPath(getMemoryPath(type))),
      MEMORY_TYPE_VALUES.filter(type => type !== 'Project').map(type =>
        expandPath(getMemoryPath(type)),
      ),
    )
    for (const path of getProjectInstructionFilePaths(getOriginalCwd())) {
      normalizedMemoryPaths.add(expandPath(path))
    }

    if (normalizedMemoryPaths.has(normalizedFilename)) {
      return true

@@ -1,4 +1,4 @@
import { afterEach, describe, expect, mock, test } from 'bun:test'
import { afterEach, beforeEach, describe, expect, mock, test } from 'bun:test'

import {
  DEFAULT_GITHUB_DEVICE_SCOPE,

@@ -7,14 +7,26 @@ import {
  requestDeviceCode,
} from './deviceFlow.js'

async function importFreshModule() {
  mock.restore()
  return import(`./deviceFlow.ts?ts=${Date.now()}-${Math.random()}`)
}

describe('requestDeviceCode', () => {
  const originalFetch = globalThis.fetch

  beforeEach(() => {
    mock.restore()
    globalThis.fetch = originalFetch
  })

  afterEach(() => {
    globalThis.fetch = originalFetch
  })

  test('parses successful device code response', async () => {
    const { requestDeviceCode } = await importFreshModule()

    globalThis.fetch = mock(() =>
      Promise.resolve(
        new Response(

@@ -42,6 +54,9 @@ describe('requestDeviceCode', () => {
  })

  test('throws on HTTP error', async () => {
    const { requestDeviceCode, GitHubDeviceFlowError } =
      await importFreshModule()

    globalThis.fetch = mock(() =>
      Promise.resolve(new Response('bad', { status: 500 })),
    )

@@ -134,6 +149,8 @@ describe('pollAccessToken', () => {
  })

  test('returns token when GitHub responds with access_token immediately', async () => {
    const { pollAccessToken } = await importFreshModule()

    let calls = 0
    globalThis.fetch = mock(() => {
      calls++

@@ -153,6 +170,8 @@ describe('pollAccessToken', () => {
  })

  test('throws on access_denied', async () => {
    const { pollAccessToken } = await importFreshModule()

    globalThis.fetch = mock(() =>
      Promise.resolve(
        new Response(JSON.stringify({ error: 'access_denied' }), {

@@ -168,3 +187,62 @@ describe('pollAccessToken', () => {
    ).rejects.toThrow(/denied/)
  })
})

describe('exchangeForCopilotToken', () => {
  const originalFetch = globalThis.fetch

  afterEach(() => {
    globalThis.fetch = originalFetch
  })

  test('parses successful Copilot token response', async () => {
    const { exchangeForCopilotToken } = await importFreshModule()

    globalThis.fetch = mock(() =>
      Promise.resolve(
        new Response(
          JSON.stringify({
            token: 'copilot-token-xyz',
            expires_at: 1700000000,
            refresh_in: 3600,
            endpoints: {
              api: 'https://api.githubcopilot.com',
            },
          }),
          { status: 200 },
        ),
      ),
    )

    const result = await exchangeForCopilotToken('oauth-token', globalThis.fetch)
    expect(result.token).toBe('copilot-token-xyz')
    expect(result.expires_at).toBe(1700000000)
    expect(result.refresh_in).toBe(3600)
    expect(result.endpoints.api).toBe('https://api.githubcopilot.com')
  })

  test('throws on HTTP error', async () => {
    const { exchangeForCopilotToken, GitHubDeviceFlowError } =
      await importFreshModule()

    globalThis.fetch = mock(() =>
      Promise.resolve(new Response('unauthorized', { status: 401 })),
    )
    await expect(
      exchangeForCopilotToken('bad-token', globalThis.fetch),
    ).rejects.toThrow(GitHubDeviceFlowError)
  })

  test('throws on malformed response', async () => {
    const { exchangeForCopilotToken } = await importFreshModule()

    globalThis.fetch = mock(() =>
      Promise.resolve(
        new Response(JSON.stringify({ invalid: 'data' }), { status: 200 }),
      ),
    )
    await expect(
      exchangeForCopilotToken('oauth-token', globalThis.fetch),
    ).rejects.toThrow(/Malformed/)
  })
})

@@ -1,19 +1,35 @@
/**
 * GitHub OAuth device flow for CLI login (https://docs.github.com/en/apps/oauth-apps/building-oauth-apps/authorizing-oauth-apps#device-flow).
 * Uses GitHub Copilot's official OAuth app for device authentication.
 */

import { execFileNoThrow } from '../../utils/execFileNoThrow.js'

export const DEFAULT_GITHUB_DEVICE_FLOW_CLIENT_ID = 'Ov23liXjWSSui6QIahPl'
export const DEFAULT_GITHUB_DEVICE_FLOW_CLIENT_ID = 'Iv1.b507a08c87ecfe98'

export const GITHUB_DEVICE_CODE_URL = 'https://github.com/login/device/code'
export const GITHUB_DEVICE_ACCESS_TOKEN_URL =
  'https://github.com/login/oauth/access_token'
export const COPILOT_TOKEN_URL = 'https://api.github.com/copilot_internal/v2/token'

// OAuth app device flow does not accept the GitHub Models permission token
// scope (models:read). Use an OAuth-safe default.
const OAUTH_SAFE_GITHUB_DEVICE_SCOPE = 'read:user'
export const DEFAULT_GITHUB_DEVICE_SCOPE = OAUTH_SAFE_GITHUB_DEVICE_SCOPE
/** Only read:user scope — required for Copilot OAuth */
export const DEFAULT_GITHUB_DEVICE_SCOPE = 'read:user'

export const COPILOT_HEADERS: Record<string, string> = {
  'User-Agent': 'GitHubCopilotChat/0.26.7',
  'Editor-Version': 'vscode/1.99.3',
  'Editor-Plugin-Version': 'copilot-chat/0.26.7',
  'Copilot-Integration-Id': 'vscode-chat',
}

export type CopilotTokenResponse = {
  token: string
  expires_at: number
  refresh_in: number
  endpoints: {
    api: string
  }
}

export class GitHubDeviceFlowError extends Error {
  constructor(message: string) {

@@ -30,6 +46,8 @@ export type DeviceCodeResult = {
  interval: number
}

type FetchLike = (input: RequestInfo | URL, init?: RequestInit) => Promise<Response>

export function getGithubDeviceFlowClientId(): string {
  return (
    process.env.GITHUB_DEVICE_FLOW_CLIENT_ID?.trim() ||

@@ -44,21 +62,21 @@ function sleep(ms: number): Promise<void> {
export async function requestDeviceCode(options?: {
  clientId?: string
  scope?: string
  fetchImpl?: typeof fetch
  fetchImpl?: FetchLike
}): Promise<DeviceCodeResult> {
  const clientId = options?.clientId ?? getGithubDeviceFlowClientId()
  if (!clientId) {
    throw new GitHubDeviceFlowError(
      'No OAuth client ID: set GITHUB_DEVICE_FLOW_CLIENT_ID or paste a PAT instead.',
      'No OAuth client ID: set GITHUB_DEVICE_FLOW_CLIENT_ID.',
    )
  }
  const fetchFn = options?.fetchImpl ?? fetch
  const requestedScope =
    options?.scope?.trim() || DEFAULT_GITHUB_DEVICE_SCOPE
  const scopesToTry =
    requestedScope === OAUTH_SAFE_GITHUB_DEVICE_SCOPE
    requestedScope === DEFAULT_GITHUB_DEVICE_SCOPE
      ? [requestedScope]
      : [requestedScope, OAUTH_SAFE_GITHUB_DEVICE_SCOPE]
      : [requestedScope, DEFAULT_GITHUB_DEVICE_SCOPE]

  let lastError = 'Device code request failed.'

@@ -77,7 +95,7 @@ export async function requestDeviceCode(options?: {
      lastError = `Device code request failed: ${res.status} ${text}`
      const isInvalidScope = /invalid_scope/i.test(text)
      const canRetryWithFallback =
        scope !== OAUTH_SAFE_GITHUB_DEVICE_SCOPE && isInvalidScope
        scope !== DEFAULT_GITHUB_DEVICE_SCOPE && isInvalidScope
      if (canRetryWithFallback) {
        continue
      }

@@ -114,7 +132,7 @@ export type PollOptions = {
  clientId?: string
  initialInterval?: number
  timeoutSeconds?: number
  fetchImpl?: typeof fetch
  fetchImpl?: FetchLike
}

export async function pollAccessToken(

@@ -197,3 +215,49 @@ export async function openVerificationUri(uri: string): Promise<void> {
    // User can open the URL manually
  }
}

/**
 * Exchange an OAuth access token for a Copilot API token.
 * The OAuth token alone cannot be used with the Copilot API endpoint.
 */
export async function exchangeForCopilotToken(
  oauthToken: string,
  fetchImpl?: FetchLike,
): Promise<CopilotTokenResponse> {
  const fetchFn = fetchImpl ?? fetch
  const res = await fetchFn(COPILOT_TOKEN_URL, {
    method: 'GET',
    headers: {
      Accept: 'application/json',
      Authorization: `Bearer ${oauthToken}`,
      ...COPILOT_HEADERS,
    },
  })
  if (!res.ok) {
    const text = await res.text().catch(() => '')
    throw new GitHubDeviceFlowError(
      `Copilot token exchange failed: ${res.status} ${text}`,
    )
  }
  const data = (await res.json()) as Record<string, unknown>
  const token = data.token
  const expires_at = data.expires_at
  const refresh_in = data.refresh_in
  const endpoints = data.endpoints
  if (
    typeof token !== 'string' ||
    typeof expires_at !== 'number' ||
    typeof refresh_in !== 'number' ||
    !endpoints ||
    typeof endpoints !== 'object' ||
    typeof (endpoints as Record<string, unknown>).api !== 'string'
  ) {
    throw new GitHubDeviceFlowError('Malformed Copilot token response')
  }
  return {
    token,
    expires_at,
    refresh_in,
    endpoints: endpoints as { api: string },
  }
}
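
A hedged end-to-end sketch of the device flow (the pollAccessToken arguments and the device_code/verification_uri field names are assumptions based on GitHub's documented device-flow response; they are not shown in full above):

import {
  requestDeviceCode,
  pollAccessToken,
  exchangeForCopilotToken,
  openVerificationUri,
} from './deviceFlow.js'

const device = await requestDeviceCode()
await openVerificationUri(device.verification_uri) // assumed field; the user approves in a browser
const oauthToken = await pollAccessToken(device.device_code, {
  initialInterval: device.interval, // interval is part of DeviceCodeResult above
})
const copilot = await exchangeForCopilotToken(oauthToken)
// copilot.token (not the OAuth token) authorizes calls to copilot.endpoints.api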
|
||||
|
||||
@@ -1,6 +1,11 @@
|
||||
// Mock rate limits for testing [internal-only]
|
||||
// The external build keeps this module as a stable no-op surface so imports
|
||||
// remain valid without exposing internal-only rate-limit simulation behavior.
|
||||
// This allows testing various rate limit scenarios without hitting actual limits
|
||||
//
|
||||
// WARNING: This is for internal testing/demo purposes only!
|
||||
// The mock headers may not exactly match the API specification or real-world behavior.
|
||||
// Always validate against actual API responses before relying on this for production features.
|
||||
|
||||
import { setMockBillingAccessOverride } from '../utils/billing.js'
|
||||
import type { OverageDisabledReason } from './claudeAiLimits.js'
|
||||
|
||||
@@ -645,7 +645,7 @@ const internalOnlyTips: Tip[] =
|
||||
{
|
||||
id: 'skillify',
|
||||
content: async () =>
|
||||
'[internal] Turn repeatable workflows into reusable project skills when they keep recurring',
|
||||
'[internal] Use /skillify to turn repeatable recurring workflows into reusable project skills',
|
||||
cooldownSessions: 15,
|
||||
isRelevant: async () => true,
|
||||
},
|
||||
|
||||
@@ -29,6 +29,13 @@ import {
|
||||
} from '../../utils/permissions/PermissionResult.js'
|
||||
import { checkRuleBasedPermissions } from '../../utils/permissions/permissions.js'
|
||||
import { formatError } from '../../utils/toolErrors.js'
|
||||
import { getAutoFixConfig } from '../autoFix/autoFixConfig.js'
|
||||
import { shouldRunAutoFix, buildAutoFixContext } from '../autoFix/autoFixHook.js'
|
||||
import { runAutoFixCheck } from '../autoFix/autoFixRunner.js'
|
||||
|
||||
// Track auto-fix retry count per query chain to enforce maxRetries cap.
|
||||
// Key: queryChainId (or 'default'), Value: number of auto-fix attempts used.
|
||||
const autoFixRetryCount = new Map<string, number>()
|
||||
import { isMcpTool } from '../mcp/utils.js'
|
||||
import type { McpServerType, MessageUpdateLazy } from './toolExecution.js'
|
||||
|
||||
@@ -185,6 +192,65 @@ export async function* runPostToolUseHooks<Input extends AnyObject, Output>(
        }
      }
    }

    // Auto-fix: run lint/test if configured for this tool
    const autoFixSettings = toolUseContext.getAppState().settings
    const autoFixConfig = getAutoFixConfig(
      autoFixSettings && typeof autoFixSettings === 'object' && 'autoFix' in autoFixSettings
        ? (autoFixSettings as Record<string, unknown>).autoFix
        : undefined,
    )
    if (shouldRunAutoFix(tool.name, autoFixConfig) && autoFixConfig) {
      // Enforce maxRetries cap to prevent unbounded auto-fix loops.
      // Uses queryChainId to scope the counter to the current conversation turn.
      const chainKey = (toolUseContext.queryTracking?.chainId as string) ?? 'default'
      const currentRetries = autoFixRetryCount.get(chainKey) ?? 0

      if (currentRetries >= autoFixConfig.maxRetries) {
        // Max retries reached — skip auto-fix and let the user know
        yield {
          message: createAttachmentMessage({
            type: 'hook_additional_context',
            content: [
              `<auto_fix_feedback>\nAUTO-FIX: Maximum retry limit (${autoFixConfig.maxRetries}) reached. ` +
                `Skipping further auto-fix attempts. Please review the errors manually.\n</auto_fix_feedback>`,
            ],
            hookName: `AutoFix:${tool.name}`,
            toolUseID,
            hookEvent: 'PostToolUse',
          }),
        }
      } else {
        try {
          const cwd = toolUseContext.options?.cwd ?? process.cwd()
          const autoFixResult = await runAutoFixCheck({
            lint: autoFixConfig.lint,
            test: autoFixConfig.test,
            timeout: autoFixConfig.timeout,
            cwd,
            signal: toolUseContext.abortController.signal,
          })
          const autoFixContext = buildAutoFixContext(autoFixResult)
          if (autoFixContext) {
            autoFixRetryCount.set(chainKey, currentRetries + 1)
            yield {
              message: createAttachmentMessage({
                type: 'hook_additional_context',
                content: [autoFixContext],
                hookName: `AutoFix:${tool.name}`,
                toolUseID,
                hookEvent: 'PostToolUse',
              }),
            }
          } else {
            // Lint/test passed — reset the retry counter for this chain
            autoFixRetryCount.delete(chainKey)
          }
        } catch (autoFixError) {
          logError(autoFixError)
        }
      }
    }
  } catch (error) {
    logError(error)
  }

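For orientation, here is a minimal sketch of the settings shape this hunk appears to read. The `autoFix` key and the `lint`, `test`, `timeout`, and `maxRetries` field names come from the diff above, but the field types and example values are assumptions, not part of this PR.

```ts
// Hypothetical settings fragment matching how the hunk reads settings.autoFix.
// Field names come from the diff; the types and values here are assumptions.
interface AutoFixSettings {
  lint?: string // assumed to be a shell command, e.g. 'bun run lint'
  test?: string // assumed to be a shell command, e.g. 'bun test'
  timeout?: number // assumed to be a per-check timeout in milliseconds
  maxRetries: number // the retry cap enforced via autoFixRetryCount above
}

const exampleSettings: { autoFix: AutoFixSettings } = {
  autoFix: {
    lint: 'bun run lint',
    test: 'bun test',
    timeout: 120_000,
    maxRetries: 3,
  },
}

console.log(exampleSettings.autoFix.maxRetries) // 3
```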
68
src/services/wiki/indexBuilder.ts
Normal file
@@ -0,0 +1,68 @@
import { readdir, readFile, writeFile } from 'fs/promises'
import { basename, relative } from 'path'
import { getWikiPaths } from './paths.js'

async function listMarkdownFiles(dir: string): Promise<string[]> {
  const entries = await readdir(dir, { withFileTypes: true })
  const files: string[] = []

  for (const entry of entries) {
    const fullPath = `${dir}/${entry.name}`
    if (entry.isDirectory()) {
      files.push(...(await listMarkdownFiles(fullPath)))
    } else if (entry.isFile() && entry.name.endsWith('.md')) {
      files.push(fullPath)
    }
  }

  return files.sort()
}

async function getPageTitle(path: string): Promise<string> {
  const content = await readFile(path, 'utf8')
  const titleLine = content
    .split('\n')
    .map(line => line.trim())
    .find(line => line.startsWith('# '))

  return titleLine ? titleLine.replace(/^#\s+/, '') : basename(path, '.md')
}

export async function rebuildWikiIndex(cwd: string): Promise<void> {
  const paths = getWikiPaths(cwd)
  const pageFiles = await listMarkdownFiles(paths.pagesDir)
  const sourceFiles = await listMarkdownFiles(paths.sourcesDir)

  const pageLinks = await Promise.all(
    pageFiles.map(async file => {
      const rel = relative(paths.root, file)
      const title = await getPageTitle(file)
      return `- [${title}](./${rel.replace(/\\/g, '/')})`
    }),
  )

  const sourceLinks = sourceFiles.map(file => {
    const rel = relative(paths.root, file).replace(/\\/g, '/')
    const title = basename(file, '.md')
    return `- [${title}](./${rel})`
  })

  const content = `# ${basename(cwd)} Wiki

This wiki is maintained by OpenClaude as a durable project knowledge layer.

## Core Pages

${pageLinks.length > 0 ? pageLinks.join('\n') : '- No pages yet'}

## Sources

${sourceLinks.length > 0 ? sourceLinks.join('\n') : '- No sources yet'}

## Recent Updates

- See [log.md](./log.md)
`

  await writeFile(paths.indexFile, content, 'utf8')
}
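A quick usage sketch for this new module, assuming `getWikiPaths(cwd)` resolves under `.openclaude/wiki/` as the test file below suggests:

```ts
// Minimal sketch: regenerate the wiki index for the current project.
// The .openclaude/wiki/ location is inferred from the tests, not from this file.
import { rebuildWikiIndex } from './indexBuilder.js'

await rebuildWikiIndex(process.cwd())
// index.md now links every markdown page under pages/ and every note under sources/.
```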
48
src/services/wiki/ingest.test.ts
Normal file
@@ -0,0 +1,48 @@
import { afterEach, expect, test } from 'bun:test'
import { mkdtemp, readFile, rm, writeFile } from 'fs/promises'
import { tmpdir } from 'os'
import { join } from 'path'
import { ingestLocalWikiSource } from './ingest.js'
import { getWikiPaths } from './paths.js'

const tempDirs: string[] = []

afterEach(async () => {
  await Promise.all(
    tempDirs.splice(0).map(dir => rm(dir, { recursive: true, force: true })),
  )
})

async function makeProjectDir(): Promise<string> {
  const dir = await mkdtemp(join(tmpdir(), 'openclaude-wiki-ingest-'))
  tempDirs.push(dir)
  return dir
}

test('ingestLocalWikiSource creates a source note and updates log/index', async () => {
  const cwd = await makeProjectDir()
  const sourcePath = join(cwd, 'notes.md')
  await writeFile(
    sourcePath,
    '# Design Notes\n\nThis subsystem coordinates provider routing and session state.\nIt should be documented for future contributors.\n',
    'utf8',
  )

  const result = await ingestLocalWikiSource(cwd, 'notes.md')
  const paths = getWikiPaths(cwd)

  expect(result.sourceFile).toBe('notes.md')
  expect(result.title).toBe('Design Notes')
  expect(result.sourceNote.startsWith('.openclaude/wiki/sources/')).toBe(true)

  const sourceNote = await readFile(join(cwd, result.sourceNote), 'utf8')
  expect(sourceNote).toContain('# Design Notes')
  expect(sourceNote).toContain('Path: `notes.md`')

  const log = await readFile(paths.logFile, 'utf8')
  expect(log).toContain('Ingested `notes.md`')

  const index = await readFile(paths.indexFile, 'utf8')
  expect(index).toContain('./sources/')
  expect(index).toContain(result.sourceNote.replace('.openclaude/wiki/', './'))
})
93
src/services/wiki/ingest.ts
Normal file
@@ -0,0 +1,93 @@
import { appendFile, readFile, stat, writeFile } from 'fs/promises'
import { basename, extname, isAbsolute, relative, resolve } from 'path'
import { initializeWiki } from './init.js'
import { rebuildWikiIndex } from './indexBuilder.js'
import { getWikiPaths } from './paths.js'
import type { WikiIngestResult } from './types.js'
import {
  extractTitleFromText,
  sanitizeWikiSlug,
  summarizeText,
} from './utils.js'

function buildSourceNote(params: {
  title: string
  sourcePath: string
  ingestedAt: string
  summary: string
  excerpt: string
}): string {
  const { title, sourcePath, ingestedAt, summary, excerpt } = params

  return `# ${title}

## Source

- Path: \`${sourcePath}\`
- Ingested at: ${ingestedAt}

## Summary

${summary}

## Excerpt

\`\`\`
${excerpt}
\`\`\`

## Linked Pages

- [Architecture](../pages/architecture.md)
`
}

function buildLogEntry(sourcePath: string, title: string, ingestedAt: string): string {
  return `- ${ingestedAt}: Ingested \`${sourcePath}\` into source note "${title}"`
}

export async function ingestLocalWikiSource(
  cwd: string,
  rawPath: string,
): Promise<WikiIngestResult> {
  await initializeWiki(cwd)

  const resolvedPath = isAbsolute(rawPath) ? rawPath : resolve(cwd, rawPath)
  const fileInfo = await stat(resolvedPath)
  if (!fileInfo.isFile()) {
    throw new Error(`Not a file: ${resolvedPath}`)
  }

  const content = await readFile(resolvedPath, 'utf8')
  const relSourcePath = relative(cwd, resolvedPath).replace(/\\/g, '/')
  const ingestedAt = new Date().toISOString()
  const baseName = basename(resolvedPath, extname(resolvedPath))
  const title = extractTitleFromText(baseName, content)
  const summary = summarizeText(content)
  const excerpt = content.split('\n').slice(0, 20).join('\n').trim()
  const slug = sanitizeWikiSlug(`${baseName}-${Date.now()}`) || `source-${Date.now()}`

  const paths = getWikiPaths(cwd)
  const sourceNotePath = `${paths.sourcesDir}/${slug}.md`

  await writeFile(
    sourceNotePath,
    buildSourceNote({
      title,
      sourcePath: relSourcePath,
      ingestedAt,
      summary,
      excerpt,
    }),
    'utf8',
  )
  await appendFile(paths.logFile, `${buildLogEntry(relSourcePath, title, ingestedAt)}\n`, 'utf8')
  await rebuildWikiIndex(cwd)

  return {
    sourceFile: relSourcePath,
    sourceNote: relative(cwd, sourceNotePath).replace(/\\/g, '/'),
    summary,
    title,
  }
}
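Mirroring the test above, an end-to-end call might look like this; the input path is hypothetical, and the logged fields are the ones `ingestLocalWikiSource` actually returns:

```ts
// Usage sketch: ingest a local markdown file into the wiki's sources/ directory.
// 'docs/design-notes.md' is a hypothetical path used only for illustration.
import { ingestLocalWikiSource } from './ingest.js'

const result = await ingestLocalWikiSource(process.cwd(), 'docs/design-notes.md')
console.log(result.sourceNote) // e.g. '.openclaude/wiki/sources/design-notes-1700000000000.md'
console.log(result.summary) // the summarizeText() digest of the ingested file
```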
Some files were not shown because too many files have changed in this diff.