Compare commits
8 Commits
v0.2.3
...
feat/auto-
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
ff7eccc36c | ||
|
|
fbc838ce55 | ||
|
|
8c2d56844b | ||
|
|
6041b7f016 | ||
|
|
122f7b83f3 | ||
|
|
68230f3ffb | ||
|
|
832e80e535 | ||
|
|
93dc5a1554 |
90
.env.example
90
.env.example
@@ -248,93 +248,3 @@ ANTHROPIC_API_KEY=sk-ant-your-key-here
|
||||
|
||||
# Enable debug logging
|
||||
# CLAUDE_DEBUG=1
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# WEB SEARCH (OPTIONAL)
|
||||
# =============================================================================
|
||||
# OpenClaude includes a web search tool. By default it uses DuckDuckGo (free)
|
||||
# or the provider's native search (Anthropic firstParty / vertex).
|
||||
#
|
||||
# Set one API key below to enable a provider. That's it.
|
||||
|
||||
# ── Provider API keys — set ONE of these ────────────────────────────
|
||||
|
||||
# Tavily (AI-optimized search, recommended)
|
||||
# TAVILY_API_KEY=tvly-your-key-here
|
||||
|
||||
# Exa (neural/semantic search)
|
||||
# EXA_API_KEY=your-exa-key-here
|
||||
|
||||
# You.com (RAG-ready snippets)
|
||||
# YOU_API_KEY=your-you-key-here
|
||||
|
||||
# Jina (s.jina.ai endpoint)
|
||||
# JINA_API_KEY=your-jina-key-here
|
||||
|
||||
# Bing Web Search
|
||||
# BING_API_KEY=your-bing-key-here
|
||||
|
||||
# Mojeek (privacy-focused)
|
||||
# MOJEEK_API_KEY=your-mojeek-key-here
|
||||
|
||||
# Linkup
|
||||
# LINKUP_API_KEY=your-linkup-key-here
|
||||
|
||||
# Firecrawl (premium, uses @mendable/firecrawl-js)
|
||||
# FIRECRAWL_API_KEY=fc-your-key-here
|
||||
|
||||
# ── Provider selection mode ─────────────────────────────────────────
|
||||
#
|
||||
# WEB_SEARCH_PROVIDER controls fallback behavior:
|
||||
#
|
||||
# "auto" (default) — try all configured providers, fall through on failure
|
||||
# "custom" — custom API only, throw on failure (NOT in auto chain)
|
||||
# "firecrawl" — firecrawl only
|
||||
# "tavily" — tavily only
|
||||
# "exa" — exa only
|
||||
# "you" — you.com only
|
||||
# "jina" — jina only
|
||||
# "bing" — bing only
|
||||
# "mojeek" — mojeek only
|
||||
# "linkup" — linkup only
|
||||
# "ddg" — duckduckgo only
|
||||
# "native" — anthropic native / codex only
|
||||
#
|
||||
# Auto mode priority: firecrawl → tavily → exa → you → jina → bing → mojeek →
|
||||
# linkup → ddg
|
||||
# Note: "custom" is NOT in the auto chain. To use the custom API provider,
|
||||
# you must explicitly set WEB_SEARCH_PROVIDER=custom.
|
||||
#
|
||||
# WEB_SEARCH_PROVIDER=auto
|
||||
|
||||
# ── Built-in custom API presets ─────────────────────────────────────
|
||||
#
|
||||
# Use with WEB_KEY for the API key:
|
||||
# WEB_PROVIDER=searxng|google|brave|serpapi
|
||||
# WEB_KEY=your-api-key-here
|
||||
|
||||
# ── Custom API endpoint (advanced) ──────────────────────────────────
|
||||
#
|
||||
# WEB_SEARCH_API — base URL of your search endpoint
|
||||
# WEB_QUERY_PARAM — query parameter name (default: "q")
|
||||
# WEB_METHOD — GET or POST (default: GET)
|
||||
# WEB_PARAMS — extra static query params as JSON: {"lang":"en","count":"10"}
|
||||
# WEB_URL_TEMPLATE — URL template with {query} for path embedding
|
||||
# WEB_BODY_TEMPLATE — custom POST body with {query} placeholder
|
||||
# WEB_AUTH_HEADER — header name for API key (default: "Authorization")
|
||||
# WEB_AUTH_SCHEME — prefix before key (default: "Bearer")
|
||||
# WEB_HEADERS — extra headers as "Name: value; Name2: value2"
|
||||
# WEB_JSON_PATH — dot-path to results array in response
|
||||
|
||||
# ── Custom API security guardrails ──────────────────────────────────
|
||||
#
|
||||
# The custom provider enforces security guardrails by default.
|
||||
# Override these only if you understand the risks.
|
||||
#
|
||||
# WEB_CUSTOM_TIMEOUT_SEC=15 — request timeout in seconds (default 15)
|
||||
# WEB_CUSTOM_MAX_BODY_KB=300 — max POST body size in KB (default 300)
|
||||
# WEB_CUSTOM_ALLOW_ARBITRARY_HEADERS=false — set "true" to use non-standard headers
|
||||
# WEB_CUSTOM_ALLOW_HTTP=false — set "true" to allow http:// URLs
|
||||
# WEB_CUSTOM_ALLOW_PRIVATE=false — set "true" to target localhost/private IPs
|
||||
# (needed for self-hosted SearXNG)
|
||||
|
||||
88
.github/workflows/release.yml
vendored
88
.github/workflows/release.yml
vendored
@@ -1,88 +0,0 @@
|
||||
name: Auto Release
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
|
||||
concurrency:
|
||||
group: auto-release-${{ github.ref }}
|
||||
cancel-in-progress: false
|
||||
|
||||
jobs:
|
||||
release-please:
|
||||
name: Release Please
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: write
|
||||
pull-requests: write
|
||||
outputs:
|
||||
release_created: ${{ steps.release.outputs.release_created }}
|
||||
tag_name: ${{ steps.release.outputs.tag_name }}
|
||||
version: ${{ steps.release.outputs.version }}
|
||||
steps:
|
||||
- name: Run release-please
|
||||
id: release
|
||||
uses: googleapis/release-please-action@16a9c90856f42705d54a6fda1823352bdc62cf38
|
||||
with:
|
||||
token: ${{ secrets.GITHUB_TOKEN }}
|
||||
release-type: node
|
||||
|
||||
publish-npm:
|
||||
name: Publish to npm
|
||||
needs: release-please
|
||||
if: ${{ needs.release-please.outputs.release_created == 'true' }}
|
||||
runs-on: ubuntu-latest
|
||||
environment: release
|
||||
permissions:
|
||||
contents: read
|
||||
id-token: write
|
||||
steps:
|
||||
- name: Checkout release tag
|
||||
uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5
|
||||
with:
|
||||
ref: ${{ needs.release-please.outputs.tag_name }}
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Set up Node.js
|
||||
uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020
|
||||
with:
|
||||
node-version: 24
|
||||
registry-url: https://registry.npmjs.org
|
||||
|
||||
- name: Set up Bun
|
||||
uses: oven-sh/setup-bun@0c5077e51419868618aeaa5fe8019c62421857d6
|
||||
with:
|
||||
bun-version: 1.3.11
|
||||
|
||||
- name: Install dependencies
|
||||
run: bun install --frozen-lockfile
|
||||
|
||||
- name: Run unit tests
|
||||
run: bun test --max-concurrency=1
|
||||
|
||||
- name: Smoke test
|
||||
run: bun run smoke
|
||||
|
||||
- name: Build
|
||||
run: bun run build
|
||||
|
||||
- name: Dry-run package
|
||||
run: npm pack --dry-run
|
||||
|
||||
- name: Clear token auth for trusted publishing
|
||||
run: |
|
||||
unset NODE_AUTH_TOKEN
|
||||
echo "NODE_AUTH_TOKEN=" >> "$GITHUB_ENV"
|
||||
|
||||
- name: Publish to npm
|
||||
run: npm publish --access public --provenance
|
||||
|
||||
- name: Release summary
|
||||
run: |
|
||||
{
|
||||
echo "## Released ${{ needs.release-please.outputs.tag_name }}"
|
||||
echo
|
||||
echo "- npm: https://www.npmjs.com/package/@gitlawb/openclaude"
|
||||
echo "- GitHub: https://github.com/Gitlawb/openclaude/releases/tag/${{ needs.release-please.outputs.tag_name }}"
|
||||
} >> "$GITHUB_STEP_SUMMARY"
|
||||
@@ -1,3 +0,0 @@
|
||||
{
|
||||
".": "0.2.3"
|
||||
}
|
||||
68
CHANGELOG.md
68
CHANGELOG.md
@@ -1,68 +0,0 @@
|
||||
# Changelog
|
||||
|
||||
## [0.2.3](https://github.com/Gitlawb/openclaude/compare/v0.2.2...v0.2.3) (2026-04-12)
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* prevent infinite auto-compact loop for unknown 3P models ([#635](https://github.com/Gitlawb/openclaude/issues/635)) ([#636](https://github.com/Gitlawb/openclaude/issues/636)) ([aeaa658](https://github.com/Gitlawb/openclaude/commit/aeaa658f776fb8df95721e8b8962385f8b00f66a))
|
||||
|
||||
## [0.2.2](https://github.com/Gitlawb/openclaude/compare/v0.2.1...v0.2.2) (2026-04-12)
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* **read/edit:** make compact line prefix unambiguous for tab-indented files ([#613](https://github.com/Gitlawb/openclaude/issues/613)) ([08cc6f3](https://github.com/Gitlawb/openclaude/commit/08cc6f328711cd93ce9fa53351266c29a0b0a341))
|
||||
|
||||
## [0.2.1](https://github.com/Gitlawb/openclaude/compare/v0.2.0...v0.2.1) (2026-04-12)
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* **provider:** add recovery guidance for missing OpenAI API key ([#616](https://github.com/Gitlawb/openclaude/issues/616)) ([9419e8a](https://github.com/Gitlawb/openclaude/commit/9419e8a4a21b3771d9ddb10f7072e0a8c5b5b631))
|
||||
|
||||
## [0.2.0](https://github.com/Gitlawb/openclaude/compare/v0.1.8...v0.2.0) (2026-04-12)
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
* add /cache-probe diagnostic command ([#580](https://github.com/Gitlawb/openclaude/issues/580)) ([9ccaa7a](https://github.com/Gitlawb/openclaude/commit/9ccaa7a6759b6991f4a566b4118c06e68a2398fe)), closes [#515](https://github.com/Gitlawb/openclaude/issues/515)
|
||||
* add auto-fix service — auto-lint and test after AI file edits ([#508](https://github.com/Gitlawb/openclaude/issues/508)) ([c385047](https://github.com/Gitlawb/openclaude/commit/c385047abba4366866f4c87bfb5e0b0bd4dcbb9d))
|
||||
* Add Gemini support with thought_signature fix ([#404](https://github.com/Gitlawb/openclaude/issues/404)) ([5012c16](https://github.com/Gitlawb/openclaude/commit/5012c160c9a2dff9418e7ee19dc9a4d29ef2b024))
|
||||
* add headless gRPC server for external agent integration ([#278](https://github.com/Gitlawb/openclaude/issues/278)) ([26eef92](https://github.com/Gitlawb/openclaude/commit/26eef92fe72e9c3958d61435b8d3571e12bf2b74))
|
||||
* add wiki mvp commands ([#532](https://github.com/Gitlawb/openclaude/issues/532)) ([c328fdf](https://github.com/Gitlawb/openclaude/commit/c328fdf9e2fe59ad101b049301298ce9ff24caca))
|
||||
* GitHub provider lifecycle and onboarding hardening ([#351](https://github.com/Gitlawb/openclaude/issues/351)) ([ff7d499](https://github.com/Gitlawb/openclaude/commit/ff7d49990de515825ddbe4099f3a39b944b61370))
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* add File polyfill for Node < 20 to prevent startup deadlock with proxy ([#442](https://github.com/Gitlawb/openclaude/issues/442)) ([85aa8b0](https://github.com/Gitlawb/openclaude/commit/85aa8b0985c8f3cb8801efa5141114a0ab0f6a83))
|
||||
* add GitHub Copilot model context windows and output limits ([#576](https://github.com/Gitlawb/openclaude/issues/576)) ([a7f5982](https://github.com/Gitlawb/openclaude/commit/a7f5982f6438ab0ddc3f0daae31ea68ac7ac206c)), closes [#515](https://github.com/Gitlawb/openclaude/issues/515)
|
||||
* add LiteLLM-style aliases for GitHub Copilot context windows ([#606](https://github.com/Gitlawb/openclaude/issues/606)) ([2e0e14d](https://github.com/Gitlawb/openclaude/commit/2e0e14d71313e0e501efaa9e55c6c56f2742fb10))
|
||||
* add store:false to Chat Completions and /responses fallback ([#578](https://github.com/Gitlawb/openclaude/issues/578)) ([8aaa4f2](https://github.com/Gitlawb/openclaude/commit/8aaa4f22ac5b942d82aa9cad54af30d56034515a))
|
||||
* address code scanning alerts ([#434](https://github.com/Gitlawb/openclaude/issues/434)) ([e365cb4](https://github.com/Gitlawb/openclaude/commit/e365cb4010becabacd7cbccb4c3e59ea23a41e90))
|
||||
* avoid sync github credential reads in provider manager ([#428](https://github.com/Gitlawb/openclaude/issues/428)) ([aff2bd8](https://github.com/Gitlawb/openclaude/commit/aff2bd87e4f2821992f74fb95481c505d0ba5d5d))
|
||||
* convert dragged file paths to [@mentions](https://github.com/mentions) for attachment ([#382](https://github.com/Gitlawb/openclaude/issues/382)) ([112df59](https://github.com/Gitlawb/openclaude/commit/112df5911791ea71ee9efbb98ea59c5ded1ea161))
|
||||
* custom web search — WEB_URL_TEMPLATE not recognized, timeout too short, silent native fallback ([#537](https://github.com/Gitlawb/openclaude/issues/537)) ([32fbd0c](https://github.com/Gitlawb/openclaude/commit/32fbd0c7b4168b32dcb13a5b69342e2727269201))
|
||||
* defer startup checks and suppress recommendation dialogs during startup window (issue [#363](https://github.com/Gitlawb/openclaude/issues/363)) ([#504](https://github.com/Gitlawb/openclaude/issues/504)) ([2caf2fd](https://github.com/Gitlawb/openclaude/commit/2caf2fd982af1ec845c50152ad9d28d1a597f82f))
|
||||
* display selected model in startup screen instead of hardcoded sonnet 4.6 ([#587](https://github.com/Gitlawb/openclaude/issues/587)) ([b126e38](https://github.com/Gitlawb/openclaude/commit/b126e38b1affddd2de83fcc3ba26f2e44b42a509))
|
||||
* handle missing skill parameter in SkillTool ([#485](https://github.com/Gitlawb/openclaude/issues/485)) ([f9ce81b](https://github.com/Gitlawb/openclaude/commit/f9ce81bfb384e909353813fb6f6760cadd508ae7))
|
||||
* include MCP tool results in microcompact to reduce token waste ([#348](https://github.com/Gitlawb/openclaude/issues/348)) ([52d33a8](https://github.com/Gitlawb/openclaude/commit/52d33a87a047b943aedaaaf772cd48636c263509))
|
||||
* **ink:** restore host prop updates in React 19 reconciler ([#589](https://github.com/Gitlawb/openclaude/issues/589)) ([6e94dd9](https://github.com/Gitlawb/openclaude/commit/6e94dd913688b2d6433a9abe62a245c5f031b776))
|
||||
* let saved provider profiles win on restart ([#513](https://github.com/Gitlawb/openclaude/issues/513)) ([cb8f8b7](https://github.com/Gitlawb/openclaude/commit/cb8f8b7ac2e3e74516ee219a3a48156db7c6ed78))
|
||||
* normalize malformed Bash tool arguments from OpenAI-compatible providers ([#385](https://github.com/Gitlawb/openclaude/issues/385)) ([b4bd95b](https://github.com/Gitlawb/openclaude/commit/b4bd95b47715c9896240d708c106777507fd26ec))
|
||||
* preserve only originally-required properties in strict tool schemas ([#471](https://github.com/Gitlawb/openclaude/issues/471)) ([ccaa193](https://github.com/Gitlawb/openclaude/commit/ccaa193eec5761f0972ffb58eb3189a81a9244b0))
|
||||
* preserve unicode in Windows clipboard fallback ([#388](https://github.com/Gitlawb/openclaude/issues/388)) ([c193497](https://github.com/Gitlawb/openclaude/commit/c1934974aaf64db460cc850a044bd13cc744cce7))
|
||||
* rebrand prompt identity to openclaude ([#496](https://github.com/Gitlawb/openclaude/issues/496)) ([598651f](https://github.com/Gitlawb/openclaude/commit/598651f42389ce76311ec00e8a9c701c939ead27))
|
||||
* replace isDeepStrictEqual with navigation-aware options comparison ([#507](https://github.com/Gitlawb/openclaude/issues/507)) ([537c469](https://github.com/Gitlawb/openclaude/commit/537c469c3a2f7cb0eed05fa2f54dca57b6bc273f)), closes [#472](https://github.com/Gitlawb/openclaude/issues/472)
|
||||
* report cache reads in streaming and correct cost calculation ([#577](https://github.com/Gitlawb/openclaude/issues/577)) ([f4ac709](https://github.com/Gitlawb/openclaude/commit/f4ac709fa6eda732bf45204fcab625ba6c5674b9))
|
||||
* restore default context window for unknown 3p models ([#494](https://github.com/Gitlawb/openclaude/issues/494)) ([69ea1f1](https://github.com/Gitlawb/openclaude/commit/69ea1f1e4a99e9436215d8cb391a116a64442b94))
|
||||
* restore Grep and Glob reliability on OpenAI paths ([#461](https://github.com/Gitlawb/openclaude/issues/461)) ([600c01f](https://github.com/Gitlawb/openclaude/commit/600c01faf761a080a2c7dede872ddbe05a132f23))
|
||||
* restore Ollama auto-detect in first-run setup ([#561](https://github.com/Gitlawb/openclaude/issues/561)) ([68c2968](https://github.com/Gitlawb/openclaude/commit/68c296833dcef54ce44cb18b24357230b5204dbc))
|
||||
* scrub canonical Anthropic headers from 3P shim requests ([#499](https://github.com/Gitlawb/openclaude/issues/499)) ([07621a6](https://github.com/Gitlawb/openclaude/commit/07621a6f8d0918170281869a47b5dbff90e71594))
|
||||
* strip Anthropic params from 3P resume paths ([#479](https://github.com/Gitlawb/openclaude/issues/479)) ([4975cfc](https://github.com/Gitlawb/openclaude/commit/4975cfc2e0ddbe34aa4e8e3f52ee5eba07fbe465))
|
||||
* suppress startup dialogs when input is buffered ([#423](https://github.com/Gitlawb/openclaude/issues/423)) ([8ece290](https://github.com/Gitlawb/openclaude/commit/8ece2900872dadd157e798ef501ddf126dac66c4))
|
||||
* **tui:** restore prompt rendering on startup ([#498](https://github.com/Gitlawb/openclaude/issues/498)) ([e30ad17](https://github.com/Gitlawb/openclaude/commit/e30ad17ae0056787273be2caafd6cf5340b6ab57))
|
||||
* update theme preview on focus change ([#562](https://github.com/Gitlawb/openclaude/issues/562)) ([6924718](https://github.com/Gitlawb/openclaude/commit/692471850fc789ee0797190089272407f9a4d953))
|
||||
* **web-search:** close SSRF bypasses in custom provider hostname guard ([#610](https://github.com/Gitlawb/openclaude/issues/610)) ([a02c441](https://github.com/Gitlawb/openclaude/commit/a02c44143b257fbee7f38f1b93873cc0ea68a1f9))
|
||||
* WebSearch providers + MCPTool bugs ([#593](https://github.com/Gitlawb/openclaude/issues/593)) ([91e4cfb](https://github.com/Gitlawb/openclaude/commit/91e4cfb15b62c04615834fd3c417fe38b4feb914))
|
||||
@@ -137,9 +137,10 @@ export OPENAI_MODEL=llama-3.3-70b-versatile
|
||||
### Mistral
|
||||
|
||||
```bash
|
||||
export CLAUDE_CODE_USE_MISTRAL=1
|
||||
export MISTRAL_API_KEY=...
|
||||
export MISTRAL_MODEL=mistral-large-latest
|
||||
export CLAUDE_CODE_USE_OPENAI=1
|
||||
export OPENAI_API_KEY=...
|
||||
export OPENAI_BASE_URL=https://api.mistral.ai/v1
|
||||
export OPENAI_MODEL=mistral-large-latest
|
||||
```
|
||||
|
||||
### Azure OpenAI
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "@gitlawb/openclaude",
|
||||
"version": "0.2.3",
|
||||
"version": "0.1.8",
|
||||
"description": "Claude Code opened to any LLM — OpenAI, Gemini, DeepSeek, Ollama, and 200+ models",
|
||||
"type": "module",
|
||||
"bin": {
|
||||
@@ -140,7 +140,7 @@
|
||||
},
|
||||
"repository": {
|
||||
"type": "git",
|
||||
"url": "https://github.com/Gitlawb/openclaude.git"
|
||||
"url": "https://gitlawb.com/z6MkqDnb7Siv3Cwj7pGJq4T5EsUisECqR8KpnDLwcaZq5TPr/openclaude"
|
||||
},
|
||||
"keywords": [
|
||||
"claude-code",
|
||||
|
||||
@@ -112,14 +112,6 @@ def build_default_providers() -> list[Provider]:
|
||||
big_model=big if "gemini" in big else "gemini-2.5-pro",
|
||||
small_model=small if "gemini" in small else "gemini-2.0-flash",
|
||||
),
|
||||
Provider(
|
||||
name="mistral",
|
||||
ping_url="",
|
||||
api_key_env="MISTRAL_API_KEY",
|
||||
cost_per_1k_tokens=0.0001,
|
||||
big_model=big if "mistral" in big else "devstral-latest",
|
||||
small_model=small if "small" in small else "ministral-3b-latest",
|
||||
),
|
||||
Provider(
|
||||
name="ollama",
|
||||
ping_url=f"{ollama_url}/api/tags",
|
||||
|
||||
@@ -1,11 +0,0 @@
|
||||
{
|
||||
"$schema": "https://raw.githubusercontent.com/googleapis/release-please/main/schemas/config.json",
|
||||
"packages": {
|
||||
".": {
|
||||
"release-type": "node",
|
||||
"package-name": "@gitlawb/openclaude",
|
||||
"bump-minor-pre-major": true,
|
||||
"include-v-in-tag": true
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -11,7 +11,6 @@ import {
|
||||
buildAtomicChatProfileEnv,
|
||||
buildCodexProfileEnv,
|
||||
buildGeminiProfileEnv,
|
||||
buildMistralProfileEnv,
|
||||
buildOllamaProfileEnv,
|
||||
buildOpenAIProfileEnv,
|
||||
createProfileFile,
|
||||
@@ -38,7 +37,7 @@ function parseArg(name: string): string | null {
|
||||
|
||||
function parseProviderArg(): ProviderProfile | 'auto' {
|
||||
const p = parseArg('--provider')?.toLowerCase()
|
||||
if (p === 'openai' || p === 'ollama' || p === 'codex' || p === 'gemini' || p === 'mistral' || p === 'atomic-chat') return p
|
||||
if (p === 'openai' || p === 'ollama' || p === 'codex' || p === 'gemini' || p === 'atomic-chat') return p
|
||||
return 'auto'
|
||||
}
|
||||
|
||||
@@ -91,21 +90,6 @@ async function main(): Promise<void> {
|
||||
process.exit(1)
|
||||
}
|
||||
|
||||
env = builtEnv
|
||||
} else if (selected === 'mistral') {
|
||||
const builtEnv = buildMistralProfileEnv({
|
||||
model: argModel || null,
|
||||
baseUrl: argBaseUrl || null,
|
||||
apiKey: argApiKey || null,
|
||||
processEnv: process.env,
|
||||
})
|
||||
|
||||
if (!builtEnv) {
|
||||
console.error('Mistral profile requires an API key. Use --api-key or set MISTRAL_API_KEY.')
|
||||
console.error('Get a free key at: https://admin.mistral.ai/organization/api-keys')
|
||||
process.exit(1)
|
||||
}
|
||||
|
||||
env = builtEnv
|
||||
} else if (selected === 'ollama') {
|
||||
resolvedOllamaModel ??= await resolveOllamaModel(argModel, argBaseUrl, goal)
|
||||
@@ -185,7 +169,7 @@ async function main(): Promise<void> {
|
||||
|
||||
console.log(`Saved profile: ${selected}`)
|
||||
console.log(`Goal: ${goal}`)
|
||||
console.log(`Model: ${profile.env.GEMINI_MODEL || profile.env.MISTRAL_MODEL || profile.env.OPENAI_MODEL || getGoalDefaultOpenAIModel(goal)}`)
|
||||
console.log(`Model: ${profile.env.GEMINI_MODEL || profile.env.OPENAI_MODEL || getGoalDefaultOpenAIModel(goal)}`)
|
||||
console.log(`Path: ${outputPath}`)
|
||||
console.log('Next: bun run dev:profile')
|
||||
}
|
||||
|
||||
@@ -50,7 +50,7 @@ function parseLaunchOptions(argv: string[]): LaunchOptions {
|
||||
continue
|
||||
}
|
||||
|
||||
if ((lower === 'auto' || lower === 'openai' || lower === 'ollama' || lower === 'codex' || lower === 'gemini' || lower ==='mistral' || lower === 'atomic-chat') && requestedProfile === 'auto') {
|
||||
if ((lower === 'auto' || lower === 'openai' || lower === 'ollama' || lower === 'codex' || lower === 'gemini' || lower === 'atomic-chat') && requestedProfile === 'auto') {
|
||||
requestedProfile = lower as ProviderProfile | 'auto'
|
||||
continue
|
||||
}
|
||||
@@ -124,8 +124,6 @@ function printSummary(profile: ProviderProfile): void {
|
||||
console.log(`Launching profile: ${profile}`)
|
||||
if (profile === 'gemini') {
|
||||
console.log('Using configured Gemini provider settings.')
|
||||
} else if (profile === 'mistral') {
|
||||
console.log('Using configured Mistral provider settings.')
|
||||
} else if (profile === 'codex') {
|
||||
console.log('Using configured Codex/OpenAI-compatible provider settings.')
|
||||
} else if (profile === 'atomic-chat') {
|
||||
@@ -141,7 +139,7 @@ async function main(): Promise<void> {
|
||||
const options = parseLaunchOptions(process.argv.slice(2))
|
||||
const requestedProfile = options.requestedProfile
|
||||
if (!requestedProfile) {
|
||||
console.error('Usage: bun run scripts/provider-launch.ts [openai|ollama|codex|gemini|mistral|atomic-chat|mistral|auto] [--fast] [--goal <latency|balanced|coding>] [-- <cli args>]')
|
||||
console.error('Usage: bun run scripts/provider-launch.ts [openai|ollama|codex|gemini|atomic-chat|auto] [--fast] [--goal <latency|balanced|coding>] [-- <cli args>]')
|
||||
process.exit(1)
|
||||
}
|
||||
|
||||
@@ -207,11 +205,6 @@ async function main(): Promise<void> {
|
||||
process.exit(1)
|
||||
}
|
||||
|
||||
if (profile === 'mistral' && !env.MISTRAL_API_KEY) {
|
||||
console.error('MISTRAL_API_KEY is required for mistral profile. Run: bun run profile:init -- --provider mistral --api-key <key>')
|
||||
process.exit(1)
|
||||
}
|
||||
|
||||
if (profile === 'openai' && (!env.OPENAI_API_KEY || env.OPENAI_API_KEY === 'SUA_CHAVE')) {
|
||||
console.error('OPENAI_API_KEY is required for openai profile and cannot be SUA_CHAVE. Run: bun run profile:init -- --provider openai --api-key <key>')
|
||||
process.exit(1)
|
||||
|
||||
@@ -118,16 +118,12 @@ function isLocalBaseUrl(baseUrl: string): boolean {
|
||||
}
|
||||
|
||||
const GEMINI_DEFAULT_BASE_URL = 'https://generativelanguage.googleapis.com/v1beta/openai'
|
||||
const MISTRAL_DEFAULT_BASE_URL = 'https://api.mistral.ai/v1'
|
||||
const GITHUB_COPILOT_BASE = 'https://api.githubcopilot.com'
|
||||
|
||||
function currentBaseUrl(): string {
|
||||
if (isTruthy(process.env.CLAUDE_CODE_USE_GEMINI)) {
|
||||
return process.env.GEMINI_BASE_URL ?? GEMINI_DEFAULT_BASE_URL
|
||||
}
|
||||
if (isTruthy(process.env.CLAUDE_CODE_USE_MISTRAL)) {
|
||||
return process.env.MISTRAL_BASE_URL ?? MISTRAL_DEFAULT_BASE_URL
|
||||
}
|
||||
if (isTruthy(process.env.CLAUDE_CODE_USE_GITHUB)) {
|
||||
return process.env.OPENAI_BASE_URL ?? GITHUB_COPILOT_BASE
|
||||
}
|
||||
@@ -159,31 +155,6 @@ function checkGeminiEnv(): CheckResult[] {
|
||||
return results
|
||||
}
|
||||
|
||||
function checkMistralEnv(): CheckResult[] {
|
||||
const results: CheckResult[] = []
|
||||
const model = process.env.MISTRAL_MODEL
|
||||
const key = process.env.MISTRAL_API_KEY
|
||||
const baseUrl = process.env.MISTRAL_BASE_URL ?? MISTRAL_DEFAULT_BASE_URL
|
||||
|
||||
results.push(pass('Provider mode', 'Mistral provider enabled.'))
|
||||
|
||||
if (!model) {
|
||||
results.push(pass('MISTRAL_MODEL', 'Not set. Default will be used at runtime.'))
|
||||
} else {
|
||||
results.push(pass('MISTRAL_MODEL', model))
|
||||
}
|
||||
|
||||
results.push(pass('MISTRAL_BASE_URL', baseUrl))
|
||||
|
||||
if (!key) {
|
||||
results.push(fail('MISTRAL_API_KEY', 'Missing. Set MISTRAL_API_KEY.'))
|
||||
} else {
|
||||
results.push(pass('MISTRAL_API_KEY', 'Configured.'))
|
||||
}
|
||||
|
||||
return results
|
||||
}
|
||||
|
||||
function checkGithubEnv(): CheckResult[] {
|
||||
const results: CheckResult[] = []
|
||||
const baseUrl = process.env.OPENAI_BASE_URL ?? GITHUB_COPILOT_BASE
|
||||
@@ -215,17 +186,12 @@ function checkOpenAIEnv(): CheckResult[] {
|
||||
const results: CheckResult[] = []
|
||||
const useGemini = isTruthy(process.env.CLAUDE_CODE_USE_GEMINI)
|
||||
const useGithub = isTruthy(process.env.CLAUDE_CODE_USE_GITHUB)
|
||||
const useMistral = isTruthy(process.env.CLAUDE_CODE_USE_MISTRAL)
|
||||
const useOpenAI = isTruthy(process.env.CLAUDE_CODE_USE_OPENAI)
|
||||
|
||||
if (useGemini) {
|
||||
return checkGeminiEnv()
|
||||
}
|
||||
|
||||
if (useMistral) {
|
||||
return checkMistralEnv()
|
||||
}
|
||||
|
||||
if (useGithub && !useOpenAI) {
|
||||
return checkGithubEnv()
|
||||
}
|
||||
@@ -302,9 +268,8 @@ async function checkBaseUrlReachability(): Promise<CheckResult> {
|
||||
const useGemini = isTruthy(process.env.CLAUDE_CODE_USE_GEMINI)
|
||||
const useOpenAI = isTruthy(process.env.CLAUDE_CODE_USE_OPENAI)
|
||||
const useGithub = isTruthy(process.env.CLAUDE_CODE_USE_GITHUB)
|
||||
const useMistral = isTruthy(process.env.CLAUDE_CODE_USE_MISTRAL)
|
||||
|
||||
if (!useGemini && !useOpenAI && !useGithub && !useMistral) {
|
||||
if (!useGemini && !useOpenAI && !useGithub) {
|
||||
return pass('Provider reachability', 'Skipped (OpenAI-compatible mode disabled).')
|
||||
}
|
||||
|
||||
@@ -361,8 +326,6 @@ async function checkBaseUrlReachability(): Promise<CheckResult> {
|
||||
})
|
||||
} else if (useGemini && (process.env.GEMINI_API_KEY ?? process.env.GOOGLE_API_KEY)) {
|
||||
headers.Authorization = `Bearer ${process.env.GEMINI_API_KEY ?? process.env.GOOGLE_API_KEY}`
|
||||
} else if (useMistral && process.env.MISTRAL_API_KEY) {
|
||||
headers.Authorization = `Bearer ${process.env.MISTRAL_API_KEY}`
|
||||
} else if (process.env.OPENAI_API_KEY) {
|
||||
headers.Authorization = `Bearer ${process.env.OPENAI_API_KEY}`
|
||||
}
|
||||
@@ -410,8 +373,7 @@ function checkOllamaProcessorMode(): CheckResult {
|
||||
if (
|
||||
!isTruthy(process.env.CLAUDE_CODE_USE_OPENAI) ||
|
||||
isTruthy(process.env.CLAUDE_CODE_USE_GEMINI) ||
|
||||
isTruthy(process.env.CLAUDE_CODE_USE_GITHUB) ||
|
||||
isTruthy(process.env.CLAUDE_CODE_USE_MISTRAL)
|
||||
isTruthy(process.env.CLAUDE_CODE_USE_GITHUB)
|
||||
) {
|
||||
return pass('Ollama processor mode', 'Skipped (OpenAI-compatible mode disabled).')
|
||||
}
|
||||
@@ -463,14 +425,6 @@ function serializeSafeEnvSummary(): Record<string, string | boolean> {
|
||||
GEMINI_API_KEY_SET: Boolean(process.env.GEMINI_API_KEY ?? process.env.GOOGLE_API_KEY),
|
||||
}
|
||||
}
|
||||
if (isTruthy(process.env.CLAUDE_CODE_USE_MISTRAL)) {
|
||||
return {
|
||||
CLAUDE_CODE_USE_MISTRAL: true,
|
||||
MISTRAL_MODEL: process.env.MISTRAL_MODEL ?? '(unset, default: devstral-latest)',
|
||||
MISTRAL_BASE_URL: process.env.MISTRAL_BASE_URL ?? 'https://api.mistral.ai/v1',
|
||||
MISTRAL_API_KEY_SET: Boolean(process.env.MISTRAL_API_KEY),
|
||||
}
|
||||
}
|
||||
if (
|
||||
isTruthy(process.env.CLAUDE_CODE_USE_GITHUB) &&
|
||||
!isTruthy(process.env.CLAUDE_CODE_USE_OPENAI)
|
||||
|
||||
@@ -400,12 +400,12 @@ export async function update() {
|
||||
if (useLocalUpdate) {
|
||||
process.stderr.write('Try manually updating with:\n')
|
||||
process.stderr.write(
|
||||
` cd ~/.openclaude/local && npm update ${MACRO.PACKAGE_URL}\n`,
|
||||
` cd ~/.claude/local && npm update ${MACRO.PACKAGE_URL}\n`,
|
||||
)
|
||||
} else {
|
||||
process.stderr.write('Try running with sudo or fix npm permissions\n')
|
||||
process.stderr.write(
|
||||
'Or consider using native installation with: openclaude install\n',
|
||||
'Or consider using native installation with: claude install\n',
|
||||
)
|
||||
}
|
||||
await gracefulShutdown(1)
|
||||
@@ -415,11 +415,11 @@ export async function update() {
|
||||
if (useLocalUpdate) {
|
||||
process.stderr.write('Try manually updating with:\n')
|
||||
process.stderr.write(
|
||||
` cd ~/.openclaude/local && npm update ${MACRO.PACKAGE_URL}\n`,
|
||||
` cd ~/.claude/local && npm update ${MACRO.PACKAGE_URL}\n`,
|
||||
)
|
||||
} else {
|
||||
process.stderr.write(
|
||||
'Or consider using native installation with: openclaude install\n',
|
||||
'Or consider using native installation with: claude install\n',
|
||||
)
|
||||
}
|
||||
await gracefulShutdown(1)
|
||||
|
||||
@@ -32,7 +32,6 @@ import logout from './commands/logout/index.js'
|
||||
import installGitHubApp from './commands/install-github-app/index.js'
|
||||
import installSlackApp from './commands/install-slack-app/index.js'
|
||||
import breakCache from './commands/break-cache/index.js'
|
||||
import cacheProbe from './commands/cache-probe/index.js'
|
||||
import mcp from './commands/mcp/index.js'
|
||||
import mobile from './commands/mobile/index.js'
|
||||
import onboarding from './commands/onboarding/index.js'
|
||||
@@ -145,7 +144,6 @@ import heapDump from './commands/heapdump/index.js'
|
||||
import mockLimits from './commands/mock-limits/index.js'
|
||||
import bridgeKick from './commands/bridge-kick.js'
|
||||
import version from './commands/version.js'
|
||||
import wiki from './commands/wiki/index.js'
|
||||
import summary from './commands/summary/index.js'
|
||||
import {
|
||||
resetLimits,
|
||||
@@ -269,7 +267,6 @@ const COMMANDS = memoize((): Command[] => [
|
||||
autoFix,
|
||||
branch,
|
||||
btw,
|
||||
cacheProbe,
|
||||
chrome,
|
||||
clear,
|
||||
color,
|
||||
@@ -329,7 +326,6 @@ const COMMANDS = memoize((): Command[] => [
|
||||
usage,
|
||||
usageReport,
|
||||
vim,
|
||||
wiki,
|
||||
...(webCmd ? [webCmd] : []),
|
||||
...(forkCmd ? [forkCmd] : []),
|
||||
...(buddy ? [buddy] : []),
|
||||
|
||||
@@ -1,413 +0,0 @@
|
||||
import { getSessionId } from '../../bootstrap/state.js'
|
||||
import { resolveProviderRequest } from '../../services/api/providerConfig.js'
|
||||
import type { LocalCommandCall } from '../../types/command.js'
|
||||
import { logForDebugging } from '../../utils/debug.js'
|
||||
import { isEnvTruthy } from '../../utils/envUtils.js'
|
||||
import { hydrateGithubModelsTokenFromSecureStorage } from '../../utils/githubModelsCredentials.js'
|
||||
import { getMainLoopModel } from '../../utils/model/model.js'
|
||||
|
||||
const COPILOT_HEADERS: Record<string, string> = {
|
||||
'User-Agent': 'GitHubCopilotChat/0.26.7',
|
||||
'Editor-Version': 'vscode/1.99.3',
|
||||
'Editor-Plugin-Version': 'copilot-chat/0.26.7',
|
||||
'Copilot-Integration-Id': 'vscode-chat',
|
||||
}
|
||||
|
||||
// Large system prompt (~6000 chars, ~1500 tokens) to cross the 1024-token cache threshold
|
||||
const SYSTEM_PROMPT = [
|
||||
'You are a coding assistant. Answer concisely.',
|
||||
'CONTEXT: User is working on a TypeScript project with Bun runtime.',
|
||||
...Array.from(
|
||||
{ length: 80 },
|
||||
(_, i) =>
|
||||
`Rule ${i + 1}: Follow best practices for TypeScript including strict typing, error handling, testing, and clean code. Prefer explicit types over any. Use const assertions. Await all async operations.`,
|
||||
),
|
||||
].join('\n\n')
|
||||
|
||||
const USER_MESSAGE = 'Say "hello" and nothing else.'
|
||||
const DELAY_MS = 3000
|
||||
|
||||
/**
|
||||
* Extract model family from a versioned model string.
|
||||
* e.g. "gpt-5.4-0626" → "gpt-5.4", "codex-mini-latest" → "codex-mini"
|
||||
*/
|
||||
function getModelFamily(model: string | undefined): string {
|
||||
if (!model) return 'unknown'
|
||||
return model
|
||||
.replace(/-\d{4,}$/, '')
|
||||
.replace(/-latest$/, '')
|
||||
.replace(/-preview$/, '')
|
||||
}
|
||||
|
||||
function getField(obj: unknown, path: string): unknown {
|
||||
return path
|
||||
.split('.')
|
||||
.reduce((o: any, k: string) => (o != null ? o[k] : undefined), obj)
|
||||
}
|
||||
|
||||
/** Outcome of one probe request, normalized for logging and comparison. */
interface ProbeResult {
  // Human-readable tag for this call (e.g. "CALL 1 — Cold (no cache)").
  label: string
  // HTTP status code; 0 means the fetch itself threw (network failure).
  status: number
  // Wall-clock duration of the request in milliseconds.
  elapsed: number
  // Response headers, lower-cased keys as exposed by the fetch Headers API.
  headers: Record<string, string>
  // Usage object parsed from the SSE stream, or null if none was found.
  usage: Record<string, unknown> | null
  // Response/completion id from the stream, or null if unavailable.
  responseId: string | null
  // Error body or message when the call failed; null on success.
  error: string | null
}
|
||||
|
||||
/**
 * POST a streaming completion request and mine the SSE response for usage data.
 *
 * Never throws: network failures come back as status 0 with the fetch error
 * message, and non-2xx responses come back with the error body captured.
 *
 * @param url     Full endpoint URL ("…/responses" or "…/chat/completions").
 * @param headers Request headers, including Authorization.
 * @param body    JSON request body (already includes stream: true).
 * @param label   Human-readable tag copied into the result for logging.
 * @returns       Normalized ProbeResult for this call.
 */
async function sendProbe(
  url: string,
  headers: Record<string, string>,
  body: Record<string, unknown>,
  label: string,
): Promise<ProbeResult> {
  const start = Date.now()
  let response: Response
  try {
    response = await fetch(url, {
      method: 'POST',
      headers,
      body: JSON.stringify(body),
    })
  } catch (err: any) {
    // Transport-level failure (DNS, connection refused, …): status 0 by convention.
    return {
      label,
      status: 0,
      elapsed: Date.now() - start,
      headers: {},
      usage: null,
      responseId: null,
      error: err.message,
    }
  }
  const elapsed = Date.now() - start

  // Copy headers into a plain object so callers can index them directly.
  const respHeaders: Record<string, string> = {}
  response.headers.forEach((value, key) => {
    respHeaders[key] = value
  })

  if (!response.ok) {
    // Best-effort read of the error body; ignore a second failure.
    const errorBody = await response.text().catch(() => '')
    return {
      label,
      status: response.status,
      elapsed,
      headers: respHeaders,
      usage: null,
      responseId: null,
      error: errorBody,
    }
  }

  // Parse SSE stream for usage data
  const text = await response.text()
  let usage: Record<string, unknown> | null = null
  let responseId: string | null = null

  // The two APIs use different SSE shapes; pick the parser by endpoint.
  const isResponses = url.endsWith('/responses')
  for (const chunk of text.split('\n\n')) {
    const lines = chunk
      .split('\n')
      .map((l) => l.trim())
      .filter(Boolean)

    if (isResponses) {
      // Responses API: usage lives on the terminal completed/incomplete event.
      const eventLine = lines.find((l) => l.startsWith('event: '))
      const dataLines = lines.filter((l) => l.startsWith('data: '))
      if (!eventLine || !dataLines.length) continue
      const event = eventLine.slice(7).trim()
      if (
        event === 'response.completed' ||
        event === 'response.incomplete'
      ) {
        try {
          // Multi-line data payloads are rejoined before parsing.
          const data = JSON.parse(
            dataLines.map((l) => l.slice(6)).join('\n'),
          )
          usage = (data?.response?.usage as Record<string, unknown>) ?? null
          responseId = (data?.response?.id as string) ?? null
        } catch {}
      }
    } else {
      // Chat Completions: usage appears on whichever chunk carries it
      // (typically the last one before [DONE], via stream_options.include_usage).
      for (const line of lines) {
        if (!line.startsWith('data: ')) continue
        const raw = line.slice(6).trim()
        if (raw === '[DONE]') continue
        try {
          const data = JSON.parse(raw) as Record<string, unknown>
          if (data.usage) {
            usage = data.usage as Record<string, unknown>
            responseId = (data.id as string) ?? null
          }
        } catch {}
      }
    }
  }

  return { label, status: response.status, elapsed, headers: respHeaders, usage, responseId, error: null }
}
|
||||
|
||||
function formatResult(r: ProbeResult): string {
|
||||
const lines: string[] = [`--- ${r.label} ---`]
|
||||
if (r.error) {
|
||||
lines.push(` ERROR (HTTP ${r.status}): ${r.error.slice(0, 200)}`)
|
||||
return lines.join('\n')
|
||||
}
|
||||
lines.push(` HTTP ${r.status} — ${r.elapsed}ms`)
|
||||
if (r.responseId) lines.push(` response.id: ${r.responseId}`)
|
||||
|
||||
if (r.usage) {
|
||||
lines.push(' Usage:')
|
||||
lines.push(` ${JSON.stringify(r.usage, null, 2).replace(/\n/g, '\n ')}`)
|
||||
} else {
|
||||
lines.push(' Usage: null')
|
||||
}
|
||||
|
||||
// Interesting headers
|
||||
for (const h of [
|
||||
'openai-processing-ms',
|
||||
'x-ratelimit-remaining',
|
||||
'x-ratelimit-limit',
|
||||
'x-ms-region',
|
||||
'x-github-request-id',
|
||||
'x-request-id',
|
||||
]) {
|
||||
if (r.headers[h]) lines.push(` ${h}: ${r.headers[h]}`)
|
||||
}
|
||||
return lines.join('\n')
|
||||
}
|
||||
|
||||
/**
 * /cache-probe command handler.
 *
 * Sends two identical streaming requests (cold, then warm after DELAY_MS),
 * compares their usage fields to decide whether prompt caching kicked in,
 * and additionally simulates what main's shim code would report for the
 * same usage payloads. Detailed output goes to the debug log; the return
 * value is a short user-facing summary.
 *
 * Args: "[model] [--no-key]" — an optional model override, and --no-key to
 * omit the cache-related request params entirely (control run).
 */
export const call: LocalCommandCall = async (args) => {
  const parts = (args ?? '').trim().split(/\s+/).filter(Boolean)
  const noKey = parts.includes('--no-key')
  // First non-flag token, if any, overrides the session's main-loop model.
  const modelOverride = parts.find((p) => !p.startsWith('--')) || undefined
  const modelStr = modelOverride ?? getMainLoopModel()
  const request = resolveProviderRequest({ model: modelStr })
  const isGithub = isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB)

  // Resolve API key the same way the OpenAI shim does
  let apiKey = process.env.OPENAI_API_KEY ?? ''
  if (!apiKey && isGithub) {
    // May populate the token env vars from secure storage as a side effect.
    hydrateGithubModelsTokenFromSecureStorage()
    apiKey =
      process.env.OPENAI_API_KEY ??
      process.env.GITHUB_TOKEN ??
      process.env.GH_TOKEN ??
      ''
  }

  if (!apiKey) {
    return {
      type: 'text',
      value:
        'No API key found. Make sure you are in an active OpenAI-compatible or GitHub Copilot session.\n' +
        'For GitHub Copilot: run /onboard-github first.\n' +
        'For OpenAI-compatible: set OPENAI_API_KEY.',
    }
  }

  // Pick the endpoint by transport: Responses API vs Chat Completions.
  const useResponses = request.transport === 'codex_responses'
  const endpoint = useResponses ? '/responses' : '/chat/completions'
  const url = `${request.baseUrl}${endpoint}`
  const family = getModelFamily(request.resolvedModel)
  // Cache key is per-session and per-model-family so both calls share it.
  const cacheKey = `${getSessionId()}:${family}`

  const headers: Record<string, string> = {
    'Content-Type': 'application/json',
    Authorization: `Bearer ${apiKey}`,
    originator: 'openclaude',
  }
  if (isGithub) {
    Object.assign(headers, COPILOT_HEADERS)
  }

  // Build the request body once; both probe calls reuse the identical body,
  // which is what makes the second call cache-eligible.
  let body: Record<string, unknown>
  if (useResponses) {
    body = {
      model: request.resolvedModel,
      instructions: SYSTEM_PROMPT,
      input: [
        {
          type: 'message',
          role: 'user',
          content: [{ type: 'input_text', text: USER_MESSAGE }],
        },
      ],
      stream: true,
      ...(noKey ? {} : {
        store: false,
        prompt_cache_key: cacheKey,
        prompt_cache_retention: '24h',
      }),
    }
  } else {
    body = {
      model: request.resolvedModel,
      messages: [
        { role: 'system', content: SYSTEM_PROMPT },
        { role: 'user', content: USER_MESSAGE },
      ],
      stream: true,
      stream_options: { include_usage: true },
      max_tokens: 20,
      ...(noKey ? {} : {
        store: false,
        prompt_cache_key: cacheKey,
      }),
    }
  }

  // Log configuration
  const config = [
    `[cache-probe] Starting cache probe${noKey ? ' (--no-key: cache params OMITTED)' : ''}`,
    ` model: ${request.resolvedModel} (family: ${family})`,
    ` transport: ${request.transport}`,
    ` endpoint: ${url}`,
    ` prompt_cache_key: ${noKey ? 'NOT SENT' : cacheKey}`,
    ` store: ${noKey ? 'NOT SENT' : 'false'}`,
    ` system prompt: ~${Math.round(SYSTEM_PROMPT.length / 4)} tokens`,
    ` delay between calls: ${DELAY_MS}ms`,
  ].join('\n')
  logForDebugging(config)

  // Call 1 — Cold
  const r1 = await sendProbe(url, headers, body, 'CALL 1 — Cold (no cache)')
  logForDebugging(`[cache-probe]\n${formatResult(r1)}`)

  if (r1.error) {
    // No point in a warm call if the cold call failed — bail with details.
    return {
      type: 'text',
      value: `Cache probe failed on first call: HTTP ${r1.status}\n${r1.error.slice(0, 300)}\n\nFull details in debug log.`,
    }
  }

  // Wait
  await new Promise((r) => setTimeout(r, DELAY_MS))

  // Call 2 — Warm
  const r2 = await sendProbe(url, headers, body, 'CALL 2 — Warm (cache expected)')
  logForDebugging(`[cache-probe]\n${formatResult(r2)}`)

  // --- Comparison ---
  // Usage fields from both API shapes (Responses and Chat Completions);
  // fields absent in both calls are skipped below.
  const fields = [
    'input_tokens',
    'output_tokens',
    'total_tokens',
    'prompt_tokens',
    'completion_tokens',
    'input_tokens_details.cached_tokens',
    'prompt_tokens_details.cached_tokens',
    'output_tokens_details.reasoning_tokens',
  ]

  const comparison: string[] = ['[cache-probe] COMPARISON']
  comparison.push(
    ` ${'Field'.padEnd(42)} ${'Call 1'.padStart(8)} ${'Call 2'.padStart(8)} ${'Delta'.padStart(8)}`,
  )
  comparison.push(` ${'-'.repeat(72)}`)

  for (const f of fields) {
    const v1 = getField(r1.usage, f)
    const v2 = getField(r2.usage, f)
    if (v1 === undefined && v2 === undefined) continue
    const d =
      typeof v1 === 'number' && typeof v2 === 'number' ? v2 - v1 : ''
    comparison.push(
      ` ${f.padEnd(42)} ${String(v1 ?? '-').padStart(8)} ${String(v2 ?? '-').padStart(8)} ${String(d).padStart(8)}`,
    )
  }

  comparison.push('')
  comparison.push(
    ` Latency: ${r1.elapsed}ms → ${r2.elapsed}ms (${r2.elapsed - r1.elapsed > 0 ? '+' : ''}${r2.elapsed - r1.elapsed}ms)`,
  )

  // Header comparison
  for (const h of ['openai-processing-ms', 'x-ms-region', 'x-ratelimit-remaining']) {
    const v1 = r1.headers[h]
    const v2 = r2.headers[h]
    if (v1 || v2) {
      comparison.push(` ${h}: ${v1 ?? '-'} → ${v2 ?? '-'}`)
    }
  }

  // Verdict
  // cached_tokens lives under different parents depending on the API shape.
  const cached2 =
    (getField(r2.usage, 'input_tokens_details.cached_tokens') as number) ??
    (getField(r2.usage, 'prompt_tokens_details.cached_tokens') as number) ??
    0
  const input1 =
    ((r1.usage?.input_tokens ?? r1.usage?.prompt_tokens) as number) ?? 0
  const input2 =
    ((r2.usage?.input_tokens ?? r2.usage?.prompt_tokens) as number) ?? 0

  let verdict: string
  if (cached2 > 0) {
    const rate = input2 > 0 ? Math.round((cached2 / input2) * 100) : '?'
    verdict = `CACHE HIT: ${cached2} cached tokens (${rate}% of input)`
  } else if (input1 === 0 && input2 === 0) {
    verdict = 'INCONCLUSIVE: Server returns 0 input_tokens — cannot measure'
  } else if (r2.elapsed < r1.elapsed * 0.6 && input1 > 100) {
    // Latency-based heuristic: much faster warm call with no cached_tokens
    // reported suggests the server caches but doesn't surface it in usage.
    verdict = `POSSIBLE SILENT CACHING: Call 2 was ${Math.round((1 - r2.elapsed / r1.elapsed) * 100)}% faster but no cached_tokens reported`
  } else {
    verdict = 'NO CACHE DETECTED'
  }

  comparison.push(`\n Verdict: ${verdict}`)

  // --- Simulate what main's shim code does with this usage ---
  // codexShim.ts makeUsage() — used for Responses API (GPT-5+/Codex)
  function mainMakeUsage(u: any) {
    return {
      input_tokens: u?.input_tokens ?? 0,
      output_tokens: u?.output_tokens ?? 0,
      cache_creation_input_tokens: 0,
      cache_read_input_tokens: 0, // ← main hardcodes this to 0
    }
  }
  // openaiShim.ts convertChunkUsage() — used for Chat Completions
  function mainConvertChunkUsage(u: any) {
    return {
      input_tokens: u?.prompt_tokens ?? 0,
      output_tokens: u?.completion_tokens ?? 0,
      cache_creation_input_tokens: 0,
      cache_read_input_tokens: u?.prompt_tokens_details?.cached_tokens ?? 0,
    }
  }

  const shimFn = useResponses ? mainMakeUsage : mainConvertChunkUsage
  const shim1 = shimFn(r1.usage)
  const shim2 = shimFn(r2.usage)

  comparison.push('')
  comparison.push(` --- What main's shim reports (${useResponses ? 'codexShim.makeUsage' : 'openaiShim.convertChunkUsage'}) ---`)
  comparison.push(` Call 1: cache_read_input_tokens=${shim1.cache_read_input_tokens}`)
  comparison.push(` Call 2: cache_read_input_tokens=${shim2.cache_read_input_tokens}`)
  if (useResponses && cached2 > 0) {
    comparison.push(` BUG: Server returned ${cached2} cached tokens but main's makeUsage() drops it → reports 0`)
  } else if (!useResponses && shim2.cache_read_input_tokens > 0) {
    comparison.push(` OK: Chat Completions path on main correctly reads cached_tokens`)
  }

  logForDebugging(comparison.join('\n'))

  // User-facing summary
  const mode = noKey ? ' (NO cache key sent)' : ''
  const shimLabel = useResponses ? 'codexShim.makeUsage()' : 'openaiShim.convertChunkUsage()'
  const summary = [
    `Cache Probe — ${request.resolvedModel} via ${useResponses ? 'Responses API' : 'Chat Completions'}${mode}`,
    '',
    `Call 1: ${r1.elapsed}ms, input=${input1}, cached=${(getField(r1.usage, 'input_tokens_details.cached_tokens') as number) ?? (getField(r1.usage, 'prompt_tokens_details.cached_tokens') as number) ?? 0}`,
    `Call 2: ${r2.elapsed}ms, input=${input2}, cached=${cached2}`,
    '',
    verdict,
    '',
    `What main's ${shimLabel} reports:`,
    ` Call 2 cache_read_input_tokens = ${shim2.cache_read_input_tokens}${useResponses && cached2 > 0 ? ' ← BUG: server sent ' + cached2 + ' but main drops it' : ''}`,
    '',
    'Full details written to debug log.',
  ].join('\n')

  return { type: 'text', value: summary }
}
|
||||
@@ -1,17 +0,0 @@
|
||||
import type { Command } from '../../commands.js'
|
||||
import { isEnvTruthy } from '../../utils/envUtils.js'
|
||||
|
||||
// Command descriptor for /cache-probe. The implementation is lazy-loaded
// via `load` so the probe code is only imported when the command runs.
const cacheProbe: Command = {
  type: 'local',
  name: 'cache-probe',
  description:
    'Send identical requests to test prompt caching (results in debug log)',
  argumentHint: '[model] [--no-key]',
  // Only meaningful on OpenAI-compatible or GitHub Copilot sessions.
  isEnabled: () =>
    isEnvTruthy(process.env.CLAUDE_CODE_USE_OPENAI) ||
    isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB),
  supportsNonInteractive: false,
  load: () => import('./cache-probe.js'),
}

export default cacheProbe
|
||||
@@ -45,7 +45,7 @@ function getPromptContent(
|
||||
<!-- CHANGELOG:END -->`
|
||||
let slackStep = `
|
||||
|
||||
5. After creating/updating the PR, check if the user's AGENTS.md or CLAUDE.md mentions posting to Slack channels. If it does, use ToolSearch to search for "slack send message" tools. If ToolSearch finds a Slack tool, ask the user if they'd like you to post the PR URL to the relevant Slack channel. Only post if the user confirms. If ToolSearch returns no results or errors, skip this step silently—do not mention the failure, do not attempt workarounds, and do not try alternative approaches.`
|
||||
5. After creating/updating the PR, check if the user's CLAUDE.md mentions posting to Slack channels. If it does, use ToolSearch to search for "slack send message" tools. If ToolSearch finds a Slack tool, ask the user if they'd like you to post the PR URL to the relevant Slack channel. Only post if the user confirms. If ToolSearch returns no results or errors, skip this step silently—do not mention the failure, do not attempt workarounds, and do not try alternative approaches.`
|
||||
if (process.env.USER_TYPE === 'ant' && isUndercover()) {
|
||||
prefix = getUndercoverInstructions() + '\n'
|
||||
reviewerArg = ''
|
||||
|
||||
@@ -1,43 +0,0 @@
|
||||
import { afterEach, expect, mock, test } from 'bun:test'
|
||||
|
||||
// Snapshot the env var before any test mutates it, so afterEach can restore it.
const originalClaudeCodeNewInit = process.env.CLAUDE_CODE_NEW_INIT

// Import init.ts with a unique query string to bust the module cache, so each
// test evaluates the module fresh under its own mocked dependencies.
async function importInitCommand() {
  return (await import(`./init.ts?ts=${Date.now()}-${Math.random()}`)).default
}

afterEach(() => {
  mock.restore()

  // Restore CLAUDE_CODE_NEW_INIT to its pre-test state (unset or original value).
  if (originalClaudeCodeNewInit === undefined) {
    delete process.env.CLAUDE_CODE_NEW_INIT
  } else {
    process.env.CLAUDE_CODE_NEW_INIT = originalClaudeCodeNewInit
  }
})

test('NEW_INIT prompt preserves existing root CLAUDE.md by default', async () => {
  process.env.CLAUDE_CODE_NEW_INIT = '1'

  // Stub side-effecting dependencies so the prompt can be built in isolation.
  mock.module('../projectOnboardingState.js', () => ({
    maybeMarkProjectOnboardingComplete: () => {},
  }))
  mock.module('./initMode.js', () => ({
    isNewInitEnabled: () => true,
  }))

  const command = await importInitCommand()
  const blocks = await command.getPromptForCommand()

  // The prompt must instruct Claude to update an existing root CLAUDE.md
  // in place rather than creating a second root instruction file.
  expect(blocks).toHaveLength(1)
  expect(blocks[0]?.type).toBe('text')
  expect(String(blocks[0]?.text)).toContain(
    'checked-in root `CLAUDE.md` and does NOT already have a root `AGENTS.md`',
  )
  expect(String(blocks[0]?.text)).toContain(
    'do NOT silently create a second root instruction file',
  )
  expect(String(blocks[0]?.text)).toContain(
    'update the existing root `CLAUDE.md` in place by default',
  )
})
|
||||
@@ -1,6 +1,7 @@
|
||||
import { feature } from 'bun:bundle'
|
||||
import type { Command } from '../commands.js'
|
||||
import { maybeMarkProjectOnboardingComplete } from '../projectOnboardingState.js'
|
||||
import { isNewInitEnabled } from './initMode.js'
|
||||
import { isEnvTruthy } from '../utils/envUtils.js'
|
||||
|
||||
const OLD_INIT_PROMPT = `Please analyze this codebase and create a CLAUDE.md file, which will be given to future instances of Claude Code to operate in this repository.
|
||||
|
||||
@@ -24,19 +25,19 @@ Usage notes:
|
||||
This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.
|
||||
\`\`\``
|
||||
|
||||
const NEW_INIT_PROMPT = `Set up a minimal AGENTS.md (and optionally CLAUDE.local.md, skills, and hooks) for this repo. The root project instruction file is loaded into every Claude Code session, so it must be concise — only include what Claude would get wrong without it.
|
||||
const NEW_INIT_PROMPT = `Set up a minimal CLAUDE.md (and optionally skills and hooks) for this repo. CLAUDE.md is loaded into every Claude Code session, so it must be concise — only include what Claude would get wrong without it.
|
||||
|
||||
## Phase 1: Ask what to set up
|
||||
|
||||
Use AskUserQuestion to find out what the user wants:
|
||||
|
||||
- "Which instruction files should /init set up?"
|
||||
Options: "Project AGENTS.md" | "Personal CLAUDE.local.md" | "Both project + personal"
|
||||
- "Which CLAUDE.md files should /init set up?"
|
||||
Options: "Project CLAUDE.md" | "Personal CLAUDE.local.md" | "Both project + personal"
|
||||
Description for project: "Team-shared instructions checked into source control — architecture, coding standards, common workflows."
|
||||
Description for personal: "Your private preferences for this project (gitignored, not shared) — your role, sandbox URLs, preferred test data, workflow quirks."
|
||||
|
||||
- "Also set up skills and hooks?"
|
||||
Options: "Skills + hooks" | "Skills only" | "Hooks only" | "Neither, just the instruction file(s)"
|
||||
Options: "Skills + hooks" | "Skills only" | "Hooks only" | "Neither, just CLAUDE.md"
|
||||
Description for skills: "On-demand capabilities you or Claude invoke with \`/skill-name\` — good for repeatable workflows and reference knowledge."
|
||||
Description for hooks: "Deterministic shell commands that run on tool events (e.g., format after every edit). Claude can't skip them."
|
||||
|
||||
@@ -58,24 +59,24 @@ Note what you could NOT figure out from code alone — these become interview qu
|
||||
|
||||
## Phase 3: Fill in the gaps
|
||||
|
||||
Use AskUserQuestion to gather what you still need to write good instruction files and skills. Ask only things the code can't answer.
|
||||
Use AskUserQuestion to gather what you still need to write good CLAUDE.md files and skills. Ask only things the code can't answer.
|
||||
|
||||
If the user chose project AGENTS.md or both: ask about codebase practices — non-obvious commands, gotchas, branch/PR conventions, required env setup, testing quirks. Skip things already in README or obvious from manifest files. Do not mark any options as "recommended" — this is about how their team works, not best practices.
|
||||
If the user chose project CLAUDE.md or both: ask about codebase practices — non-obvious commands, gotchas, branch/PR conventions, required env setup, testing quirks. Skip things already in README or obvious from manifest files. Do not mark any options as "recommended" — this is about how their team works, not best practices.
|
||||
|
||||
If the user chose personal CLAUDE.local.md or both: ask about them, not the codebase. Do not mark any options as "recommended" — this is about their personal preferences, not best practices. Examples of questions:
|
||||
- What's their role on the team? (e.g., "backend engineer", "data scientist", "new hire onboarding")
|
||||
- How familiar are they with this codebase and its languages/frameworks? (so Claude can calibrate explanation depth)
|
||||
- Do they have personal sandbox URLs, test accounts, API key paths, or local setup details Claude should know?
|
||||
- Only if Phase 2 found multiple git worktrees: ask whether their worktrees are nested inside the main repo (e.g., \`.claude/worktrees/<name>/\`) or siblings/external (e.g., \`../myrepo-feature/\`). If nested, the upward file walk finds the main repo's CLAUDE.local.md automatically — no special handling needed. If sibling/external, the personal content should live in a home-directory file (e.g., \`~/.claude/<project-name>-instructions.md\`) and each worktree gets a one-line CLAUDE.local.md stub that imports it: \`@~/.claude/<project-name>-instructions.md\`. Never put this import in the project AGENTS.md — that would check a personal reference into the team-shared file.
|
||||
- Only if Phase 2 found multiple git worktrees: ask whether their worktrees are nested inside the main repo (e.g., \`.claude/worktrees/<name>/\`) or siblings/external (e.g., \`../myrepo-feature/\`). If nested, the upward file walk finds the main repo's CLAUDE.local.md automatically — no special handling needed. If sibling/external, the personal content should live in a home-directory file (e.g., \`~/.claude/<project-name>-instructions.md\`) and each worktree gets a one-line CLAUDE.local.md stub that imports it: \`@~/.claude/<project-name>-instructions.md\`. Never put this import in the project CLAUDE.md — that would check a personal reference into the team-shared file.
|
||||
- Any communication preferences? (e.g., "be terse", "always explain tradeoffs", "don't summarize at the end")
|
||||
|
||||
**Synthesize a proposal from Phase 2 findings** — e.g., format-on-edit if a formatter exists, a project verification workflow if tests exist, an AGENTS.md note for anything from the gap-fill answers that's a guideline rather than a workflow. For each, pick the artifact type that fits, **constrained by the Phase 1 skills+hooks choice**:
|
||||
**Synthesize a proposal from Phase 2 findings** — e.g., format-on-edit if a formatter exists, a project verification workflow if tests exist, a CLAUDE.md note for anything from the gap-fill answers that's a guideline rather than a workflow. For each, pick the artifact type that fits, **constrained by the Phase 1 skills+hooks choice**:
|
||||
|
||||
- **Hook** (stricter) — deterministic shell command on a tool event; Claude can't skip it. Fits mechanical, fast, per-edit steps: formatting, linting, running a quick test on the changed file.
|
||||
- **Skill** (on-demand) — you or Claude invoke \`/skill-name\` when you want it. Fits workflows that don't belong on every edit: deep verification, session reports, deploys.
|
||||
- **AGENTS.md note** (looser) — influences Claude's behavior but not enforced. Fits communication/thinking preferences: "plan before coding", "be terse", "explain tradeoffs".
|
||||
- **CLAUDE.md note** (looser) — influences Claude's behavior but not enforced. Fits communication/thinking preferences: "plan before coding", "be terse", "explain tradeoffs".
|
||||
|
||||
**Respect Phase 1's skills+hooks choice as a hard filter**: if the user picked "Skills only", downgrade any hook you'd suggest to a skill or an AGENTS.md note. If "Hooks only", downgrade skills to hooks (where mechanically possible) or notes. If "Neither", everything becomes an AGENTS.md note. Never propose an artifact type the user didn't opt into.
|
||||
**Respect Phase 1's skills+hooks choice as a hard filter**: if the user picked "Skills only", downgrade any hook you'd suggest to a skill or a CLAUDE.md note. If "Hooks only", downgrade skills to hooks (where mechanically possible) or notes. If "Neither", everything becomes a CLAUDE.md note. Never propose an artifact type the user didn't opt into.
|
||||
|
||||
**Show the proposal via AskUserQuestion's \`preview\` field, not as a separate text message** — the dialog overlays your output, so preceding text is hidden. The \`preview\` field renders markdown in a side-panel (like plan mode); the \`question\` field is plain-text-only. Structure it as:
|
||||
|
||||
@@ -85,19 +86,17 @@ If the user chose personal CLAUDE.local.md or both: ask about them, not the code
|
||||
|
||||
• **Format-on-edit hook** (automatic) — \`ruff format <file>\` via PostToolUse
|
||||
• **Verification workflow** (on-demand) — \`make lint && make typecheck && make test\`
|
||||
• **AGENTS.md note** (guideline) — "run lint/typecheck/test before marking done"
|
||||
• **CLAUDE.md note** (guideline) — "run lint/typecheck/test before marking done"
|
||||
|
||||
- Option labels stay short ("Looks good", "Drop the hook", "Drop the skill") — the tool auto-adds an "Other" free-text option, so don't add your own catch-all.
|
||||
|
||||
**Build the preference queue** from the accepted proposal. Each entry: {type: hook|skill|note, description, target file, any Phase-2-sourced details like the actual test/format command}. Phases 4-7 consume this queue.
|
||||
|
||||
## Phase 4: Write AGENTS.md (if user chose project or both)
|
||||
## Phase 4: Write CLAUDE.md (if user chose project or both)
|
||||
|
||||
Write a minimal AGENTS.md at the project root. Every line must pass this test: "Would removing this cause Claude to make mistakes?" If no, cut it.
|
||||
Write a minimal CLAUDE.md at the project root. Every line must pass this test: "Would removing this cause Claude to make mistakes?" If no, cut it.
|
||||
|
||||
If the repo already has a checked-in root \`CLAUDE.md\` and does NOT already have a root \`AGENTS.md\`, do NOT silently create a second root instruction file. In that case, update the existing root \`CLAUDE.md\` in place by default. Only create or migrate to root \`AGENTS.md\` if the user explicitly asks to migrate.
|
||||
|
||||
**Consume \`note\` entries from the Phase 3 preference queue whose target is AGENTS.md** (team-level notes) — add each as a concise line in the most relevant section. These are the behaviors the user wants Claude to follow but didn't need guaranteed (e.g., "propose a plan before implementing", "explain the tradeoffs when refactoring"). Leave personal-targeted notes for Phase 5.
|
||||
**Consume \`note\` entries from the Phase 3 preference queue whose target is CLAUDE.md** (team-level notes) — add each as a concise line in the most relevant section. These are the behaviors the user wants Claude to follow but didn't need guaranteed (e.g., "propose a plan before implementing", "explain the tradeoffs when refactoring"). Leave personal-targeted notes for Phase 5.
|
||||
|
||||
Include:
|
||||
- Build/test/lint commands Claude can't guess (non-standard scripts, flags, or sequences)
|
||||
@@ -112,7 +111,7 @@ Exclude:
|
||||
- File-by-file structure or component lists (Claude can discover these by reading the codebase)
|
||||
- Standard language conventions Claude already knows
|
||||
- Generic advice ("write clean code", "handle errors")
|
||||
- Detailed API docs or long references — use \`@path/to/import\` syntax instead (e.g., \`@docs/api-reference.md\`) to inline content on demand without bloating AGENTS.md
|
||||
- Detailed API docs or long references — use \`@path/to/import\` syntax instead (e.g., \`@docs/api-reference.md\`) to inline content on demand without bloating CLAUDE.md
|
||||
- Information that changes frequently — reference the source with \`@path/to/import\` so Claude always reads the current version
|
||||
- Long tutorials or walkthroughs (move to a separate file and reference with \`@path/to/import\`, or put in a skill)
|
||||
- Commands obvious from manifest files (e.g., standard "npm test", "cargo test", "pytest")
|
||||
@@ -124,20 +123,20 @@ Do not repeat yourself and do not make up sections like "Common Development Task
|
||||
Prefix the file with:
|
||||
|
||||
\`\`\`
|
||||
# AGENTS.md
|
||||
# CLAUDE.md
|
||||
|
||||
This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.
|
||||
\`\`\`
|
||||
|
||||
If AGENTS.md already exists: read it, propose specific changes as diffs, and explain why each change improves it. Do not silently overwrite.
|
||||
If CLAUDE.md already exists: read it, propose specific changes as diffs, and explain why each change improves it. Do not silently overwrite.
|
||||
|
||||
For projects with multiple concerns, suggest organizing instructions into \`.claude/rules/\` as separate focused files (e.g., \`code-style.md\`, \`testing.md\`, \`security.md\`). These are loaded automatically alongside AGENTS.md and can be scoped to specific file paths using \`paths\` frontmatter.
|
||||
For projects with multiple concerns, suggest organizing instructions into \`.claude/rules/\` as separate focused files (e.g., \`code-style.md\`, \`testing.md\`, \`security.md\`). These are loaded automatically alongside CLAUDE.md and can be scoped to specific file paths using \`paths\` frontmatter.
|
||||
|
||||
For projects with distinct subdirectories (monorepos, multi-module projects, etc.): mention that subdirectory AGENTS.md files can be added for module-specific instructions (they're loaded automatically when Claude works in those directories). Offer to create them if the user wants.
|
||||
For projects with distinct subdirectories (monorepos, multi-module projects, etc.): mention that subdirectory CLAUDE.md files can be added for module-specific instructions (they're loaded automatically when Claude works in those directories). Offer to create them if the user wants.
|
||||
|
||||
## Phase 5: Write CLAUDE.local.md (if user chose personal or both)
|
||||
|
||||
Write a minimal CLAUDE.local.md at the project root. This file is automatically loaded alongside AGENTS.md. After creating it, add \`CLAUDE.local.md\` to the project's .gitignore so it stays private.
|
||||
Write a minimal CLAUDE.local.md at the project root. This file is automatically loaded alongside CLAUDE.md. After creating it, add \`CLAUDE.local.md\` to the project's .gitignore so it stays private.
|
||||
|
||||
**Consume \`note\` entries from the Phase 3 preference queue whose target is CLAUDE.local.md** (personal-level notes) — add each as a concise line. If the user chose personal-only in Phase 1, this is the sole consumer of note entries.
|
||||
|
||||
@@ -148,7 +147,7 @@ Include:
|
||||
|
||||
Keep it short — only include what would make Claude's responses noticeably better for this user.
|
||||
|
||||
If Phase 2 found multiple git worktrees and the user confirmed they use sibling/external worktrees (not nested inside the main repo): the upward file walk won't find a single CLAUDE.local.md from all worktrees. Write the actual personal content to \`~/.claude/<project-name>-instructions.md\` and make CLAUDE.local.md a one-line stub that imports it: \`@~/.claude/<project-name>-instructions.md\`. The user can copy this one-line stub to each sibling worktree. Never put this import in the project AGENTS.md. If worktrees are nested inside the main repo (e.g., \`.claude/worktrees/\`), no special handling is needed — the main repo's CLAUDE.local.md is found automatically.
|
||||
If Phase 2 found multiple git worktrees and the user confirmed they use sibling/external worktrees (not nested inside the main repo): the upward file walk won't find a single CLAUDE.local.md from all worktrees. Write the actual personal content to \`~/.claude/<project-name>-instructions.md\` and make CLAUDE.local.md a one-line stub that imports it: \`@~/.claude/<project-name>-instructions.md\`. The user can copy this one-line stub to each sibling worktree. Never put this import in the project CLAUDE.md. If worktrees are nested inside the main repo (e.g., \`.claude/worktrees/\`), no special handling is needed — the main repo's CLAUDE.local.md is found automatically.
|
||||
|
||||
If CLAUDE.local.md already exists: read it, propose specific additions, and do not silently overwrite.
|
||||
|
||||
@@ -184,7 +183,7 @@ Both the user (\`/<skill-name>\`) and Claude can invoke skills by default. For w
|
||||
|
||||
## Phase 7: Suggest additional optimizations
|
||||
|
||||
Tell the user you're going to suggest a few additional optimizations now that AGENTS.md and skills (if chosen) are in place.
|
||||
Tell the user you're going to suggest a few additional optimizations now that CLAUDE.md and skills (if chosen) are in place.
|
||||
|
||||
Check the environment and ask about each gap you find (use AskUserQuestion):
|
||||
|
||||
@@ -196,7 +195,7 @@ Check the environment and ask about each gap you find (use AskUserQuestion):
|
||||
|
||||
For each hook preference (from the queue or the formatter fallback):
|
||||
|
||||
1. Target file: default based on the Phase 1 instruction-file choice — project → \`.claude/settings.json\` (team-shared, committed); personal → \`.claude/settings.local.json\`. Only ask if the user chose "both" in Phase 1 or the preference is ambiguous. Ask once for all hooks, not per-hook.
|
||||
1. Target file: default based on the Phase 1 CLAUDE.md choice — project → \`.claude/settings.json\` (team-shared, committed); personal → \`.claude/settings.local.json\`. Only ask if the user chose "both" in Phase 1 or the preference is ambiguous. Ask once for all hooks, not per-hook.
|
||||
|
||||
2. Pick the event and matcher from the preference:
|
||||
- "after every edit" → \`PostToolUse\` with matcher \`Write|Edit\`
|
||||
@@ -228,9 +227,11 @@ const command = {
|
||||
type: 'prompt',
|
||||
name: 'init',
|
||||
get description() {
|
||||
return isNewInitEnabled()
|
||||
? 'Initialize new project instruction file(s) and optional skills/hooks with codebase documentation'
|
||||
: 'Initialize a new project instruction file with codebase documentation'
|
||||
return feature('NEW_INIT') &&
|
||||
(process.env.USER_TYPE === 'ant' ||
|
||||
isEnvTruthy(process.env.CLAUDE_CODE_NEW_INIT))
|
||||
? 'Initialize new CLAUDE.md file(s) and optional skills/hooks with codebase documentation'
|
||||
: 'Initialize a new CLAUDE.md file with codebase documentation'
|
||||
},
|
||||
contentLength: 0, // Dynamic content
|
||||
progressMessage: 'analyzing your codebase',
|
||||
@@ -241,7 +242,12 @@ const command = {
|
||||
return [
|
||||
{
|
||||
type: 'text',
|
||||
text: isNewInitEnabled() ? NEW_INIT_PROMPT : OLD_INIT_PROMPT,
|
||||
text:
|
||||
feature('NEW_INIT') &&
|
||||
(process.env.USER_TYPE === 'ant' ||
|
||||
isEnvTruthy(process.env.CLAUDE_CODE_NEW_INIT))
|
||||
? NEW_INIT_PROMPT
|
||||
: OLD_INIT_PROMPT,
|
||||
},
|
||||
]
|
||||
},
|
||||
|
||||
@@ -1,13 +0,0 @@
|
||||
import { feature } from 'bun:bundle'
|
||||
import { isEnvTruthy } from '../utils/envUtils.js'
|
||||
|
||||
export function isNewInitEnabled(): boolean {
|
||||
if (feature('NEW_INIT')) {
|
||||
return (
|
||||
process.env.USER_TYPE === 'ant' ||
|
||||
isEnvTruthy(process.env.CLAUDE_CODE_NEW_INIT)
|
||||
)
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
@@ -39,16 +39,16 @@ type InstallState = {
|
||||
message: string;
|
||||
warnings?: string[];
|
||||
};
|
||||
export function getInstallationPath(): string {
|
||||
function getInstallationPath(): string {
|
||||
const isWindows = env.platform === 'win32';
|
||||
const homeDir = homedir();
|
||||
if (isWindows) {
|
||||
// Convert to Windows-style path
|
||||
const windowsPath = join(homeDir, '.local', 'bin', 'openclaude.exe');
|
||||
const windowsPath = join(homeDir, '.local', 'bin', 'claude.exe');
|
||||
// Replace forward slashes with backslashes for Windows display
|
||||
return windowsPath.replace(/\//g, '\\');
|
||||
}
|
||||
return '~/.local/bin/openclaude';
|
||||
return '~/.local/bin/claude';
|
||||
}
|
||||
function SetupNotes(t0) {
|
||||
const $ = _c(5);
|
||||
|
||||
@@ -1,44 +1,20 @@
|
||||
import { afterEach, expect, mock, test } from 'bun:test'
|
||||
|
||||
import { getAdditionalModelOptionsCacheScope } from '../../services/api/providerConfig.js'
|
||||
import { getAPIProvider } from '../../utils/model/providers.js'
|
||||
|
||||
const originalEnv = {
|
||||
CLAUDE_CODE_USE_OPENAI: process.env.CLAUDE_CODE_USE_OPENAI,
|
||||
CLAUDE_CODE_USE_GEMINI: process.env.CLAUDE_CODE_USE_GEMINI,
|
||||
CLAUDE_CODE_USE_GITHUB: process.env.CLAUDE_CODE_USE_GITHUB,
|
||||
CLAUDE_CODE_USE_MISTRAL: process.env.CLAUDE_CODE_USE_MISTRAL,
|
||||
CLAUDE_CODE_USE_BEDROCK: process.env.CLAUDE_CODE_USE_BEDROCK,
|
||||
CLAUDE_CODE_USE_VERTEX: process.env.CLAUDE_CODE_USE_VERTEX,
|
||||
CLAUDE_CODE_USE_FOUNDRY: process.env.CLAUDE_CODE_USE_FOUNDRY,
|
||||
OPENAI_BASE_URL: process.env.OPENAI_BASE_URL,
|
||||
OPENAI_API_BASE: process.env.OPENAI_API_BASE,
|
||||
OPENAI_MODEL: process.env.OPENAI_MODEL,
|
||||
}
|
||||
|
||||
afterEach(() => {
|
||||
mock.restore()
|
||||
process.env.CLAUDE_CODE_USE_OPENAI = originalEnv.CLAUDE_CODE_USE_OPENAI
|
||||
process.env.CLAUDE_CODE_USE_GEMINI = originalEnv.CLAUDE_CODE_USE_GEMINI
|
||||
process.env.CLAUDE_CODE_USE_GITHUB = originalEnv.CLAUDE_CODE_USE_GITHUB
|
||||
process.env.CLAUDE_CODE_USE_MISTRAL = originalEnv.CLAUDE_CODE_USE_MISTRAL
|
||||
process.env.CLAUDE_CODE_USE_BEDROCK = originalEnv.CLAUDE_CODE_USE_BEDROCK
|
||||
process.env.CLAUDE_CODE_USE_VERTEX = originalEnv.CLAUDE_CODE_USE_VERTEX
|
||||
process.env.CLAUDE_CODE_USE_FOUNDRY = originalEnv.CLAUDE_CODE_USE_FOUNDRY
|
||||
process.env.OPENAI_BASE_URL = originalEnv.OPENAI_BASE_URL
|
||||
process.env.OPENAI_API_BASE = originalEnv.OPENAI_API_BASE
|
||||
process.env.OPENAI_MODEL = originalEnv.OPENAI_MODEL
|
||||
})
|
||||
|
||||
test('opens the model picker without awaiting local model discovery refresh', async () => {
|
||||
process.env.CLAUDE_CODE_USE_OPENAI = '1'
|
||||
delete process.env.CLAUDE_CODE_USE_GEMINI
|
||||
delete process.env.CLAUDE_CODE_USE_GITHUB
|
||||
delete process.env.CLAUDE_CODE_USE_MISTRAL
|
||||
delete process.env.CLAUDE_CODE_USE_BEDROCK
|
||||
delete process.env.CLAUDE_CODE_USE_VERTEX
|
||||
delete process.env.CLAUDE_CODE_USE_FOUNDRY
|
||||
delete process.env.OPENAI_API_BASE
|
||||
process.env.OPENAI_BASE_URL = 'http://127.0.0.1:8080/v1'
|
||||
process.env.OPENAI_MODEL = 'qwen2.5-coder-7b-instruct'
|
||||
|
||||
@@ -54,9 +30,7 @@ test('opens the model picker without awaiting local model discovery refresh', as
|
||||
discoverOpenAICompatibleModelOptions,
|
||||
}))
|
||||
|
||||
expect(getAdditionalModelOptionsCacheScope()).toBe('openai:http://127.0.0.1:8080/v1')
|
||||
|
||||
const { call } = await import('./model.js')
|
||||
const { call } = await import(`./model.js?ts=${Date.now()}-${Math.random()}`)
|
||||
const result = await Promise.race([
|
||||
call(() => {}, {} as never, ''),
|
||||
new Promise(resolve => setTimeout(() => resolve('timeout'), 50)),
|
||||
|
||||
@@ -284,7 +284,7 @@ function haveSameModelOptions(left: ModelOption[], right: ModelOption[]): boolea
|
||||
});
|
||||
}
|
||||
async function refreshOpenAIModelOptionsCache(): Promise<void> {
|
||||
if (!getAdditionalModelOptionsCacheScope()?.startsWith('openai:')) {
|
||||
if (getAPIProvider() !== 'openai') {
|
||||
return;
|
||||
}
|
||||
try {
|
||||
|
||||
@@ -22,14 +22,11 @@ import {
|
||||
import {
|
||||
buildCodexProfileEnv,
|
||||
buildGeminiProfileEnv,
|
||||
buildMistralProfileEnv,
|
||||
buildOllamaProfileEnv,
|
||||
buildOpenAIProfileEnv,
|
||||
createProfileFile,
|
||||
DEFAULT_GEMINI_BASE_URL,
|
||||
DEFAULT_GEMINI_MODEL,
|
||||
DEFAULT_MISTRAL_BASE_URL,
|
||||
DEFAULT_MISTRAL_MODEL,
|
||||
deleteProfileFile,
|
||||
loadProfileFile,
|
||||
maskSecretForDisplay,
|
||||
@@ -77,14 +74,6 @@ type Step =
|
||||
baseUrl: string | null
|
||||
defaultModel: string
|
||||
}
|
||||
| { name: 'mistral-key'; defaultModel: string }
|
||||
| { name: 'mistral-base'; apiKey: string; defaultModel: string }
|
||||
| {
|
||||
name: 'mistral-model'
|
||||
apiKey: string
|
||||
baseUrl: string | null
|
||||
defaultModel: string
|
||||
}
|
||||
| { name: 'gemini-auth-method' }
|
||||
| { name: 'gemini-key' }
|
||||
| { name: 'gemini-access-token' }
|
||||
@@ -127,8 +116,6 @@ type ProviderWizardDefaults = {
|
||||
openAIModel: string
|
||||
openAIBaseUrl: string
|
||||
geminiModel: string
|
||||
mistralModel: string
|
||||
mistralBaseUrl: string
|
||||
}
|
||||
|
||||
function isEnvTruthy(value: string | undefined): boolean {
|
||||
@@ -160,19 +147,11 @@ export function getProviderWizardDefaults(
|
||||
const safeGeminiModel =
|
||||
sanitizeProviderConfigValue(processEnv.GEMINI_MODEL, processEnv) ||
|
||||
DEFAULT_GEMINI_MODEL
|
||||
const safeMistralModel =
|
||||
sanitizeProviderConfigValue(processEnv.MISTRAL_MODEL, processEnv) ||
|
||||
DEFAULT_MISTRAL_MODEL
|
||||
const safeMistralBaseUrl =
|
||||
sanitizeProviderConfigValue(processEnv.MISTRAL_BASE_URL, processEnv) ||
|
||||
DEFAULT_MISTRAL_BASE_URL
|
||||
|
||||
return {
|
||||
openAIModel: safeOpenAIModel,
|
||||
openAIBaseUrl: safeOpenAIBaseUrl,
|
||||
geminiModel: safeGeminiModel,
|
||||
mistralModel: safeMistralModel,
|
||||
mistralBaseUrl: safeMistralBaseUrl,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -199,21 +178,6 @@ export function buildCurrentProviderSummary(options?: {
|
||||
}
|
||||
}
|
||||
|
||||
if (isEnvTruthy(processEnv.CLAUDE_CODE_USE_MISTRAL)) {
|
||||
return {
|
||||
providerLabel: 'Mistral',
|
||||
modelLabel: getSafeDisplayValue(
|
||||
processEnv.MISTRAL_MODEL ?? DEFAULT_MISTRAL_MODEL,
|
||||
processEnv
|
||||
),
|
||||
endpointLabel: getSafeDisplayValue(
|
||||
processEnv.MISTRAL_BASE_URL ?? DEFAULT_MISTRAL_BASE_URL,
|
||||
processEnv
|
||||
),
|
||||
savedProfileLabel,
|
||||
}
|
||||
}
|
||||
|
||||
if (isEnvTruthy(processEnv.CLAUDE_CODE_USE_GITHUB)) {
|
||||
return {
|
||||
providerLabel: 'GitHub Models',
|
||||
@@ -295,24 +259,6 @@ function buildSavedProfileSummary(
|
||||
? 'configured'
|
||||
: undefined,
|
||||
}
|
||||
case 'mistral':
|
||||
return {
|
||||
providerLabel: 'Mistral',
|
||||
modelLabel: getSafeDisplayValue(
|
||||
env.MISTRAL_MODEL ?? DEFAULT_MISTRAL_MODEL,
|
||||
process.env,
|
||||
env,
|
||||
),
|
||||
endpointLabel: getSafeDisplayValue(
|
||||
env.MISTRAL_BASE_URL ?? DEFAULT_MISTRAL_BASE_URL,
|
||||
process.env,
|
||||
env,
|
||||
),
|
||||
credentialLabel:
|
||||
maskSecretForDisplay(env.MISTRAL_API_KEY) !== undefined
|
||||
? 'configured'
|
||||
: undefined,
|
||||
}
|
||||
case 'codex':
|
||||
return {
|
||||
providerLabel: 'Codex',
|
||||
@@ -527,11 +473,6 @@ function ProviderChooser({
|
||||
value: 'gemini',
|
||||
description: 'Use Google Gemini with API key, access token, or local ADC',
|
||||
},
|
||||
{
|
||||
label: 'Mistral',
|
||||
value: 'mistral',
|
||||
description: 'Use Mistral with API key'
|
||||
},
|
||||
{
|
||||
label: 'Codex',
|
||||
value: 'codex',
|
||||
@@ -1030,11 +971,6 @@ export function ProviderWizard({
|
||||
})
|
||||
} else if (value === 'gemini') {
|
||||
setStep({ name: 'gemini-auth-method' })
|
||||
} else if (value === 'mistral') {
|
||||
setStep({
|
||||
name: 'mistral-key',
|
||||
defaultModel: defaults.mistralModel,
|
||||
})
|
||||
} else if (value === 'clear') {
|
||||
const filePath = deleteProfileFile()
|
||||
onDone(`Removed saved provider profile at ${filePath}. Restart OpenClaude to go back to normal startup.`, {
|
||||
@@ -1174,101 +1110,6 @@ export function ProviderWizard({
|
||||
/>
|
||||
)
|
||||
|
||||
case 'mistral-key':
|
||||
return (
|
||||
<TextEntryDialog
|
||||
resetStateKey={step.name}
|
||||
title="Mistral setup"
|
||||
subtitle="Step 1 of 3"
|
||||
description={
|
||||
process.env.MISTRAL_API_KEY
|
||||
? 'Enter an API key, or leave this blank to reuse the current MISTRAL_API_KEY from this session.'
|
||||
: 'Enter the API key for your Mistral provider.'
|
||||
}
|
||||
initialValue=""
|
||||
placeholder="..."
|
||||
mask="*"
|
||||
allowEmpty={Boolean(process.env.MISTRAL_API_KEY)}
|
||||
validate={value => {
|
||||
const candidate = value.trim() || process.env.MISTRAL_API_KEY || ''
|
||||
return sanitizeApiKey(candidate)
|
||||
? null
|
||||
: 'Enter a real API key. Placeholder values like SUA_CHAVE are not valid.'
|
||||
}}
|
||||
onSubmit={value => {
|
||||
const apiKey = value.trim() || process.env.MISTRAL_API_KEY || ''
|
||||
setStep({
|
||||
name: 'mistral-base',
|
||||
apiKey,
|
||||
defaultModel: step.defaultModel,
|
||||
})
|
||||
}}
|
||||
onCancel={() => setStep({ name: 'choose' })}
|
||||
/>
|
||||
)
|
||||
|
||||
case 'mistral-base':
|
||||
return (
|
||||
<TextEntryDialog
|
||||
resetStateKey={step.name}
|
||||
title="Mistral setup"
|
||||
subtitle="Step 2 of 3"
|
||||
description={`Optionally enter a base URL. Leave blank for ${DEFAULT_MISTRAL_BASE_URL}.`}
|
||||
initialValue={
|
||||
defaults.mistralBaseUrl === DEFAULT_MISTRAL_BASE_URL
|
||||
? ''
|
||||
: defaults.mistralBaseUrl
|
||||
}
|
||||
placeholder={DEFAULT_MISTRAL_BASE_URL}
|
||||
allowEmpty
|
||||
onSubmit={value => {
|
||||
setStep({
|
||||
name: 'mistral-model',
|
||||
apiKey: step.apiKey,
|
||||
baseUrl: value.trim() || null,
|
||||
defaultModel: step.defaultModel,
|
||||
})
|
||||
}}
|
||||
onCancel={() =>
|
||||
setStep({
|
||||
name: 'mistral-key',
|
||||
defaultModel: step.defaultModel,
|
||||
})
|
||||
}
|
||||
/>
|
||||
)
|
||||
|
||||
case 'mistral-model':
|
||||
return (
|
||||
<TextEntryDialog
|
||||
resetStateKey={step.name}
|
||||
title="Mistral setup"
|
||||
subtitle="Step 3 of 3"
|
||||
description={`Enter a model name. Leave blank for ${step.defaultModel}.`}
|
||||
initialValue={defaults.mistralModel ?? step.defaultModel}
|
||||
placeholder={step.defaultModel}
|
||||
allowEmpty
|
||||
onSubmit={value => {
|
||||
const env = buildMistralProfileEnv({
|
||||
model: value.trim() || step.defaultModel,
|
||||
baseUrl: step.baseUrl,
|
||||
apiKey: step.apiKey,
|
||||
processEnv: process.env,
|
||||
})
|
||||
if (env) {
|
||||
finishProfileSave(onDone, 'mistral', env)
|
||||
}
|
||||
}}
|
||||
onCancel={() =>
|
||||
setStep({
|
||||
name: 'mistral-base',
|
||||
apiKey: step.apiKey,
|
||||
defaultModel: step.defaultModel,
|
||||
})
|
||||
}
|
||||
/>
|
||||
)
|
||||
|
||||
case 'gemini-auth-method': {
|
||||
const hasShellGeminiKey = Boolean(
|
||||
process.env.GEMINI_API_KEY || process.env.GOOGLE_API_KEY,
|
||||
|
||||
@@ -65,7 +65,7 @@ export async function call(onDone: (result?: string) => void, _context: unknown,
|
||||
|
||||
// Get the local settings path and make it relative to cwd
|
||||
const localSettingsPath = getSettingsFilePathForSource('localSettings');
|
||||
const relativePath = localSettingsPath ? relative(getCwdState(), localSettingsPath) : '.openclaude/settings.local.json';
|
||||
const relativePath = localSettingsPath ? relative(getCwdState(), localSettingsPath) : '.claude/settings.local.json';
|
||||
const message = color('success', themeName)(`Added "${cleanPattern}" to excluded commands in ${relativePath}`);
|
||||
onDone(message);
|
||||
return null;
|
||||
|
||||
@@ -1,12 +0,0 @@
|
||||
import type { Command } from '../../commands.js'
|
||||
|
||||
const wiki = {
|
||||
type: 'local-jsx',
|
||||
name: 'wiki',
|
||||
description: 'Initialize and inspect the OpenClaude project wiki',
|
||||
argumentHint: '[init|status]',
|
||||
immediate: true,
|
||||
load: () => import('./wiki.js'),
|
||||
} satisfies Command
|
||||
|
||||
export default wiki
|
||||
@@ -1,123 +0,0 @@
|
||||
import React from 'react'
|
||||
import { COMMON_HELP_ARGS, COMMON_INFO_ARGS } from '../../constants/xml.js'
|
||||
import { ingestLocalWikiSource } from '../../services/wiki/ingest.js'
|
||||
import { initializeWiki } from '../../services/wiki/init.js'
|
||||
import { getWikiStatus } from '../../services/wiki/status.js'
|
||||
import type {
|
||||
LocalJSXCommandCall,
|
||||
LocalJSXCommandOnDone,
|
||||
} from '../../types/command.js'
|
||||
import { getCwd } from '../../utils/cwd.js'
|
||||
|
||||
function renderHelp(): string {
|
||||
return `Usage: /wiki [init|status|ingest <path>]
|
||||
|
||||
Manage the OpenClaude project wiki stored in .openclaude/wiki.
|
||||
|
||||
Commands:
|
||||
/wiki init Initialize the wiki structure in the current project
|
||||
/wiki status Show wiki status and page/source counts
|
||||
/wiki ingest Ingest a local file into wiki sources
|
||||
|
||||
Examples:
|
||||
/wiki init
|
||||
/wiki status
|
||||
/wiki ingest README.md`
|
||||
}
|
||||
|
||||
function formatInitResult(result: Awaited<ReturnType<typeof initializeWiki>>): string {
|
||||
const lines = [`Initialized OpenClaude wiki at ${result.root}`]
|
||||
|
||||
if (result.alreadyExisted) {
|
||||
lines.push('', 'Wiki already existed. No new files were created.')
|
||||
return lines.join('\n')
|
||||
}
|
||||
|
||||
if (result.createdFiles.length > 0) {
|
||||
lines.push('', 'Created files:')
|
||||
for (const file of result.createdFiles) {
|
||||
lines.push(`- ${file}`)
|
||||
}
|
||||
}
|
||||
|
||||
return lines.join('\n')
|
||||
}
|
||||
|
||||
function formatStatus(status: Awaited<ReturnType<typeof getWikiStatus>>): string {
|
||||
if (!status.initialized) {
|
||||
return `OpenClaude wiki is not initialized in this project.\n\nRun /wiki init to create ${status.root}.`
|
||||
}
|
||||
|
||||
return [
|
||||
'OpenClaude wiki status',
|
||||
'',
|
||||
`Root: ${status.root}`,
|
||||
`Pages: ${status.pageCount}`,
|
||||
`Sources: ${status.sourceCount}`,
|
||||
`Schema: ${status.hasSchema ? 'present' : 'missing'}`,
|
||||
`Index: ${status.hasIndex ? 'present' : 'missing'}`,
|
||||
`Log: ${status.hasLog ? 'present' : 'missing'}`,
|
||||
`Last updated: ${status.lastUpdatedAt ?? 'unknown'}`,
|
||||
].join('\n')
|
||||
}
|
||||
|
||||
function formatIngestResult(
|
||||
result: Awaited<ReturnType<typeof ingestLocalWikiSource>>,
|
||||
): string {
|
||||
return [
|
||||
`Ingested ${result.sourceFile} into the OpenClaude wiki.`,
|
||||
'',
|
||||
`Title: ${result.title}`,
|
||||
`Source note: ${result.sourceNote}`,
|
||||
`Summary: ${result.summary}`,
|
||||
].join('\n')
|
||||
}
|
||||
|
||||
async function runWikiCommand(
|
||||
onDone: LocalJSXCommandOnDone,
|
||||
args: string,
|
||||
): Promise<void> {
|
||||
const cwd = getCwd()
|
||||
const normalized = args.trim().toLowerCase()
|
||||
|
||||
if (COMMON_HELP_ARGS.includes(normalized) || COMMON_INFO_ARGS.includes(normalized)) {
|
||||
onDone(renderHelp(), { display: 'system' })
|
||||
return
|
||||
}
|
||||
|
||||
if (!normalized || normalized === 'status') {
|
||||
onDone(formatStatus(await getWikiStatus(cwd)), { display: 'system' })
|
||||
return
|
||||
}
|
||||
|
||||
if (normalized === 'init') {
|
||||
onDone(formatInitResult(await initializeWiki(cwd)), { display: 'system' })
|
||||
return
|
||||
}
|
||||
|
||||
if (normalized.startsWith('ingest')) {
|
||||
const pathArg = args.trim().slice('ingest'.length).trim()
|
||||
if (!pathArg) {
|
||||
onDone('Usage: /wiki ingest <local-file-path>', { display: 'system' })
|
||||
return
|
||||
}
|
||||
|
||||
onDone(formatIngestResult(await ingestLocalWikiSource(cwd, pathArg)), {
|
||||
display: 'system',
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
onDone(`Unknown wiki subcommand: ${args.trim()}\n\n${renderHelp()}`, {
|
||||
display: 'system',
|
||||
})
|
||||
}
|
||||
|
||||
export const call: LocalJSXCommandCall = async (
|
||||
onDone,
|
||||
_context,
|
||||
args,
|
||||
): Promise<React.ReactNode> => {
|
||||
await runWikiCommand(onDone, args ?? '')
|
||||
return null
|
||||
}
|
||||
@@ -188,9 +188,9 @@ export function AutoUpdater({
|
||||
✓ Update installed · Restart to apply
|
||||
</Text>}
|
||||
{(autoUpdaterResult?.status === 'install_failed' || autoUpdaterResult?.status === 'no_permissions') && <Text color="error" wrap="truncate">
|
||||
✗ Auto-update failed · Try <Text bold>openclaude doctor</Text> or{' '}
|
||||
✗ Auto-update failed · Try <Text bold>claude doctor</Text> or{' '}
|
||||
<Text bold>
|
||||
{hasLocalInstall ? `cd ~/.openclaude/local && npm update ${MACRO.PACKAGE_URL}` : `npm i -g ${MACRO.PACKAGE_URL}`}
|
||||
{hasLocalInstall ? `cd ~/.claude/local && npm update ${MACRO.PACKAGE_URL}` : `npm i -g ${MACRO.PACKAGE_URL}`}
|
||||
</Text>
|
||||
</Text>}
|
||||
</Box>;
|
||||
|
||||
@@ -31,11 +31,9 @@ export function BaseTextInput(t0) {
|
||||
} = t0;
|
||||
const {
|
||||
onInput,
|
||||
value,
|
||||
renderedValue,
|
||||
cursorLine,
|
||||
cursorColumn,
|
||||
offset,
|
||||
cursorColumn
|
||||
} = inputState;
|
||||
const t1 = Boolean(props.focus && props.showCursor && terminalFocus);
|
||||
let t2;
|
||||
@@ -80,7 +78,7 @@ export function BaseTextInput(t0) {
|
||||
renderedPlaceholder
|
||||
} = renderPlaceholder({
|
||||
placeholder: props.placeholder,
|
||||
value,
|
||||
value: props.value,
|
||||
showCursor: props.showCursor,
|
||||
focus: props.focus,
|
||||
terminalFocus,
|
||||
@@ -90,9 +88,9 @@ export function BaseTextInput(t0) {
|
||||
useInput(wrappedOnInput, {
|
||||
isActive: props.focus
|
||||
});
|
||||
const commandWithoutArgs = value && value.trim().indexOf(" ") === -1 || value && value.endsWith(" ");
|
||||
const showArgumentHint = Boolean(props.argumentHint && value && commandWithoutArgs && value.startsWith("/"));
|
||||
const cursorFiltered = props.showCursor && props.highlights ? props.highlights.filter(h => h.dimColor || offset < h.start || offset >= h.end) : props.highlights;
|
||||
const commandWithoutArgs = props.value && props.value.trim().indexOf(" ") === -1 || props.value && props.value.endsWith(" ");
|
||||
const showArgumentHint = Boolean(props.argumentHint && props.value && commandWithoutArgs && props.value.startsWith("/"));
|
||||
const cursorFiltered = props.showCursor && props.highlights ? props.highlights.filter(h => h.dimColor || props.cursorOffset < h.start || props.cursorOffset >= h.end) : props.highlights;
|
||||
const {
|
||||
viewportCharOffset,
|
||||
viewportCharEnd
|
||||
@@ -104,13 +102,13 @@ export function BaseTextInput(t0) {
|
||||
})) : cursorFiltered;
|
||||
const hasHighlights = filteredHighlights && filteredHighlights.length > 0;
|
||||
if (hasHighlights) {
|
||||
return <Box ref={cursorRef}><HighlightedInput text={renderedValue} highlights={filteredHighlights} />{showArgumentHint && <Text dimColor={true}>{value.endsWith(" ") ? "" : " "}{props.argumentHint}</Text>}{children}</Box>;
|
||||
return <Box ref={cursorRef}><HighlightedInput text={renderedValue} highlights={filteredHighlights} />{showArgumentHint && <Text dimColor={true}>{props.value?.endsWith(" ") ? "" : " "}{props.argumentHint}</Text>}{children}</Box>;
|
||||
}
|
||||
const T0 = Box;
|
||||
const T1 = Text;
|
||||
const t4 = "truncate-end";
|
||||
const t5 = showPlaceholder && props.placeholderElement ? props.placeholderElement : showPlaceholder && renderedPlaceholder ? <Ansi>{renderedPlaceholder}</Ansi> : <Ansi>{renderedValue}</Ansi>;
|
||||
const t6 = showArgumentHint && <Text dimColor={true}>{value.endsWith(" ") ? "" : " "}{props.argumentHint}</Text>;
|
||||
const t6 = showArgumentHint && <Text dimColor={true}>{props.value?.endsWith(" ") ? "" : " "}{props.argumentHint}</Text>;
|
||||
let t7;
|
||||
if ($[4] !== T1 || $[5] !== children || $[6] !== props || $[7] !== t5 || $[8] !== t6) {
|
||||
t7 = <T1 wrap={t4} dimColor={props.dimColor}>{t5}{t6}{children}</T1>;
|
||||
|
||||
@@ -103,7 +103,7 @@ test('login picker shows the third-party platform option', async () => {
|
||||
expect(output).toContain('3rd-party platform')
|
||||
})
|
||||
|
||||
test('third-party provider branch opens the first-run provider manager', async () => {
|
||||
test('third-party provider branch opens the provider wizard', async () => {
|
||||
const output = await renderFrame(
|
||||
<ConsoleOAuthFlow
|
||||
initialStatus={{ state: 'platform_setup' }}
|
||||
@@ -111,9 +111,7 @@ test('third-party provider branch opens the first-run provider manager', async (
|
||||
/>,
|
||||
)
|
||||
|
||||
expect(output).toContain('Set up provider')
|
||||
expect(output).toContain('Anthropic')
|
||||
expect(output).toContain('OpenAI')
|
||||
expect(output).toContain('Set up a provider profile')
|
||||
expect(output).toContain('OpenAI-compatible')
|
||||
expect(output).toContain('Ollama')
|
||||
expect(output).toContain('LM Studio')
|
||||
})
|
||||
|
||||
@@ -12,7 +12,7 @@ import { OAuthService } from '../services/oauth/index.js';
|
||||
import { getOauthAccountInfo, validateForceLoginOrg } from '../utils/auth.js';
|
||||
import { logError } from '../utils/log.js';
|
||||
import { getSettings_DEPRECATED } from '../utils/settings/settings.js';
|
||||
import { ProviderManager } from './ProviderManager.js';
|
||||
import { ProviderWizard } from '../commands/provider/provider.js';
|
||||
import { Select } from './CustomSelect/select.js';
|
||||
import { KeyboardShortcutHint } from './design-system/KeyboardShortcutHint.js';
|
||||
import { Spinner } from './Spinner.js';
|
||||
@@ -450,17 +450,16 @@ function OAuthStatusMessage({
|
||||
|
||||
case 'platform_setup':
|
||||
return (
|
||||
<ProviderManager
|
||||
mode="first-run"
|
||||
<ProviderWizard
|
||||
onDone={result => {
|
||||
if (!result || result.action !== 'saved' || !result.message) {
|
||||
if (!result) {
|
||||
setOAuthStatus({ state: 'idle' })
|
||||
return
|
||||
}
|
||||
|
||||
setOAuthStatus({
|
||||
state: 'platform_setup_complete',
|
||||
message: result.message,
|
||||
message: result,
|
||||
})
|
||||
}}
|
||||
/>
|
||||
|
||||
@@ -285,7 +285,7 @@ export function Select(t0) {
|
||||
onChange,
|
||||
onCancel,
|
||||
onFocus,
|
||||
defaultFocusValue,
|
||||
focusValue: defaultFocusValue
|
||||
};
|
||||
$[7] = defaultFocusValue;
|
||||
$[8] = defaultValue;
|
||||
|
||||
@@ -35,11 +35,6 @@ export type UseSelectStateProps<T> = {
|
||||
*/
|
||||
onFocus?: (value: T) => void
|
||||
|
||||
/**
|
||||
* Initial value to focus when the component mounts.
|
||||
*/
|
||||
defaultFocusValue?: T
|
||||
|
||||
/**
|
||||
* Value to focus
|
||||
*/
|
||||
@@ -136,7 +131,6 @@ export function useSelectState<T>({
|
||||
onChange,
|
||||
onCancel,
|
||||
onFocus,
|
||||
defaultFocusValue,
|
||||
focusValue,
|
||||
}: UseSelectStateProps<T>): SelectState<T> {
|
||||
const [value, setValue] = useState<T | undefined>(defaultValue)
|
||||
@@ -144,7 +138,7 @@ export function useSelectState<T>({
|
||||
const navigation = useSelectNavigation<T>({
|
||||
visibleOptionCount,
|
||||
options,
|
||||
initialFocusValue: defaultFocusValue,
|
||||
initialFocusValue: undefined,
|
||||
onFocus,
|
||||
focusValue,
|
||||
})
|
||||
|
||||
@@ -252,24 +252,14 @@ function PromptInput({
|
||||
show: false
|
||||
});
|
||||
const [cursorOffset, setCursorOffset] = useState<number>(input.length);
|
||||
// Track the last input value set via internal handlers so external updates
|
||||
// (for example speech-to-text injection) can still move the cursor to end
|
||||
// without clobbering a pending internal keystroke during render.
|
||||
// Track the last input value set via internal handlers so we can detect
|
||||
// external input changes (e.g. speech-to-text injection) and move cursor to end.
|
||||
const lastInternalInputRef = React.useRef(input);
|
||||
const lastPropInputRef = React.useRef(input);
|
||||
React.useLayoutEffect(() => {
|
||||
if (input === lastPropInputRef.current) {
|
||||
return;
|
||||
}
|
||||
|
||||
lastPropInputRef.current = input;
|
||||
if (input === lastInternalInputRef.current) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (input !== lastInternalInputRef.current) {
|
||||
// Input changed externally (not through any internal handler) — move cursor to end
|
||||
setCursorOffset(input.length);
|
||||
lastInternalInputRef.current = input;
|
||||
setCursorOffset(prev => prev === input.length ? prev : input.length);
|
||||
}, [input]);
|
||||
}
|
||||
// Wrap onInputChange to track internal changes before they trigger re-render
|
||||
const trackAndSetInput = React.useCallback((value: string) => {
|
||||
lastInternalInputRef.current = value;
|
||||
@@ -2211,7 +2201,7 @@ function PromptInput({
|
||||
multiline: true,
|
||||
onSubmit,
|
||||
onChange,
|
||||
value: isSearchingHistory && historyMatch ? getValueFromInput(typeof historyMatch === 'string' ? historyMatch : historyMatch.display) : input,
|
||||
value: historyMatch ? getValueFromInput(typeof historyMatch === 'string' ? historyMatch : historyMatch.display) : input,
|
||||
// History navigation is handled via TextInput props (onHistoryUp/onHistoryDown),
|
||||
// NOT via useKeybindings. This allows useTextInput's upOrHistoryUp/downOrHistoryDown
|
||||
// to try cursor movement first and only fall through to history navigation when the
|
||||
|
||||
@@ -6,7 +6,6 @@ import stripAnsi from 'strip-ansi'
|
||||
|
||||
import { createRoot } from '../ink.js'
|
||||
import { AppStateProvider } from '../state/AppState.js'
|
||||
import { KeybindingSetup } from '../keybindings/KeybindingProviderSetup.js'
|
||||
|
||||
const SYNC_START = '\x1B[?2026h'
|
||||
const SYNC_END = '\x1B[?2026l'
|
||||
@@ -107,30 +106,19 @@ function createDeferred<T>(): {
|
||||
return { promise, resolve }
|
||||
}
|
||||
|
||||
function mockProviderProfilesModule(options?: {
|
||||
addProviderProfile?: (...args: unknown[]) => unknown
|
||||
}): void {
|
||||
function mockProviderProfilesModule(): void {
|
||||
mock.module('../utils/providerProfiles.js', () => ({
|
||||
addProviderProfile: options?.addProviderProfile ?? (() => null),
|
||||
addProviderProfile: () => null,
|
||||
applyActiveProviderProfileFromConfig: () => {},
|
||||
deleteProviderProfile: () => ({ removed: false, activeProfileId: null }),
|
||||
getActiveProviderProfile: () => null,
|
||||
getProviderPresetDefaults: (preset: string) =>
|
||||
preset === 'ollama'
|
||||
? {
|
||||
provider: 'openai',
|
||||
name: 'Ollama',
|
||||
baseUrl: 'http://localhost:11434/v1',
|
||||
model: 'llama3.1:8b',
|
||||
apiKey: '',
|
||||
}
|
||||
: {
|
||||
getProviderPresetDefaults: () => ({
|
||||
provider: 'openai',
|
||||
name: 'Mock provider',
|
||||
baseUrl: 'http://localhost:11434/v1',
|
||||
model: 'mock-model',
|
||||
apiKey: '',
|
||||
},
|
||||
}),
|
||||
getProviderProfiles: () => [],
|
||||
setActiveProviderProfile: () => null,
|
||||
updateProviderProfile: () => null,
|
||||
@@ -140,27 +128,8 @@ function mockProviderProfilesModule(options?: {
|
||||
function mockProviderManagerDependencies(
|
||||
syncRead: () => string | undefined,
|
||||
asyncRead: () => Promise<string | undefined>,
|
||||
options?: {
|
||||
addProviderProfile?: (...args: unknown[]) => unknown
|
||||
hasLocalOllama?: () => Promise<boolean>
|
||||
listOllamaModels?: () => Promise<
|
||||
Array<{
|
||||
name: string
|
||||
sizeBytes?: number | null
|
||||
family?: string | null
|
||||
families?: string[]
|
||||
parameterSize?: string | null
|
||||
quantizationLevel?: string | null
|
||||
}>
|
||||
>
|
||||
},
|
||||
): void {
|
||||
mockProviderProfilesModule({ addProviderProfile: options?.addProviderProfile })
|
||||
|
||||
mock.module('../utils/providerDiscovery.js', () => ({
|
||||
hasLocalOllama: options?.hasLocalOllama ?? (async () => false),
|
||||
listOllamaModels: options?.listOllamaModels ?? (async () => []),
|
||||
}))
|
||||
mockProviderProfilesModule()
|
||||
|
||||
mock.module('../utils/githubModelsCredentials.js', () => ({
|
||||
clearGithubModelsToken: () => ({ success: true }),
|
||||
@@ -193,14 +162,9 @@ async function waitForFrameOutput(
|
||||
async function mountProviderManager(
|
||||
ProviderManager: React.ComponentType<{
|
||||
mode: 'first-run' | 'manage'
|
||||
onDone: (result?: unknown) => void
|
||||
onDone: () => void
|
||||
}>,
|
||||
options?: {
|
||||
mode?: 'first-run' | 'manage'
|
||||
onDone?: (result?: unknown) => void
|
||||
},
|
||||
): Promise<{
|
||||
stdin: PassThrough
|
||||
getOutput: () => string
|
||||
dispose: () => Promise<void>
|
||||
}> {
|
||||
@@ -213,17 +177,14 @@ async function mountProviderManager(
|
||||
|
||||
root.render(
|
||||
<AppStateProvider>
|
||||
<KeybindingSetup>
|
||||
<ProviderManager
|
||||
mode={options?.mode ?? 'manage'}
|
||||
onDone={options?.onDone ?? (() => {})}
|
||||
mode="manage"
|
||||
onDone={() => {}}
|
||||
/>
|
||||
</KeybindingSetup>
|
||||
</AppStateProvider>,
|
||||
)
|
||||
|
||||
return {
|
||||
stdin,
|
||||
getOutput,
|
||||
dispose: async () => {
|
||||
root.unmount()
|
||||
@@ -237,17 +198,14 @@ async function mountProviderManager(
|
||||
async function renderProviderManagerFrame(
|
||||
ProviderManager: React.ComponentType<{
|
||||
mode: 'first-run' | 'manage'
|
||||
onDone: (result?: unknown) => void
|
||||
onDone: () => void
|
||||
}>,
|
||||
options?: {
|
||||
waitForOutput?: (output: string) => boolean
|
||||
timeoutMs?: number
|
||||
mode?: 'first-run' | 'manage'
|
||||
},
|
||||
): Promise<string> {
|
||||
const mounted = await mountProviderManager(ProviderManager, {
|
||||
mode: options?.mode,
|
||||
})
|
||||
const mounted = await mountProviderManager(ProviderManager)
|
||||
const output = await waitForFrameOutput(
|
||||
mounted.getOutput,
|
||||
frame => {
|
||||
@@ -305,96 +263,6 @@ test('ProviderManager resolves GitHub virtual provider from async storage withou
|
||||
expect(asyncRead).toHaveBeenCalled()
|
||||
})
|
||||
|
||||
test('ProviderManager first-run Ollama preset auto-detects installed models', async () => {
|
||||
delete process.env.CLAUDE_CODE_USE_GITHUB
|
||||
delete process.env.GITHUB_TOKEN
|
||||
delete process.env.GH_TOKEN
|
||||
|
||||
const onDone = mock(() => {})
|
||||
const addProviderProfile = mock((payload: {
|
||||
provider: string
|
||||
name: string
|
||||
baseUrl: string
|
||||
model: string
|
||||
apiKey?: string
|
||||
}) => ({
|
||||
id: 'provider_ollama',
|
||||
provider: payload.provider,
|
||||
name: payload.name,
|
||||
baseUrl: payload.baseUrl,
|
||||
model: payload.model,
|
||||
apiKey: payload.apiKey,
|
||||
}))
|
||||
|
||||
mockProviderManagerDependencies(
|
||||
() => undefined,
|
||||
async () => undefined,
|
||||
{
|
||||
addProviderProfile,
|
||||
hasLocalOllama: async () => true,
|
||||
listOllamaModels: async () => [
|
||||
{
|
||||
name: 'gemma4:31b-cloud',
|
||||
family: 'gemma',
|
||||
parameterSize: '31b',
|
||||
},
|
||||
{
|
||||
name: 'kimi-k2.5:cloud',
|
||||
family: 'kimi',
|
||||
parameterSize: '2.5b',
|
||||
},
|
||||
],
|
||||
},
|
||||
)
|
||||
|
||||
const nonce = `${Date.now()}-${Math.random()}`
|
||||
const { ProviderManager } = await import(`./ProviderManager.js?ts=${nonce}`)
|
||||
const mounted = await mountProviderManager(ProviderManager, {
|
||||
mode: 'first-run',
|
||||
onDone,
|
||||
})
|
||||
|
||||
await waitForFrameOutput(
|
||||
mounted.getOutput,
|
||||
frame => frame.includes('Set up provider') && frame.includes('Ollama'),
|
||||
)
|
||||
|
||||
mounted.stdin.write('j')
|
||||
await Bun.sleep(50)
|
||||
mounted.stdin.write('\r')
|
||||
|
||||
const modelFrame = await waitForFrameOutput(
|
||||
mounted.getOutput,
|
||||
frame =>
|
||||
frame.includes('Choose an Ollama model') &&
|
||||
frame.includes('gemma4:31b-cloud') &&
|
||||
frame.includes('kimi-k2.5:cloud'),
|
||||
)
|
||||
|
||||
expect(modelFrame).toContain('Choose an Ollama model')
|
||||
expect(modelFrame).toContain('gemma4:31b-cloud')
|
||||
|
||||
await Bun.sleep(25)
|
||||
mounted.stdin.write('\r')
|
||||
|
||||
await waitForCondition(() => onDone.mock.calls.length > 0)
|
||||
|
||||
expect(addProviderProfile).toHaveBeenCalled()
|
||||
expect(addProviderProfile.mock.calls[0]?.[0]).toMatchObject({
|
||||
name: 'Ollama',
|
||||
baseUrl: 'http://localhost:11434/v1',
|
||||
model: 'gemma4:31b-cloud',
|
||||
})
|
||||
expect(onDone).toHaveBeenCalledWith(
|
||||
expect.objectContaining({
|
||||
action: 'saved',
|
||||
message: 'Provider configured: Ollama',
|
||||
}),
|
||||
)
|
||||
|
||||
await mounted.dispose()
|
||||
})
|
||||
|
||||
test('ProviderManager avoids first-frame false negative while stored-token lookup is pending', async () => {
|
||||
delete process.env.CLAUDE_CODE_USE_GITHUB
|
||||
delete process.env.GITHUB_TOKEN
|
||||
|
||||
@@ -3,7 +3,6 @@ import * as React from 'react'
|
||||
import { Box, Text } from '../ink.js'
|
||||
import { useKeybinding } from '../keybindings/useKeybinding.js'
|
||||
import type { ProviderProfile } from '../utils/config.js'
|
||||
import { hasLocalOllama, listOllamaModels } from '../utils/providerDiscovery.js'
|
||||
import {
|
||||
addProviderProfile,
|
||||
applyActiveProviderProfileFromConfig,
|
||||
@@ -16,10 +15,6 @@ import {
|
||||
type ProviderProfileInput,
|
||||
updateProviderProfile,
|
||||
} from '../utils/providerProfiles.js'
|
||||
import {
|
||||
rankOllamaModels,
|
||||
recommendOllamaModel,
|
||||
} from '../utils/providerRecommendation.js'
|
||||
import {
|
||||
clearGithubModelsToken,
|
||||
GITHUB_MODELS_HYDRATED_ENV_MARKER,
|
||||
@@ -29,7 +24,7 @@ import {
|
||||
} from '../utils/githubModelsCredentials.js'
|
||||
import { isEnvTruthy } from '../utils/envUtils.js'
|
||||
import { updateSettingsForSource } from '../utils/settings/settings.js'
|
||||
import { type OptionWithDescription, Select } from './CustomSelect/index.js'
|
||||
import { Select } from './CustomSelect/index.js'
|
||||
import { Pane } from './design-system/Pane.js'
|
||||
import TextInput from './TextInput.js'
|
||||
|
||||
@@ -47,7 +42,6 @@ type Props = {
|
||||
type Screen =
|
||||
| 'menu'
|
||||
| 'select-preset'
|
||||
| 'select-ollama-model'
|
||||
| 'form'
|
||||
| 'select-active'
|
||||
| 'select-edit'
|
||||
@@ -57,16 +51,6 @@ type DraftField = 'name' | 'baseUrl' | 'model' | 'apiKey'
|
||||
|
||||
type ProviderDraft = Record<DraftField, string>
|
||||
|
||||
type OllamaSelectionState =
|
||||
| { state: 'idle' }
|
||||
| { state: 'loading' }
|
||||
| {
|
||||
state: 'ready'
|
||||
options: OptionWithDescription<string>[]
|
||||
defaultValue?: string
|
||||
}
|
||||
| { state: 'unavailable'; message: string }
|
||||
|
||||
const FORM_STEPS: Array<{
|
||||
key: DraftField
|
||||
label: string
|
||||
@@ -226,9 +210,6 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
|
||||
const [cursorOffset, setCursorOffset] = React.useState(0)
|
||||
const [statusMessage, setStatusMessage] = React.useState<string | undefined>()
|
||||
const [errorMessage, setErrorMessage] = React.useState<string | undefined>()
|
||||
const [ollamaSelection, setOllamaSelection] = React.useState<OllamaSelectionState>({
|
||||
state: 'idle',
|
||||
})
|
||||
|
||||
const currentStep = FORM_STEPS[formStepIndex] ?? FORM_STEPS[0]
|
||||
const currentStepKey = currentStep.key
|
||||
@@ -383,59 +364,6 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
|
||||
return null
|
||||
}
|
||||
|
||||
React.useEffect(() => {
|
||||
if (screen !== 'select-ollama-model') {
|
||||
return
|
||||
}
|
||||
|
||||
let cancelled = false
|
||||
setOllamaSelection({ state: 'loading' })
|
||||
|
||||
void (async () => {
|
||||
const available = await hasLocalOllama(draft.baseUrl)
|
||||
if (!available) {
|
||||
if (!cancelled) {
|
||||
setOllamaSelection({
|
||||
state: 'unavailable',
|
||||
message:
|
||||
'Could not reach Ollama. Start Ollama first, or enter the endpoint manually.',
|
||||
})
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
const models = await listOllamaModels(draft.baseUrl)
|
||||
if (models.length === 0) {
|
||||
if (!cancelled) {
|
||||
setOllamaSelection({
|
||||
state: 'unavailable',
|
||||
message:
|
||||
'Ollama is running, but no installed models were found. Pull a chat model such as qwen2.5-coder:7b or llama3.1:8b first, or enter details manually.',
|
||||
})
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
const ranked = rankOllamaModels(models, 'balanced')
|
||||
const recommended = recommendOllamaModel(models, 'balanced')
|
||||
if (!cancelled) {
|
||||
setOllamaSelection({
|
||||
state: 'ready',
|
||||
defaultValue: recommended?.name ?? ranked[0]?.name,
|
||||
options: ranked.map(model => ({
|
||||
label: model.name,
|
||||
value: model.name,
|
||||
description: model.summary,
|
||||
})),
|
||||
})
|
||||
}
|
||||
})()
|
||||
|
||||
return () => {
|
||||
cancelled = true
|
||||
}
|
||||
}, [draft.baseUrl, screen])
|
||||
|
||||
function startCreateFromPreset(preset: ProviderPreset): void {
|
||||
const defaults = getProviderPresetDefaults(preset)
|
||||
const nextDraft = {
|
||||
@@ -450,13 +378,6 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
|
||||
setFormStepIndex(0)
|
||||
setCursorOffset(nextDraft.name.length)
|
||||
setErrorMessage(undefined)
|
||||
|
||||
if (preset === 'ollama') {
|
||||
setOllamaSelection({ state: 'loading' })
|
||||
setScreen('select-ollama-model')
|
||||
return
|
||||
}
|
||||
|
||||
setScreen('form')
|
||||
}
|
||||
|
||||
@@ -476,13 +397,13 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
|
||||
setScreen('form')
|
||||
}
|
||||
|
||||
function persistDraft(nextDraft: ProviderDraft = draft): void {
|
||||
function persistDraft(): void {
|
||||
const payload: ProviderProfileInput = {
|
||||
provider: draftProvider,
|
||||
name: nextDraft.name,
|
||||
baseUrl: nextDraft.baseUrl,
|
||||
model: nextDraft.model,
|
||||
apiKey: nextDraft.apiKey,
|
||||
name: draft.name,
|
||||
baseUrl: draft.baseUrl,
|
||||
model: draft.model,
|
||||
apiKey: draft.apiKey,
|
||||
}
|
||||
|
||||
const saved = editingProfileId
|
||||
@@ -525,83 +446,6 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
|
||||
setScreen('menu')
|
||||
}
|
||||
|
||||
function renderOllamaSelection(): React.ReactNode {
|
||||
if (ollamaSelection.state === 'loading' || ollamaSelection.state === 'idle') {
|
||||
return (
|
||||
<Box flexDirection="column" gap={1}>
|
||||
<Text color="remember" bold>
|
||||
Checking Ollama
|
||||
</Text>
|
||||
<Text dimColor>Looking for installed Ollama models...</Text>
|
||||
</Box>
|
||||
)
|
||||
}
|
||||
|
||||
if (ollamaSelection.state === 'unavailable') {
|
||||
return (
|
||||
<Box flexDirection="column" gap={1}>
|
||||
<Text color="remember" bold>
|
||||
Ollama setup
|
||||
</Text>
|
||||
<Text dimColor>{ollamaSelection.message}</Text>
|
||||
<Select
|
||||
options={[
|
||||
{
|
||||
value: 'manual',
|
||||
label: 'Enter manually',
|
||||
description: 'Fill in the base URL and model yourself',
|
||||
},
|
||||
{
|
||||
value: 'back',
|
||||
label: 'Back',
|
||||
description: 'Choose another provider preset',
|
||||
},
|
||||
]}
|
||||
onChange={value => {
|
||||
if (value === 'manual') {
|
||||
setFormStepIndex(0)
|
||||
setCursorOffset(draft.name.length)
|
||||
setScreen('form')
|
||||
return
|
||||
}
|
||||
setScreen('select-preset')
|
||||
}}
|
||||
onCancel={() => setScreen('select-preset')}
|
||||
visibleOptionCount={2}
|
||||
/>
|
||||
</Box>
|
||||
)
|
||||
}
|
||||
|
||||
return (
|
||||
<Box flexDirection="column" gap={1}>
|
||||
<Text color="remember" bold>
|
||||
Choose an Ollama model
|
||||
</Text>
|
||||
<Text dimColor>
|
||||
Pick one of the installed Ollama models to save into a local provider
|
||||
profile.
|
||||
</Text>
|
||||
<Select
|
||||
options={ollamaSelection.options}
|
||||
defaultValue={ollamaSelection.defaultValue}
|
||||
defaultFocusValue={ollamaSelection.defaultValue}
|
||||
inlineDescriptions
|
||||
visibleOptionCount={Math.min(8, ollamaSelection.options.length)}
|
||||
onChange={value => {
|
||||
const nextDraft = {
|
||||
...draft,
|
||||
model: value,
|
||||
}
|
||||
setDraft(nextDraft)
|
||||
persistDraft(nextDraft)
|
||||
}}
|
||||
onCancel={() => setScreen('select-preset')}
|
||||
/>
|
||||
</Box>
|
||||
)
|
||||
}
|
||||
|
||||
function handleFormSubmit(value: string): void {
|
||||
const trimmed = value.trim()
|
||||
|
||||
@@ -626,7 +470,7 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
|
||||
return
|
||||
}
|
||||
|
||||
persistDraft(nextDraft)
|
||||
persistDraft()
|
||||
}
|
||||
|
||||
function handleBackFromForm(): void {
|
||||
@@ -979,9 +823,6 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
|
||||
case 'select-preset':
|
||||
content = renderPresetSelection()
|
||||
break
|
||||
case 'select-ollama-model':
|
||||
content = renderOllamaSelection()
|
||||
break
|
||||
case 'form':
|
||||
content = renderForm()
|
||||
break
|
||||
|
||||
@@ -7,8 +7,6 @@
|
||||
|
||||
import { isLocalProviderUrl } from '../services/api/providerConfig.js'
|
||||
import { getLocalOpenAICompatibleProviderLabel } from '../utils/providerDiscovery.js'
|
||||
import { getSettings_DEPRECATED } from '../utils/settings/settings.js'
|
||||
import { parseUserSpecifiedModel } from '../utils/model/model.js'
|
||||
|
||||
declare const MACRO: { VERSION: string; DISPLAY_VERSION?: string }
|
||||
|
||||
@@ -87,7 +85,6 @@ function detectProvider(): { name: string; model: string; baseUrl: string; isLoc
|
||||
const useGemini = process.env.CLAUDE_CODE_USE_GEMINI === '1' || process.env.CLAUDE_CODE_USE_GEMINI === 'true'
|
||||
const useGithub = process.env.CLAUDE_CODE_USE_GITHUB === '1' || process.env.CLAUDE_CODE_USE_GITHUB === 'true'
|
||||
const useOpenAI = process.env.CLAUDE_CODE_USE_OPENAI === '1' || process.env.CLAUDE_CODE_USE_OPENAI === 'true'
|
||||
const useMistral = process.env.CLAUDE_CODE_USE_MISTRAL === '1' || process.env.CLAUDE_CODE_USE_MISTRAL === 'true'
|
||||
|
||||
if (useGemini) {
|
||||
const model = process.env.GEMINI_MODEL || 'gemini-2.0-flash'
|
||||
@@ -95,12 +92,6 @@ function detectProvider(): { name: string; model: string; baseUrl: string; isLoc
|
||||
return { name: 'Google Gemini', model, baseUrl, isLocal: false }
|
||||
}
|
||||
|
||||
if (useMistral) {
|
||||
const model = process.env.MISTRAL_MODEL || 'devstral-latest'
|
||||
const baseUrl = process.env.MISTRAL_BASE_URL || 'https://api.mistral.ai/v1'
|
||||
return { name: 'Mistral', model, baseUrl, isLocal: false }
|
||||
}
|
||||
|
||||
if (useGithub) {
|
||||
const model = process.env.OPENAI_MODEL || 'github:copilot'
|
||||
const baseUrl =
|
||||
@@ -148,11 +139,9 @@ function detectProvider(): { name: string; model: string; baseUrl: string; isLoc
|
||||
return { name, model: displayModel, baseUrl, isLocal }
|
||||
}
|
||||
|
||||
// Default: Anthropic - check settings.model first, then env vars
|
||||
const settings = getSettings_DEPRECATED() || {}
|
||||
const modelSetting = settings.model || process.env.ANTHROPIC_MODEL || process.env.CLAUDE_MODEL || 'claude-sonnet-4-6'
|
||||
const resolvedModel = parseUserSpecifiedModel(modelSetting)
|
||||
return { name: 'Anthropic', model: resolvedModel, baseUrl: 'https://api.anthropic.com', isLocal: false }
|
||||
// Default: Anthropic
|
||||
const model = process.env.ANTHROPIC_MODEL || process.env.CLAUDE_MODEL || 'claude-sonnet-4-6'
|
||||
return { name: 'Anthropic', model, baseUrl: 'https://api.anthropic.com', isLocal: false }
|
||||
}
|
||||
|
||||
// ─── Box drawing ──────────────────────────────────────────────────────────────
|
||||
|
||||
@@ -1,231 +0,0 @@
|
||||
import { PassThrough } from 'node:stream'
|
||||
|
||||
import { expect, test } from 'bun:test'
|
||||
import React from 'react'
|
||||
import stripAnsi from 'strip-ansi'
|
||||
|
||||
import { createRoot } from '../ink.js'
|
||||
import { AppStateProvider } from '../state/AppState.js'
|
||||
import TextInput from './TextInput.js'
|
||||
import VimTextInput from './VimTextInput.js'
|
||||
|
||||
const SYNC_START = '\x1B[?2026h'
|
||||
const SYNC_END = '\x1B[?2026l'
|
||||
|
||||
function extractLastFrame(output: string): string {
|
||||
let lastFrame: string | null = null
|
||||
let cursor = 0
|
||||
|
||||
while (cursor < output.length) {
|
||||
const start = output.indexOf(SYNC_START, cursor)
|
||||
if (start === -1) {
|
||||
break
|
||||
}
|
||||
|
||||
const contentStart = start + SYNC_START.length
|
||||
const end = output.indexOf(SYNC_END, contentStart)
|
||||
if (end === -1) {
|
||||
break
|
||||
}
|
||||
|
||||
const frame = output.slice(contentStart, end)
|
||||
if (frame.trim().length > 0) {
|
||||
lastFrame = frame
|
||||
}
|
||||
cursor = end + SYNC_END.length
|
||||
}
|
||||
|
||||
return lastFrame ?? output
|
||||
}
|
||||
|
||||
function createTestStreams(): {
|
||||
stdout: PassThrough
|
||||
stdin: PassThrough & {
|
||||
isTTY: boolean
|
||||
setRawMode: (mode: boolean) => void
|
||||
ref: () => void
|
||||
unref: () => void
|
||||
}
|
||||
getOutput: () => string
|
||||
} {
|
||||
let output = ''
|
||||
const stdout = new PassThrough()
|
||||
const stdin = new PassThrough() as PassThrough & {
|
||||
isTTY: boolean
|
||||
setRawMode: (mode: boolean) => void
|
||||
ref: () => void
|
||||
unref: () => void
|
||||
}
|
||||
|
||||
stdin.isTTY = true
|
||||
stdin.setRawMode = () => {}
|
||||
stdin.ref = () => {}
|
||||
stdin.unref = () => {}
|
||||
;(stdout as unknown as { columns: number }).columns = 120
|
||||
stdout.on('data', chunk => {
|
||||
output += chunk.toString()
|
||||
})
|
||||
|
||||
return {
|
||||
stdout,
|
||||
stdin,
|
||||
getOutput: () => output,
|
||||
}
|
||||
}
|
||||
|
||||
function DelayedControlledTextInput(): React.ReactNode {
|
||||
const [value, setValue] = React.useState('')
|
||||
const [cursorOffset, setCursorOffset] = React.useState(0)
|
||||
const valueTimerRef = React.useRef<ReturnType<typeof setTimeout> | null>(null)
|
||||
const offsetTimerRef = React.useRef<ReturnType<typeof setTimeout> | null>(null)
|
||||
|
||||
React.useEffect(() => {
|
||||
return () => {
|
||||
if (valueTimerRef.current) {
|
||||
clearTimeout(valueTimerRef.current)
|
||||
}
|
||||
if (offsetTimerRef.current) {
|
||||
clearTimeout(offsetTimerRef.current)
|
||||
}
|
||||
}
|
||||
}, [])
|
||||
|
||||
return (
|
||||
<AppStateProvider>
|
||||
<TextInput
|
||||
value={value}
|
||||
onChange={nextValue => {
|
||||
if (valueTimerRef.current) {
|
||||
clearTimeout(valueTimerRef.current)
|
||||
}
|
||||
valueTimerRef.current = setTimeout(() => {
|
||||
setValue(nextValue)
|
||||
}, 200)
|
||||
}}
|
||||
onSubmit={() => {}}
|
||||
placeholder="Type here..."
|
||||
columns={60}
|
||||
cursorOffset={cursorOffset}
|
||||
onChangeCursorOffset={nextOffset => {
|
||||
if (offsetTimerRef.current) {
|
||||
clearTimeout(offsetTimerRef.current)
|
||||
}
|
||||
offsetTimerRef.current = setTimeout(() => {
|
||||
setCursorOffset(nextOffset)
|
||||
}, 200)
|
||||
}}
|
||||
focus
|
||||
showCursor
|
||||
multiline
|
||||
/>
|
||||
</AppStateProvider>
|
||||
)
|
||||
}
|
||||
|
||||
function DelayedControlledVimTextInput(): React.ReactNode {
|
||||
const [value, setValue] = React.useState('')
|
||||
const [cursorOffset, setCursorOffset] = React.useState(0)
|
||||
const valueTimerRef = React.useRef<ReturnType<typeof setTimeout> | null>(null)
|
||||
const offsetTimerRef = React.useRef<ReturnType<typeof setTimeout> | null>(null)
|
||||
|
||||
React.useEffect(() => {
|
||||
return () => {
|
||||
if (valueTimerRef.current) {
|
||||
clearTimeout(valueTimerRef.current)
|
||||
}
|
||||
if (offsetTimerRef.current) {
|
||||
clearTimeout(offsetTimerRef.current)
|
||||
}
|
||||
}
|
||||
}, [])
|
||||
|
||||
return (
|
||||
<AppStateProvider>
|
||||
<VimTextInput
|
||||
value={value}
|
||||
onChange={nextValue => {
|
||||
if (valueTimerRef.current) {
|
||||
clearTimeout(valueTimerRef.current)
|
||||
}
|
||||
valueTimerRef.current = setTimeout(() => {
|
||||
setValue(nextValue)
|
||||
}, 200)
|
||||
}}
|
||||
onSubmit={() => {}}
|
||||
placeholder="Type here..."
|
||||
columns={60}
|
||||
cursorOffset={cursorOffset}
|
||||
onChangeCursorOffset={nextOffset => {
|
||||
if (offsetTimerRef.current) {
|
||||
clearTimeout(offsetTimerRef.current)
|
||||
}
|
||||
offsetTimerRef.current = setTimeout(() => {
|
||||
setCursorOffset(nextOffset)
|
||||
}, 200)
|
||||
}}
|
||||
initialMode="INSERT"
|
||||
focus
|
||||
showCursor
|
||||
multiline
|
||||
/>
|
||||
</AppStateProvider>
|
||||
)
|
||||
}
|
||||
|
||||
test('TextInput renders typed characters before delayed parent value commits', async () => {
|
||||
const { stdout, stdin, getOutput } = createTestStreams()
|
||||
const root = await createRoot({
|
||||
stdout: stdout as unknown as NodeJS.WriteStream,
|
||||
stdin: stdin as unknown as NodeJS.ReadStream,
|
||||
patchConsole: false,
|
||||
})
|
||||
|
||||
root.render(<DelayedControlledTextInput />)
|
||||
|
||||
await Bun.sleep(50)
|
||||
stdin.write('a')
|
||||
await Bun.sleep(25)
|
||||
stdin.write('b')
|
||||
await Bun.sleep(25)
|
||||
|
||||
const output = stripAnsi(extractLastFrame(getOutput()))
|
||||
|
||||
root.unmount()
|
||||
stdin.end()
|
||||
stdout.end()
|
||||
await Bun.sleep(25)
|
||||
|
||||
expect(output).toContain('ab')
|
||||
expect(output).not.toContain('Type here...')
|
||||
})
|
||||
|
||||
test('VimTextInput preserves rapid typed characters before delayed parent value commits', async () => {
|
||||
const { stdout, stdin, getOutput } = createTestStreams()
|
||||
const root = await createRoot({
|
||||
stdout: stdout as unknown as NodeJS.WriteStream,
|
||||
stdin: stdin as unknown as NodeJS.ReadStream,
|
||||
patchConsole: false,
|
||||
})
|
||||
|
||||
root.render(<DelayedControlledVimTextInput />)
|
||||
|
||||
await Bun.sleep(50)
|
||||
stdin.write('a')
|
||||
await Bun.sleep(25)
|
||||
stdin.write('s')
|
||||
await Bun.sleep(25)
|
||||
stdin.write('d')
|
||||
await Bun.sleep(25)
|
||||
stdin.write('f')
|
||||
await Bun.sleep(25)
|
||||
|
||||
const output = stripAnsi(extractLastFrame(getOutput()))
|
||||
|
||||
root.unmount()
|
||||
stdin.end()
|
||||
stdout.end()
|
||||
await Bun.sleep(25)
|
||||
|
||||
expect(output).toContain('asdf')
|
||||
expect(output).not.toContain('Type here...')
|
||||
})
|
||||
@@ -1,161 +1,113 @@
|
||||
import { PassThrough } from 'node:stream'
|
||||
import { describe, expect, it, mock } from 'bun:test'
|
||||
|
||||
import { afterEach, expect, mock, test } from 'bun:test'
|
||||
import React from 'react'
|
||||
import stripAnsi from 'strip-ansi'
|
||||
|
||||
import { createRoot, Text, useTheme } from '../ink.js'
|
||||
import { KeybindingSetup } from '../keybindings/KeybindingProviderSetup.js'
|
||||
import { AppStateProvider } from '../state/AppState.js'
|
||||
import { ThemeProvider } from './design-system/ThemeProvider.js'
|
||||
|
||||
mock.module('./StructuredDiff.js', () => ({
|
||||
StructuredDiff: function StructuredDiffPreview(): React.ReactNode {
|
||||
const [theme] = useTheme()
|
||||
return <Text>{`Preview theme: ${theme}`}</Text>
|
||||
},
|
||||
}))
|
||||
|
||||
mock.module('./StructuredDiff/colorDiff.js', () => ({
|
||||
getColorModuleUnavailableReason: () => 'env',
|
||||
getSyntaxTheme: () => null,
|
||||
}))
|
||||
|
||||
const SYNC_START = '\x1B[?2026h'
|
||||
const SYNC_END = '\x1B[?2026l'
|
||||
|
||||
function extractLastFrame(output: string): string {
|
||||
let lastFrame: string | null = null
|
||||
let cursor = 0
|
||||
|
||||
while (cursor < output.length) {
|
||||
const start = output.indexOf(SYNC_START, cursor)
|
||||
if (start === -1) {
|
||||
break
|
||||
}
|
||||
|
||||
const contentStart = start + SYNC_START.length
|
||||
const end = output.indexOf(SYNC_END, contentStart)
|
||||
if (end === -1) {
|
||||
break
|
||||
}
|
||||
|
||||
const frame = output.slice(contentStart, end)
|
||||
if (frame.trim().length > 0) {
|
||||
lastFrame = frame
|
||||
}
|
||||
cursor = end + SYNC_END.length
|
||||
}
|
||||
|
||||
return lastFrame ?? output
|
||||
}
|
||||
|
||||
function createTestStreams(): {
|
||||
stdout: PassThrough
|
||||
stdin: PassThrough & {
|
||||
isTTY: boolean
|
||||
setRawMode: (mode: boolean) => void
|
||||
ref: () => void
|
||||
unref: () => void
|
||||
}
|
||||
getOutput: () => string
|
||||
} {
|
||||
let output = ''
|
||||
const stdout = new PassThrough()
|
||||
const stdin = new PassThrough() as PassThrough & {
|
||||
isTTY: boolean
|
||||
setRawMode: (mode: boolean) => void
|
||||
ref: () => void
|
||||
unref: () => void
|
||||
}
|
||||
|
||||
stdin.isTTY = true
|
||||
stdin.setRawMode = () => {}
|
||||
stdin.ref = () => {}
|
||||
stdin.unref = () => {}
|
||||
;(stdout as unknown as { columns: number }).columns = 120
|
||||
stdout.on('data', chunk => {
|
||||
output += chunk.toString()
|
||||
// We can't fully render ThemePicker due to complex dependencies
|
||||
// But we can test the theme options generation logic
|
||||
describe('ThemePicker', () => {
|
||||
describe('theme options', () => {
|
||||
it('generates correct theme options without AUTO_THEME feature flag', () => {
|
||||
// Since we can't easily mock bun:bundle, test the options structure
|
||||
// The real test would require integration testing
|
||||
const expectedOptions = [
|
||||
{ label: "Dark mode", value: "dark" },
|
||||
{ label: "Light mode", value: "light" },
|
||||
{ label: "Dark mode (colorblind-friendly)", value: "dark-daltonized" },
|
||||
{ label: "Light mode (colorblind-friendly)", value: "light-daltonized" },
|
||||
{ label: "Dark mode (ANSI colors only)", value: "dark-ansi" },
|
||||
{ label: "Light mode (ANSI colors only)", value: "light-ansi" },
|
||||
]
|
||||
expect(expectedOptions.length).toBe(6)
|
||||
})
|
||||
|
||||
return {
|
||||
stdout,
|
||||
stdin,
|
||||
getOutput: () => output,
|
||||
}
|
||||
}
|
||||
|
||||
async function waitForCondition(
|
||||
predicate: () => boolean,
|
||||
timeoutMs = 2000,
|
||||
): Promise<void> {
|
||||
const startedAt = Date.now()
|
||||
|
||||
while (Date.now() - startedAt < timeoutMs) {
|
||||
if (predicate()) {
|
||||
return
|
||||
}
|
||||
await Bun.sleep(10)
|
||||
}
|
||||
|
||||
throw new Error('Timed out waiting for ThemePicker test condition')
|
||||
}
|
||||
|
||||
async function waitForFrame(
|
||||
getOutput: () => string,
|
||||
predicate: (frame: string) => boolean,
|
||||
): Promise<string> {
|
||||
let frame = ''
|
||||
|
||||
await waitForCondition(() => {
|
||||
frame = stripAnsi(extractLastFrame(getOutput()))
|
||||
return predicate(frame)
|
||||
it('includes auto theme when AUTO_THEME feature is enabled', () => {
|
||||
// Test the structure when auto is present
|
||||
const optionsWithAuto = [
|
||||
{ label: "Auto (match terminal)", value: "auto" },
|
||||
{ label: "Dark mode", value: "dark" },
|
||||
]
|
||||
expect(optionsWithAuto[0].value).toBe('auto')
|
||||
})
|
||||
})
|
||||
|
||||
return frame
|
||||
}
|
||||
describe('handleRowFocus callback', () => {
|
||||
it('setPreviewTheme is called with theme setting', () => {
|
||||
const setPreviewTheme = mock()
|
||||
const handleRowFocus = (setting: string) => setPreviewTheme(setting)
|
||||
|
||||
afterEach(() => {
|
||||
mock.restore()
|
||||
})
|
||||
|
||||
test('updates the preview when keyboard focus moves to another theme', async () => {
|
||||
const { ThemePicker } = await import('./ThemePicker.js')
|
||||
const { stdout, stdin, getOutput } = createTestStreams()
|
||||
const root = await createRoot({
|
||||
stdout: stdout as unknown as NodeJS.WriteStream,
|
||||
stdin: stdin as unknown as NodeJS.ReadStream,
|
||||
patchConsole: false,
|
||||
})
|
||||
|
||||
root.render(
|
||||
<AppStateProvider>
|
||||
<KeybindingSetup>
|
||||
<ThemeProvider initialState="dark">
|
||||
<ThemePicker onThemeSelect={() => {}} />
|
||||
</ThemeProvider>
|
||||
</KeybindingSetup>
|
||||
</AppStateProvider>,
|
||||
)
|
||||
|
||||
try {
|
||||
const initialFrame = await waitForFrame(
|
||||
getOutput,
|
||||
frame => frame.includes('Preview theme: dark'),
|
||||
)
|
||||
expect(initialFrame).toContain('Preview theme: dark')
|
||||
|
||||
stdin.write('j')
|
||||
|
||||
const updatedFrame = await waitForFrame(
|
||||
getOutput,
|
||||
frame => frame.includes('Preview theme: light'),
|
||||
)
|
||||
expect(updatedFrame).toContain('Preview theme: light')
|
||||
} finally {
|
||||
root.unmount()
|
||||
stdin.end()
|
||||
stdout.end()
|
||||
await Bun.sleep(0)
|
||||
}
|
||||
handleRowFocus('dark')
|
||||
expect(setPreviewTheme).toHaveBeenCalledWith('dark')
|
||||
})
|
||||
})
|
||||
|
||||
describe('handleSelect callback', () => {
|
||||
it('calls savePreview and onThemeSelect', () => {
|
||||
const savePreview = mock()
|
||||
const onThemeSelect = mock()
|
||||
const handleSelect = (setting: string) => {
|
||||
savePreview()
|
||||
onThemeSelect(setting)
|
||||
}
|
||||
|
||||
handleSelect('light')
|
||||
expect(savePreview).toHaveBeenCalled()
|
||||
expect(onThemeSelect).toHaveBeenCalledWith('light')
|
||||
})
|
||||
})
|
||||
|
||||
describe('handleCancel callback', () => {
|
||||
it('calls cancelPreview and gracefulShutdown when not skipExitHandling', () => {
|
||||
const cancelPreview = mock()
|
||||
const gracefulShutdown = mock()
|
||||
const handleCancel = (skipExitHandling: boolean, onCancelProp?: () => void) => {
|
||||
cancelPreview()
|
||||
if (skipExitHandling) {
|
||||
onCancelProp?.()
|
||||
} else {
|
||||
gracefulShutdown(0)
|
||||
}
|
||||
}
|
||||
|
||||
handleCancel(false)
|
||||
expect(cancelPreview).toHaveBeenCalled()
|
||||
expect(gracefulShutdown).toHaveBeenCalledWith(0)
|
||||
})
|
||||
|
||||
it('calls onCancelProp when skipExitHandling is true', () => {
|
||||
const cancelPreview = mock()
|
||||
const onCancelProp = mock()
|
||||
const handleCancel = (skipExitHandling: boolean, onCancelProp?: () => void) => {
|
||||
cancelPreview()
|
||||
if (skipExitHandling) {
|
||||
onCancelProp?.()
|
||||
}
|
||||
}
|
||||
|
||||
handleCancel(true, onCancelProp)
|
||||
expect(cancelPreview).toHaveBeenCalled()
|
||||
expect(onCancelProp).toHaveBeenCalled()
|
||||
})
|
||||
})
|
||||
|
||||
describe('syntax hint logic', () => {
|
||||
it('shows disabled hint when syntax highlighting is disabled', () => {
|
||||
const syntaxHighlightingDisabled = true
|
||||
const syntaxToggleShortcut = 'Ctrl+T'
|
||||
|
||||
const hint = syntaxHighlightingDisabled
|
||||
? `Syntax highlighting disabled (${syntaxToggleShortcut} to enable)`
|
||||
: `Syntax highlighting enabled (${syntaxToggleShortcut} to disable)`
|
||||
|
||||
expect(hint).toContain('disabled')
|
||||
})
|
||||
|
||||
it('shows enabled hint when syntax highlighting is active', () => {
|
||||
const syntaxHighlightingDisabled = false
|
||||
const syntaxToggleShortcut = 'Ctrl+T'
|
||||
|
||||
const hint = !syntaxHighlightingDisabled
|
||||
? `Syntax highlighting enabled (${syntaxToggleShortcut} to disable)`
|
||||
: `Syntax highlighting disabled (${syntaxToggleShortcut} to enable)`
|
||||
|
||||
expect(hint).toContain('enabled')
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
@@ -2,7 +2,7 @@ import { c as _c } from "react-compiler-runtime";
|
||||
import { feature } from 'bun:bundle';
|
||||
import chalk from 'chalk';
|
||||
import { mkdir } from 'fs/promises';
|
||||
import { basename, join } from 'path';
|
||||
import { join } from 'path';
|
||||
import * as React from 'react';
|
||||
import { use, useEffect, useState } from 'react';
|
||||
import { getOriginalCwd } from '../../bootstrap/state.js';
|
||||
@@ -24,7 +24,6 @@ import { projectIsInGitRepo } from '../../utils/memory/versions.js';
|
||||
import { updateSettingsForSource } from '../../utils/settings/settings.js';
|
||||
import { Select } from '../CustomSelect/index.js';
|
||||
import { ListItem } from '../design-system/ListItem.js';
|
||||
import { getProjectMemoryPathForSelector } from './memoryFileSelectorPaths.js';
|
||||
|
||||
/* eslint-disable @typescript-eslint/no-require-imports */
|
||||
const teamMemPaths = feature('TEAMMEM') ? require('../../memdir/teamMemPaths.js') as typeof import('../../memdir/teamMemPaths.js') : null;
|
||||
@@ -49,10 +48,8 @@ export function MemoryFileSelector(t0) {
|
||||
onCancel
|
||||
} = t0;
|
||||
const existingMemoryFiles = use(getMemoryFiles());
|
||||
const originalCwd = getOriginalCwd();
|
||||
const userMemoryPath = join(getClaudeConfigHomeDir(), "CLAUDE.md");
|
||||
const projectMemoryPath = getProjectMemoryPathForSelector(existingMemoryFiles, originalCwd);
|
||||
const projectMemoryFileName = basename(projectMemoryPath);
|
||||
const projectMemoryPath = join(getOriginalCwd(), "CLAUDE.md");
|
||||
const hasUserMemory = existingMemoryFiles.some(f => f.path === userMemoryPath);
|
||||
const hasProjectMemory = existingMemoryFiles.some(f_0 => f_0.path === projectMemoryPath);
|
||||
const allMemoryFiles = [...existingMemoryFiles.filter(_temp).map(_temp2), ...(hasUserMemory ? [] : [{
|
||||
@@ -88,12 +85,12 @@ export function MemoryFileSelector(t0) {
|
||||
}
|
||||
}
|
||||
let description;
|
||||
const isGit = projectIsInGitRepo(originalCwd);
|
||||
const isGit = projectIsInGitRepo(getOriginalCwd());
|
||||
if (file.type === "User" && !file.isNested) {
|
||||
description = "Saved in ~/.claude/CLAUDE.md";
|
||||
} else {
|
||||
if (file.type === "Project" && !file.isNested && file.path === projectMemoryPath) {
|
||||
description = `${isGit ? "Checked in at" : "Saved in"} ./${projectMemoryFileName}`;
|
||||
description = `${isGit ? "Checked in at" : "Saved in"} ./CLAUDE.md`;
|
||||
} else {
|
||||
if (file.parent) {
|
||||
description = "@-imported";
|
||||
|
||||
@@ -1,69 +0,0 @@
|
||||
import { describe, expect, test } from 'bun:test'
|
||||
import { join } from 'node:path'
|
||||
|
||||
import type { MemoryFileInfo } from '../../utils/claudemd.js'
|
||||
import { getProjectMemoryPathForSelector } from './memoryFileSelectorPaths.js'
|
||||
|
||||
function projectFile(path: string): MemoryFileInfo {
|
||||
return {
|
||||
path,
|
||||
type: 'Project',
|
||||
content: '',
|
||||
}
|
||||
}
|
||||
|
||||
describe('getProjectMemoryPathForSelector', () => {
|
||||
test('uses the loaded repo-level AGENTS.md from a nested cwd', () => {
|
||||
const repoDir = '/repo'
|
||||
const nestedDir = join(repoDir, 'packages', 'app')
|
||||
|
||||
expect(
|
||||
getProjectMemoryPathForSelector(
|
||||
[projectFile(join(repoDir, 'AGENTS.md'))],
|
||||
nestedDir,
|
||||
),
|
||||
).toBe(join(repoDir, 'AGENTS.md'))
|
||||
})
|
||||
|
||||
test('uses the loaded repo-level CLAUDE.md fallback from a nested cwd', () => {
|
||||
const repoDir = '/repo'
|
||||
const nestedDir = join(repoDir, 'packages', 'app')
|
||||
|
||||
expect(
|
||||
getProjectMemoryPathForSelector(
|
||||
[projectFile(join(repoDir, 'CLAUDE.md'))],
|
||||
nestedDir,
|
||||
),
|
||||
).toBe(join(repoDir, 'CLAUDE.md'))
|
||||
})
|
||||
|
||||
test('prefers the closest loaded ancestor instruction file', () => {
|
||||
const repoDir = '/repo'
|
||||
const nestedProjectDir = join(repoDir, 'packages', 'app')
|
||||
|
||||
expect(
|
||||
getProjectMemoryPathForSelector(
|
||||
[
|
||||
projectFile(join(repoDir, 'AGENTS.md')),
|
||||
projectFile(join(nestedProjectDir, 'CLAUDE.md')),
|
||||
],
|
||||
join(nestedProjectDir, 'src'),
|
||||
),
|
||||
).toBe(join(nestedProjectDir, 'CLAUDE.md'))
|
||||
})
|
||||
|
||||
test('defaults to a new AGENTS.md in the current cwd when no project file is loaded', () => {
|
||||
expect(getProjectMemoryPathForSelector([], '/repo/packages/app')).toBe(
|
||||
'/repo/packages/app/AGENTS.md',
|
||||
)
|
||||
})
|
||||
|
||||
test('ignores loaded project instruction files outside the current cwd ancestry', () => {
|
||||
expect(
|
||||
getProjectMemoryPathForSelector(
|
||||
[projectFile('/other-worktree/AGENTS.md')],
|
||||
'/repo/packages/app',
|
||||
),
|
||||
).toBe('/repo/packages/app/AGENTS.md')
|
||||
})
|
||||
})
|
||||
@@ -1,34 +0,0 @@
|
||||
import { basename, join } from 'path'
|
||||
|
||||
import type { MemoryFileInfo } from '../../utils/claudemd.js'
|
||||
import {
|
||||
findProjectInstructionFilePathInAncestors,
|
||||
isProjectInstructionFileName,
|
||||
PRIMARY_PROJECT_INSTRUCTION_FILE,
|
||||
} from '../../utils/projectInstructions.js'
|
||||
|
||||
function isLoadedProjectInstructionFile(file: MemoryFileInfo): boolean {
|
||||
return (
|
||||
file.type === 'Project' &&
|
||||
file.parent === undefined &&
|
||||
isProjectInstructionFileName(basename(file.path))
|
||||
)
|
||||
}
|
||||
|
||||
export function getProjectMemoryPathForSelector(
|
||||
existingMemoryFiles: MemoryFileInfo[],
|
||||
cwd: string,
|
||||
): string {
|
||||
const loadedProjectInstructionPaths = new Set(
|
||||
existingMemoryFiles
|
||||
.filter(isLoadedProjectInstructionFile)
|
||||
.map(file => file.path),
|
||||
)
|
||||
|
||||
return (
|
||||
findProjectInstructionFilePathInAncestors(
|
||||
cwd,
|
||||
path => loadedProjectInstructionPaths.has(path),
|
||||
) ?? join(cwd, PRIMARY_PROJECT_INSTRUCTION_FILE)
|
||||
)
|
||||
}
|
||||
@@ -32,7 +32,7 @@ export function optionForPermissionSaveDestination(saveDestination: EditableSett
|
||||
case 'userSettings':
|
||||
return {
|
||||
label: 'User settings',
|
||||
description: `Saved in ~/.openclaude/settings.json`,
|
||||
description: `Saved in at ~/.claude/settings.json`,
|
||||
value: saveDestination
|
||||
};
|
||||
}
|
||||
|
||||
@@ -33,14 +33,14 @@ export const IMAGE_TARGET_RAW_SIZE = (API_IMAGE_MAX_BASE64_SIZE * 3) / 4 // 3.75
|
||||
*
|
||||
* Note: The API internally resizes images larger than 1568px (source:
|
||||
* encoding/full_encoding.py), but this is handled server-side and doesn't
|
||||
* cause errors. These client-side limits (1568px) are slightly larger to
|
||||
* cause errors. These client-side limits (2000px) are slightly larger to
|
||||
* preserve quality when beneficial.
|
||||
*
|
||||
* The API_IMAGE_MAX_BASE64_SIZE (5MB) is the actual hard limit that causes
|
||||
* API errors if exceeded.
|
||||
*/
|
||||
export const IMAGE_MAX_WIDTH = 1568
|
||||
export const IMAGE_MAX_HEIGHT = 1568
|
||||
export const IMAGE_MAX_WIDTH = 2000
|
||||
export const IMAGE_MAX_HEIGHT = 2000
|
||||
|
||||
// =============================================================================
|
||||
// PDF LIMITS
|
||||
|
||||
@@ -2,11 +2,8 @@ import { afterEach, expect, test } from 'bun:test'
|
||||
|
||||
import { getSystemPrompt, DEFAULT_AGENT_PROMPT } from './prompts.js'
|
||||
import { CLI_SYSPROMPT_PREFIXES, getCLISyspromptPrefix } from './system.js'
|
||||
import { CLAUDE_CODE_GUIDE_AGENT } from '../tools/AgentTool/built-in/claudeCodeGuideAgent.js'
|
||||
import { GENERAL_PURPOSE_AGENT } from '../tools/AgentTool/built-in/generalPurposeAgent.js'
|
||||
import { EXPLORE_AGENT } from '../tools/AgentTool/built-in/exploreAgent.js'
|
||||
import { PLAN_AGENT } from '../tools/AgentTool/built-in/planAgent.js'
|
||||
import { STATUSLINE_SETUP_AGENT } from '../tools/AgentTool/built-in/statuslineSetup.js'
|
||||
|
||||
const originalSimpleEnv = process.env.CLAUDE_CODE_SIMPLE
|
||||
|
||||
@@ -16,12 +13,10 @@ afterEach(() => {
|
||||
|
||||
test('CLI identity prefixes describe OpenClaude instead of Claude Code', () => {
|
||||
expect(getCLISyspromptPrefix()).toContain('OpenClaude')
|
||||
expect(getCLISyspromptPrefix()).not.toContain('Claude Code')
|
||||
expect(getCLISyspromptPrefix()).not.toContain("Anthropic's official CLI for Claude")
|
||||
|
||||
for (const prefix of CLI_SYSPROMPT_PREFIXES) {
|
||||
expect(prefix).toContain('OpenClaude')
|
||||
expect(prefix).not.toContain('Claude Code')
|
||||
expect(prefix).not.toContain("Anthropic's official CLI for Claude")
|
||||
}
|
||||
})
|
||||
@@ -32,53 +27,22 @@ test('simple mode identity describes OpenClaude instead of Claude Code', async (
|
||||
const prompt = await getSystemPrompt([], 'gpt-4o')
|
||||
|
||||
expect(prompt[0]).toContain('OpenClaude')
|
||||
expect(prompt[0]).not.toContain('Claude Code')
|
||||
expect(prompt[0]).not.toContain("Anthropic's official CLI for Claude")
|
||||
})
|
||||
|
||||
test('built-in agent prompts describe OpenClaude instead of Claude Code', () => {
|
||||
expect(DEFAULT_AGENT_PROMPT).toContain('OpenClaude')
|
||||
expect(DEFAULT_AGENT_PROMPT).not.toContain('Claude Code')
|
||||
expect(DEFAULT_AGENT_PROMPT).not.toContain("Anthropic's official CLI for Claude")
|
||||
|
||||
const generalPrompt = GENERAL_PURPOSE_AGENT.getSystemPrompt({
|
||||
toolUseContext: { options: {} as never },
|
||||
})
|
||||
expect(generalPrompt).toContain('OpenClaude')
|
||||
expect(generalPrompt).not.toContain('Claude Code')
|
||||
expect(generalPrompt).not.toContain("Anthropic's official CLI for Claude")
|
||||
|
||||
const explorePrompt = EXPLORE_AGENT.getSystemPrompt({
|
||||
toolUseContext: { options: {} as never },
|
||||
})
|
||||
expect(explorePrompt).toContain('OpenClaude')
|
||||
expect(explorePrompt).not.toContain('Claude Code')
|
||||
expect(explorePrompt).not.toContain("Anthropic's official CLI for Claude")
|
||||
|
||||
const planPrompt = PLAN_AGENT.getSystemPrompt({
|
||||
toolUseContext: { options: {} as never },
|
||||
})
|
||||
expect(planPrompt).toContain('OpenClaude')
|
||||
expect(planPrompt).not.toContain('Claude Code')
|
||||
|
||||
const statuslinePrompt = STATUSLINE_SETUP_AGENT.getSystemPrompt({
|
||||
toolUseContext: { options: {} as never },
|
||||
})
|
||||
expect(statuslinePrompt).toContain('OpenClaude')
|
||||
expect(statuslinePrompt).not.toContain('Claude Code')
|
||||
|
||||
const guidePrompt = CLAUDE_CODE_GUIDE_AGENT.getSystemPrompt({
|
||||
toolUseContext: {
|
||||
options: {
|
||||
commands: [],
|
||||
agentDefinitions: { activeAgents: [] },
|
||||
mcpClients: [],
|
||||
} as never,
|
||||
},
|
||||
})
|
||||
expect(guidePrompt).toContain('OpenClaude')
|
||||
expect(guidePrompt).toContain('You are the OpenClaude guide agent.')
|
||||
expect(guidePrompt).toContain('**OpenClaude** (the CLI tool)')
|
||||
expect(guidePrompt).not.toContain('You are the Claude guide agent.')
|
||||
expect(guidePrompt).not.toContain('**Claude Code** (the CLI tool)')
|
||||
})
|
||||
|
||||
@@ -214,7 +214,7 @@ function getSimpleDoingTasksSection(): string {
|
||||
]
|
||||
|
||||
const userHelpSubitems = [
|
||||
`/help: Get help with using OpenClaude`,
|
||||
`/help: Get help with using Claude Code`,
|
||||
`To give feedback, users should ${MACRO.ISSUES_EXPLAINER}`,
|
||||
]
|
||||
|
||||
@@ -242,7 +242,7 @@ function getSimpleDoingTasksSection(): string {
|
||||
: []),
|
||||
...(process.env.USER_TYPE === 'ant'
|
||||
? [
|
||||
`If the user reports a bug, slowness, or unexpected behavior with OpenClaude itself (as opposed to asking you to fix their own code), recommend the appropriate slash command: /issue for model-related problems (odd outputs, wrong tool choices, hallucinations, refusals), or /share to upload the full session transcript for product bugs, crashes, slowness, or general issues. Only recommend these when the user is describing a problem with OpenClaude.`,
|
||||
`If the user reports a bug, slowness, or unexpected behavior with Claude Code itself (as opposed to asking you to fix their own code), recommend the appropriate slash command: /issue for model-related problems (odd outputs, wrong tool choices, hallucinations, refusals), or /share to upload the full session transcript for product bugs, crashes, slowness, or general issues. Only recommend these when the user is describing a problem with Claude Code.`,
|
||||
]
|
||||
: []),
|
||||
`If the user asks for help or wants to give feedback inform them of the following:`,
|
||||
@@ -449,7 +449,7 @@ export async function getSystemPrompt(
|
||||
): Promise<string[]> {
|
||||
if (isEnvTruthy(process.env.CLAUDE_CODE_SIMPLE)) {
|
||||
return [
|
||||
`You are OpenClaude, an open-source coding agent and CLI.\n\nCWD: ${getCwd()}\nDate: ${getSessionStartDate()}`,
|
||||
`You are OpenClaude, an open-source fork of Claude Code.\n\nCWD: ${getCwd()}\nDate: ${getSessionStartDate()}`,
|
||||
]
|
||||
}
|
||||
|
||||
@@ -696,10 +696,10 @@ export async function computeSimpleEnvInfo(
|
||||
: `The most recent Claude model family is Claude 4.5/4.6. Model IDs — Opus 4.6: '${CLAUDE_4_5_OR_4_6_MODEL_IDS.opus}', Sonnet 4.6: '${CLAUDE_4_5_OR_4_6_MODEL_IDS.sonnet}', Haiku 4.5: '${CLAUDE_4_5_OR_4_6_MODEL_IDS.haiku}'. When building AI applications, default to the latest and most capable Claude models.`,
|
||||
process.env.USER_TYPE === 'ant' && isUndercover()
|
||||
? null
|
||||
: `OpenClaude is available as a CLI in the terminal and can be used across local development environments and IDE workflows.`,
|
||||
: `Claude Code is available as a CLI in the terminal, desktop app (Mac/Windows), web app (claude.ai/code), and IDE extensions (VS Code, JetBrains).`,
|
||||
process.env.USER_TYPE === 'ant' && isUndercover()
|
||||
? null
|
||||
: `Fast mode for OpenClaude uses the same ${FRONTIER_MODEL_NAME} model with faster output. It does NOT switch to a different model. It can be toggled with /fast.`,
|
||||
: `Fast mode for Claude Code uses the same ${FRONTIER_MODEL_NAME} model with faster output. It does NOT switch to a different model. It can be toggled with /fast.`,
|
||||
].filter(item => item !== null)
|
||||
|
||||
return [
|
||||
@@ -755,7 +755,7 @@ export function getUnameSR(): string {
|
||||
return `${osType()} ${osRelease()}`
|
||||
}
|
||||
|
||||
export const DEFAULT_AGENT_PROMPT = `You are an agent for OpenClaude, an open-source coding agent and CLI. Given the user's message, you should use the tools available to complete the task. Complete the task fully—don't gold-plate, but don't leave it half-done. When you complete the task, respond with a concise report covering what was done and any key findings — the caller will relay this to the user, so it only needs the essentials.`
|
||||
export const DEFAULT_AGENT_PROMPT = `You are an agent for OpenClaude, an open-source fork of Claude Code. Given the user's message, you should use the tools available to complete the task. Complete the task fully—don't gold-plate, but don't leave it half-done. When you complete the task, respond with a concise report covering what was done and any key findings — the caller will relay this to the user, so it only needs the essentials.`
|
||||
|
||||
export async function enhanceSystemPromptWithEnvDetails(
|
||||
existingSystemPrompt: string[],
|
||||
|
||||
@@ -8,11 +8,11 @@ import { getAPIProvider } from '../utils/model/providers.js'
|
||||
import { getWorkload } from '../utils/workloadContext.js'
|
||||
|
||||
const DEFAULT_PREFIX =
|
||||
`You are OpenClaude, an open-source coding agent and CLI.`
|
||||
`You are OpenClaude, an open-source fork of Claude Code.`
|
||||
const AGENT_SDK_CLAUDE_CODE_PRESET_PREFIX =
|
||||
`You are OpenClaude, an open-source coding agent and CLI running within the Claude Agent SDK.`
|
||||
`You are OpenClaude, an open-source fork of Claude Code, running within the Claude Agent SDK.`
|
||||
const AGENT_SDK_PREFIX =
|
||||
`You are OpenClaude, built on the Claude Agent SDK.`
|
||||
`You are a Claude agent running in OpenClaude, built on the Claude Agent SDK.`
|
||||
|
||||
const CLI_SYSPROMPT_PREFIX_VALUES = [
|
||||
DEFAULT_PREFIX,
|
||||
|
||||
@@ -181,7 +181,7 @@ function formatCost(cost: number, maxDecimalPlaces: number = 4): string {
|
||||
function formatModelUsage(): string {
|
||||
const modelUsageMap = getModelUsage()
|
||||
if (Object.keys(modelUsageMap).length === 0) {
|
||||
return 'Usage: 0 input, 0 output'
|
||||
return 'Usage: 0 input, 0 output, 0 cache read, 0 cache write'
|
||||
}
|
||||
|
||||
// Accumulate usage by short name
|
||||
@@ -211,19 +211,15 @@ function formatModelUsage(): string {
|
||||
|
||||
let result = 'Usage by model:'
|
||||
for (const [shortName, usage] of Object.entries(usageByShortName)) {
|
||||
let usageString =
|
||||
const usageString =
|
||||
` ${formatNumber(usage.inputTokens)} input, ` +
|
||||
`${formatNumber(usage.outputTokens)} output`
|
||||
if (usage.cacheReadInputTokens > 0) {
|
||||
usageString += `, ${formatNumber(usage.cacheReadInputTokens)} cache read`
|
||||
}
|
||||
if (usage.cacheCreationInputTokens > 0) {
|
||||
usageString += `, ${formatNumber(usage.cacheCreationInputTokens)} cache write`
|
||||
}
|
||||
if (usage.webSearchRequests > 0) {
|
||||
usageString += `, ${formatNumber(usage.webSearchRequests)} web search`
|
||||
}
|
||||
usageString += ` (${formatCost(usage.costUSD)})`
|
||||
`${formatNumber(usage.outputTokens)} output, ` +
|
||||
`${formatNumber(usage.cacheReadInputTokens)} cache read, ` +
|
||||
`${formatNumber(usage.cacheCreationInputTokens)} cache write` +
|
||||
(usage.webSearchRequests > 0
|
||||
? `, ${formatNumber(usage.webSearchRequests)} web search`
|
||||
: '') +
|
||||
` (${formatCost(usage.costUSD)})`
|
||||
result += `\n` + `${shortName}:`.padStart(21) + usageString
|
||||
}
|
||||
return result
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
import { useCallback, useEffect, useSyncExternalStore } from 'react'
|
||||
import { useCallback, useEffect } from 'react'
|
||||
import type { Command } from '../commands.js'
|
||||
import { useNotifications } from '../context/notifications.js'
|
||||
import {
|
||||
@@ -7,11 +7,6 @@ import {
|
||||
} from '../services/analytics/index.js'
|
||||
import { reinitializeLspServerManager } from '../services/lsp/manager.js'
|
||||
import { useAppState, useSetAppState } from '../state/AppState.js'
|
||||
import {
|
||||
getPluginCommandsState,
|
||||
setPluginCommandsState,
|
||||
subscribePluginCommands,
|
||||
} from '../state/pluginCommandsStore.js'
|
||||
import type { AgentDefinition } from '../tools/AgentTool/loadAgentsDir.js'
|
||||
import { count } from '../utils/array.js'
|
||||
import { logForDebugging } from '../utils/debug.js'
|
||||
@@ -44,11 +39,6 @@ export function useManagePlugins({
|
||||
}: {
|
||||
enabled?: boolean
|
||||
} = {}) {
|
||||
const pluginCommands = useSyncExternalStore(
|
||||
subscribePluginCommands,
|
||||
getPluginCommandsState,
|
||||
getPluginCommandsState,
|
||||
)
|
||||
const setAppState = useSetAppState()
|
||||
const needsRefresh = useAppState(s => s.plugins.needsRefresh)
|
||||
const { addNotification } = useNotifications()
|
||||
@@ -84,7 +74,6 @@ export function useManagePlugins({
|
||||
|
||||
try {
|
||||
commands = await getPluginCommands()
|
||||
setPluginCommandsState(commands)
|
||||
} catch (error) {
|
||||
const errorMessage =
|
||||
error instanceof Error ? error.message : String(error)
|
||||
@@ -93,7 +82,6 @@ export function useManagePlugins({
|
||||
source: 'plugin-commands',
|
||||
error: `Failed to load plugin commands: ${errorMessage}`,
|
||||
})
|
||||
setPluginCommandsState([])
|
||||
}
|
||||
|
||||
try {
|
||||
@@ -185,7 +173,7 @@ export function useManagePlugins({
|
||||
...prevState.plugins,
|
||||
enabled,
|
||||
disabled,
|
||||
commands: [],
|
||||
commands,
|
||||
errors: mergedErrors,
|
||||
},
|
||||
}
|
||||
@@ -238,7 +226,6 @@ export function useManagePlugins({
|
||||
logError(errorObj)
|
||||
logForDebugging(`Error loading plugins: ${error}`)
|
||||
// Set empty state on error, but preserve LSP errors and add the new error
|
||||
setPluginCommandsState([])
|
||||
setAppState(prevState => {
|
||||
// Keep existing LSP/non-plugin-loading errors
|
||||
const existingLspErrors = prevState.plugins.errors.filter(
|
||||
@@ -297,11 +284,6 @@ export function useManagePlugins({
|
||||
})
|
||||
}, [initialPluginLoad, enabled])
|
||||
|
||||
useEffect(() => {
|
||||
if (enabled) return
|
||||
setPluginCommandsState([])
|
||||
}, [enabled])
|
||||
|
||||
// Plugin state changed on disk (background reconcile, /plugin menu,
|
||||
// external settings edit). Show a notification; user runs /reload-plugins
|
||||
// to apply. The previous auto-refresh here had a stale-cache bug (only
|
||||
@@ -319,6 +301,4 @@ export function useManagePlugins({
|
||||
// Do NOT auto-refresh. Do NOT reset needsRefresh — /reload-plugins
|
||||
// consumes it via refreshActivePlugins().
|
||||
}, [enabled, needsRefresh, addNotification])
|
||||
|
||||
return enabled ? pluginCommands : []
|
||||
}
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
import { useLayoutEffect, useRef, useState } from 'react'
|
||||
import { isInputModeCharacter } from 'src/components/PromptInput/inputModes.js'
|
||||
import { useNotifications } from 'src/context/notifications.js'
|
||||
import stripAnsi from 'strip-ansi'
|
||||
@@ -101,74 +100,9 @@ export function useTextInput({
|
||||
prewarmModifiers()
|
||||
}
|
||||
|
||||
// Keep a local text/cursor mirror so consecutive keystrokes can advance
|
||||
// immediately even if the controlled parent value hasn't committed yet.
|
||||
const [renderState, setRenderState] = useState(() => ({
|
||||
value: originalValue,
|
||||
offset: externalOffset,
|
||||
}))
|
||||
const liveValueRef = useRef(originalValue)
|
||||
const liveOffsetRef = useRef(externalOffset)
|
||||
const lastSeenPropsRef = useRef({
|
||||
value: originalValue,
|
||||
offset: externalOffset,
|
||||
})
|
||||
const updateRenderedInput = (nextValue: string, nextOffset: number): void => {
|
||||
liveValueRef.current = nextValue
|
||||
liveOffsetRef.current = nextOffset
|
||||
setRenderState(prev =>
|
||||
prev.value === nextValue && prev.offset === nextOffset
|
||||
? prev
|
||||
: { value: nextValue, offset: nextOffset },
|
||||
)
|
||||
}
|
||||
useLayoutEffect(() => {
|
||||
if (
|
||||
lastSeenPropsRef.current.value === originalValue &&
|
||||
lastSeenPropsRef.current.offset === externalOffset
|
||||
) {
|
||||
return
|
||||
}
|
||||
|
||||
lastSeenPropsRef.current = {
|
||||
value: originalValue,
|
||||
offset: externalOffset,
|
||||
}
|
||||
updateRenderedInput(originalValue, externalOffset)
|
||||
}, [originalValue, externalOffset])
|
||||
|
||||
const value = renderState.value
|
||||
const offset = renderState.offset
|
||||
const getLiveValue = (): string => liveValueRef.current
|
||||
const getLiveCursor = (): Cursor =>
|
||||
Cursor.fromText(liveValueRef.current, columns, liveOffsetRef.current)
|
||||
const setValue = (nextValue: string, nextOffset = liveOffsetRef.current): void => {
|
||||
const previousValue = liveValueRef.current
|
||||
const previousOffset = liveOffsetRef.current
|
||||
|
||||
if (previousValue === nextValue && previousOffset === nextOffset) {
|
||||
return
|
||||
}
|
||||
|
||||
updateRenderedInput(nextValue, nextOffset)
|
||||
|
||||
if (previousValue !== nextValue) {
|
||||
onChange(nextValue)
|
||||
}
|
||||
|
||||
if (previousOffset !== nextOffset) {
|
||||
onOffsetChange(nextOffset)
|
||||
}
|
||||
}
|
||||
const setOffset = (nextOffset: number): void => {
|
||||
if (nextOffset === liveOffsetRef.current) {
|
||||
return
|
||||
}
|
||||
|
||||
updateRenderedInput(liveValueRef.current, nextOffset)
|
||||
onOffsetChange(nextOffset)
|
||||
}
|
||||
const cursor = Cursor.fromText(value, columns, offset)
|
||||
const offset = externalOffset
|
||||
const setOffset = onOffsetChange
|
||||
const cursor = Cursor.fromText(originalValue, columns, offset)
|
||||
const { addNotification, removeNotification } = useNotifications()
|
||||
|
||||
const handleCtrlC = useDoublePress(
|
||||
@@ -177,11 +111,9 @@ export function useTextInput({
|
||||
},
|
||||
() => onExit?.(),
|
||||
() => {
|
||||
const currentValue = getLiveValue()
|
||||
if (currentValue) {
|
||||
updateRenderedInput('', 0)
|
||||
if (originalValue) {
|
||||
onChange('')
|
||||
onOffsetChange(0)
|
||||
setOffset(0)
|
||||
onHistoryReset?.()
|
||||
}
|
||||
},
|
||||
@@ -193,8 +125,7 @@ export function useTextInput({
|
||||
// not dialog dismissal, and needs the double-press safety mechanism.
|
||||
const handleEscape = useDoublePress(
|
||||
(show: boolean) => {
|
||||
const currentValue = getLiveValue()
|
||||
if (!currentValue || !show) {
|
||||
if (!originalValue || !show) {
|
||||
return
|
||||
}
|
||||
addNotification({
|
||||
@@ -205,19 +136,17 @@ export function useTextInput({
|
||||
})
|
||||
},
|
||||
() => {
|
||||
const currentValue = getLiveValue()
|
||||
// Remove the "Esc again to clear" notification immediately
|
||||
removeNotification('escape-again-to-clear')
|
||||
onClearInput?.()
|
||||
if (currentValue) {
|
||||
if (originalValue) {
|
||||
// Track double-escape usage for feature discovery
|
||||
// Save to history before clearing
|
||||
if (currentValue.trim() !== '') {
|
||||
addToHistory(currentValue)
|
||||
if (originalValue.trim() !== '') {
|
||||
addToHistory(originalValue)
|
||||
}
|
||||
updateRenderedInput('', 0)
|
||||
onChange('')
|
||||
onOffsetChange(0)
|
||||
setOffset(0)
|
||||
onHistoryReset?.()
|
||||
}
|
||||
},
|
||||
@@ -225,13 +154,13 @@ export function useTextInput({
|
||||
|
||||
const handleEmptyCtrlD = useDoublePress(
|
||||
show => {
|
||||
if (getLiveValue() !== '') {
|
||||
if (originalValue !== '') {
|
||||
return
|
||||
}
|
||||
onExitMessage?.(show, 'Ctrl-D')
|
||||
},
|
||||
() => {
|
||||
if (getLiveValue() !== '') {
|
||||
if (originalValue !== '') {
|
||||
return
|
||||
}
|
||||
onExit?.()
|
||||
@@ -239,7 +168,6 @@ export function useTextInput({
|
||||
)
|
||||
|
||||
function handleCtrlD(): MaybeCursor {
|
||||
const cursor = getLiveCursor()
|
||||
if (cursor.text === '') {
|
||||
// When input is empty, handle double-press
|
||||
handleEmptyCtrlD()
|
||||
@@ -250,28 +178,24 @@ export function useTextInput({
|
||||
}
|
||||
|
||||
function killToLineEnd(): Cursor {
|
||||
const cursor = getLiveCursor()
|
||||
const { cursor: newCursor, killed } = cursor.deleteToLineEnd()
|
||||
pushToKillRing(killed, 'append')
|
||||
return newCursor
|
||||
}
|
||||
|
||||
function killToLineStart(): Cursor {
|
||||
const cursor = getLiveCursor()
|
||||
const { cursor: newCursor, killed } = cursor.deleteToLineStart()
|
||||
pushToKillRing(killed, 'prepend')
|
||||
return newCursor
|
||||
}
|
||||
|
||||
function killWordBefore(): Cursor {
|
||||
const cursor = getLiveCursor()
|
||||
const { cursor: newCursor, killed } = cursor.deleteWordBefore()
|
||||
pushToKillRing(killed, 'prepend')
|
||||
return newCursor
|
||||
}
|
||||
|
||||
function yank(): Cursor {
|
||||
const cursor = getLiveCursor()
|
||||
const text = getLastKill()
|
||||
if (text.length > 0) {
|
||||
const startOffset = cursor.offset
|
||||
@@ -283,7 +207,6 @@ export function useTextInput({
|
||||
}
|
||||
|
||||
function handleYankPop(): Cursor {
|
||||
const cursor = getLiveCursor()
|
||||
const popResult = yankPop()
|
||||
if (!popResult) {
|
||||
return cursor
|
||||
@@ -299,16 +222,13 @@ export function useTextInput({
|
||||
}
|
||||
|
||||
const handleCtrl = mapInput([
|
||||
['a', () => getLiveCursor().startOfLine()],
|
||||
['b', () => getLiveCursor().left()],
|
||||
['a', () => cursor.startOfLine()],
|
||||
['b', () => cursor.left()],
|
||||
['c', handleCtrlC],
|
||||
['d', handleCtrlD],
|
||||
['e', () => getLiveCursor().endOfLine()],
|
||||
['f', () => getLiveCursor().right()],
|
||||
['h', () => {
|
||||
const cursor = getLiveCursor()
|
||||
return cursor.deleteTokenBefore() ?? cursor.backspace()
|
||||
}],
|
||||
['e', () => cursor.endOfLine()],
|
||||
['f', () => cursor.right()],
|
||||
['h', () => cursor.deleteTokenBefore() ?? cursor.backspace()],
|
||||
['k', killToLineEnd],
|
||||
['n', () => downOrHistoryDown()],
|
||||
['p', () => upOrHistoryUp()],
|
||||
@@ -318,15 +238,13 @@ export function useTextInput({
|
||||
])
|
||||
|
||||
const handleMeta = mapInput([
|
||||
['b', () => getLiveCursor().prevWord()],
|
||||
['f', () => getLiveCursor().nextWord()],
|
||||
['d', () => getLiveCursor().deleteWordAfter()],
|
||||
['b', () => cursor.prevWord()],
|
||||
['f', () => cursor.nextWord()],
|
||||
['d', () => cursor.deleteWordAfter()],
|
||||
['y', handleYankPop],
|
||||
])
|
||||
|
||||
function handleEnter(key: Key) {
|
||||
const cursor = getLiveCursor()
|
||||
const currentValue = getLiveValue()
|
||||
if (
|
||||
multiline &&
|
||||
cursor.offset > 0 &&
|
||||
@@ -345,11 +263,10 @@ export function useTextInput({
|
||||
if (env.terminal === 'Apple_Terminal' && isModifierPressed('shift')) {
|
||||
return cursor.insert('\n')
|
||||
}
|
||||
onSubmit?.(currentValue)
|
||||
onSubmit?.(originalValue)
|
||||
}
|
||||
|
||||
function upOrHistoryUp() {
|
||||
const cursor = getLiveCursor()
|
||||
if (disableCursorMovementForUpDownKeys) {
|
||||
onHistoryUp?.()
|
||||
return cursor
|
||||
@@ -374,7 +291,6 @@ export function useTextInput({
|
||||
return cursor
|
||||
}
|
||||
function downOrHistoryDown() {
|
||||
const cursor = getLiveCursor()
|
||||
if (disableCursorMovementForUpDownKeys) {
|
||||
onHistoryDown?.()
|
||||
return cursor
|
||||
@@ -399,7 +315,7 @@ export function useTextInput({
|
||||
return cursor
|
||||
}
|
||||
|
||||
function mapKey(key: Key, cursor: Cursor): InputMapper {
|
||||
function mapKey(key: Key): InputMapper {
|
||||
switch (true) {
|
||||
case key.escape:
|
||||
return () => {
|
||||
@@ -513,7 +429,6 @@ export function useTextInput({
|
||||
}
|
||||
|
||||
function onInput(input: string, key: Key): void {
|
||||
const currentCursor = getLiveCursor()
|
||||
// Note: Image paste shortcut (chat:imagePaste) is handled via useKeybindings in PromptInput
|
||||
|
||||
// Apply filter if provided
|
||||
@@ -531,15 +446,18 @@ export function useTextInput({
|
||||
|
||||
// Apply all DEL characters as backspace operations synchronously
|
||||
// Try to delete tokens first, fall back to character backspace
|
||||
let nextCursor = currentCursor
|
||||
let currentCursor = cursor
|
||||
for (let i = 0; i < delCount; i++) {
|
||||
nextCursor =
|
||||
nextCursor.deleteTokenBefore() ?? nextCursor.backspace()
|
||||
currentCursor =
|
||||
currentCursor.deleteTokenBefore() ?? currentCursor.backspace()
|
||||
}
|
||||
|
||||
// Update state once with the final result
|
||||
if (!currentCursor.equals(nextCursor)) {
|
||||
setValue(nextCursor.text, nextCursor.offset)
|
||||
if (!cursor.equals(currentCursor)) {
|
||||
if (cursor.text !== currentCursor.text) {
|
||||
onChange(currentCursor.text)
|
||||
}
|
||||
setOffset(currentCursor.offset)
|
||||
}
|
||||
resetKillAccumulation()
|
||||
resetYankState()
|
||||
@@ -556,10 +474,13 @@ export function useTextInput({
|
||||
resetYankState()
|
||||
}
|
||||
|
||||
const nextCursor = mapKey(key, currentCursor)(filteredInput)
|
||||
const nextCursor = mapKey(key)(filteredInput)
|
||||
if (nextCursor) {
|
||||
if (!currentCursor.equals(nextCursor)) {
|
||||
setValue(nextCursor.text, nextCursor.offset)
|
||||
if (!cursor.equals(nextCursor)) {
|
||||
if (cursor.text !== nextCursor.text) {
|
||||
onChange(nextCursor.text)
|
||||
}
|
||||
setOffset(nextCursor.offset)
|
||||
}
|
||||
// SSH-coalesced Enter: on slow links, "o" + Enter can arrive as one
|
||||
// chunk "o\r". parseKeypress only matches s === '\r', so it hit the
|
||||
@@ -591,7 +512,6 @@ export function useTextInput({
|
||||
|
||||
return {
|
||||
onInput,
|
||||
value,
|
||||
renderedValue: cursor.render(
|
||||
cursorChar,
|
||||
mask,
|
||||
@@ -600,7 +520,6 @@ export function useTextInput({
|
||||
maxVisibleLines,
|
||||
),
|
||||
offset,
|
||||
setValue,
|
||||
setOffset,
|
||||
cursorLine: cursorPos.line - cursor.getViewportStartLine(maxVisibleLines),
|
||||
cursorColumn: cursorPos.column,
|
||||
|
||||
@@ -70,14 +70,14 @@ export function useVimInput(props: UseVimInputProps): VimInputState {
|
||||
// Vim behavior: move cursor left by 1 when exiting insert mode
|
||||
// (unless at beginning of line or at offset 0)
|
||||
const offset = textInput.offset
|
||||
if (offset > 0 && textInput.value[offset - 1] !== '\n') {
|
||||
if (offset > 0 && props.value[offset - 1] !== '\n') {
|
||||
textInput.setOffset(offset - 1)
|
||||
}
|
||||
|
||||
vimStateRef.current = { mode: 'NORMAL', command: { type: 'idle' } }
|
||||
setMode('NORMAL')
|
||||
onModeChange?.('NORMAL')
|
||||
}, [onModeChange, textInput])
|
||||
}, [onModeChange, textInput, props.value])
|
||||
|
||||
function createOperatorContext(
|
||||
cursor: Cursor,
|
||||
@@ -85,8 +85,8 @@ export function useVimInput(props: UseVimInputProps): VimInputState {
|
||||
): OperatorContext {
|
||||
return {
|
||||
cursor,
|
||||
text: textInput.value,
|
||||
setText: (newText: string) => textInput.setValue(newText),
|
||||
text: props.value,
|
||||
setText: (newText: string) => props.onChange(newText),
|
||||
setOffset: (offset: number) => textInput.setOffset(offset),
|
||||
enterInsert: (offset: number) => switchToInsertMode(offset),
|
||||
getRegister: () => persistentRef.current.register,
|
||||
@@ -110,18 +110,15 @@ export function useVimInput(props: UseVimInputProps): VimInputState {
|
||||
const change = persistentRef.current.lastChange
|
||||
if (!change) return
|
||||
|
||||
const cursor = Cursor.fromText(
|
||||
textInput.value,
|
||||
props.columns,
|
||||
textInput.offset,
|
||||
)
|
||||
const cursor = Cursor.fromText(props.value, props.columns, textInput.offset)
|
||||
const ctx = createOperatorContext(cursor, true)
|
||||
|
||||
switch (change.type) {
|
||||
case 'insert':
|
||||
if (change.text) {
|
||||
const newCursor = cursor.insert(change.text)
|
||||
textInput.setValue(newCursor.text, newCursor.offset)
|
||||
props.onChange(newCursor.text)
|
||||
textInput.setOffset(newCursor.offset)
|
||||
}
|
||||
break
|
||||
|
||||
@@ -182,11 +179,7 @@ export function useVimInput(props: UseVimInputProps): VimInputState {
|
||||
// lookups expect single chars and a prepended space would break them.
|
||||
const filtered = inputFilter ? inputFilter(rawInput, key) : rawInput
|
||||
const input = state.mode === 'INSERT' ? filtered : rawInput
|
||||
const cursor = Cursor.fromText(
|
||||
textInput.value,
|
||||
props.columns,
|
||||
textInput.offset,
|
||||
)
|
||||
const cursor = Cursor.fromText(props.value, props.columns, textInput.offset)
|
||||
|
||||
if (key.ctrl) {
|
||||
textInput.onInput(input, key)
|
||||
|
||||
@@ -115,10 +115,7 @@ export default class App extends PureComponent<Props, State> {
|
||||
keyParseState = INITIAL_STATE;
|
||||
// Timer for flushing incomplete escape sequences
|
||||
incompleteEscapeTimer: NodeJS.Timeout | null = null;
|
||||
// Default to readable-mode stdin (legacy Ink behavior). The data-mode path
|
||||
// is kept as an explicit opt-in because some terminals can enter a state
|
||||
// where startup input appears frozen when data mode is the default.
|
||||
stdinMode: 'readable' | 'data' = process.env.OPENCLAUDE_USE_DATA_STDIN === '1' || process.env.OPENCLAUDE_USE_READABLE_STDIN === '0' ? 'data' : 'readable';
|
||||
stdinMode: 'readable' | 'data' = process.env.OPENCLAUDE_USE_READABLE_STDIN === '1' ? 'readable' : 'data';
|
||||
// Timeout durations for incomplete sequences (ms)
|
||||
readonly NORMAL_TIMEOUT = 50; // Short timeout for regular esc sequences
|
||||
readonly PASTE_TIMEOUT = 500; // Longer timeout for paste operations
|
||||
|
||||
@@ -33,7 +33,7 @@ import createRenderer, { type Renderer } from './renderer.js';
|
||||
import { CellWidth, CharPool, cellAt, createScreen, HyperlinkPool, isEmptyCellAt, migrateScreenPools, StylePool } from './screen.js';
|
||||
import { applySearchHighlight } from './searchHighlight.js';
|
||||
import { applySelectionOverlay, captureScrolledRows, clearSelection, createSelectionState, extendSelection, type FocusMove, findPlainTextUrlAt, getSelectedText, hasSelection, moveFocus, type SelectionState, selectLineAt, selectWordAt, shiftAnchor, shiftSelection, shiftSelectionForFollow, startSelection, updateSelection } from './selection.js';
|
||||
import { shouldSkipMainScreenSyncMarkers, shouldUseMainScreenRewrite, SYNC_OUTPUT_SUPPORTED, supportsExtendedKeys, type Terminal, writeDiffToTerminal } from './terminal.js';
|
||||
import { SYNC_OUTPUT_SUPPORTED, supportsExtendedKeys, type Terminal, writeDiffToTerminal } from './terminal.js';
|
||||
import { CURSOR_HOME, cursorMove, cursorPosition, DISABLE_KITTY_KEYBOARD, DISABLE_MODIFY_OTHER_KEYS, ENABLE_KITTY_KEYBOARD, ENABLE_MODIFY_OTHER_KEYS, ERASE_SCREEN } from './termio/csi.js';
|
||||
import { DBP, DFE, DISABLE_MOUSE_TRACKING, ENABLE_MOUSE_TRACKING, ENTER_ALT_SCREEN, EXIT_ALT_SCREEN, SHOW_CURSOR } from './termio/dec.js';
|
||||
import { CLEAR_ITERM2_PROGRESS, CLEAR_TAB_STATUS, setClipboard, supportsTabStatus, wrapForMultiplexer } from './termio/osc.js';
|
||||
@@ -609,13 +609,12 @@ export default class Ink {
|
||||
};
|
||||
}
|
||||
const tDiff = performance.now();
|
||||
const rewriteMainScreen = !this.altScreenActive && shouldUseMainScreenRewrite();
|
||||
const diff = this.log.render(prevFrame, frame, this.altScreenActive,
|
||||
// DECSTBM needs BSU/ESU atomicity — without it the outer terminal
|
||||
// renders the scrolled-but-not-yet-repainted intermediate state.
|
||||
// tmux is the main case (re-emits DECSTBM with its own timing and
|
||||
// doesn't implement DEC 2026, so SYNC_OUTPUT_SUPPORTED is false).
|
||||
SYNC_OUTPUT_SUPPORTED, rewriteMainScreen);
|
||||
SYNC_OUTPUT_SUPPORTED);
|
||||
const diffMs = performance.now() - tDiff;
|
||||
// Swap buffers
|
||||
this.backFrame = this.frontFrame;
|
||||
@@ -760,8 +759,7 @@ export default class Ink {
|
||||
}
|
||||
}
|
||||
const tWrite = performance.now();
|
||||
const skipSyncMarkers = this.altScreenActive ? !SYNC_OUTPUT_SUPPORTED : rewriteMainScreen || shouldSkipMainScreenSyncMarkers();
|
||||
writeDiffToTerminal(this.terminal, optimized, skipSyncMarkers);
|
||||
writeDiffToTerminal(this.terminal, optimized, this.altScreenActive && !SYNC_OUTPUT_SUPPORTED);
|
||||
const writeMs = performance.now() - tWrite;
|
||||
|
||||
// Update blit safety for the NEXT frame. The frame just rendered
|
||||
|
||||
@@ -1,125 +0,0 @@
|
||||
import { expect, test } from 'bun:test'
|
||||
|
||||
import type { Frame } from './frame.ts'
|
||||
import { LogUpdate } from './log-update.ts'
|
||||
import {
|
||||
CellWidth,
|
||||
CharPool,
|
||||
createScreen,
|
||||
HyperlinkPool,
|
||||
setCellAt,
|
||||
StylePool,
|
||||
} from './screen.ts'
|
||||
|
||||
function collectStdout(diff: ReturnType<LogUpdate['render']>): string {
|
||||
return diff
|
||||
.filter((patch): patch is Extract<(typeof diff)[number], { type: 'stdout' }> => patch.type === 'stdout')
|
||||
.map(patch => patch.content)
|
||||
.join('')
|
||||
}
|
||||
|
||||
function createHarness() {
|
||||
const stylePool = new StylePool()
|
||||
const charPool = new CharPool()
|
||||
const hyperlinkPool = new HyperlinkPool()
|
||||
|
||||
return {
|
||||
stylePool,
|
||||
charPool,
|
||||
hyperlinkPool,
|
||||
log: new LogUpdate({ isTTY: true, stylePool }),
|
||||
}
|
||||
}
|
||||
|
||||
function frameFromLines(
|
||||
stylePool: StylePool,
|
||||
charPool: CharPool,
|
||||
hyperlinkPool: HyperlinkPool,
|
||||
lines: string[],
|
||||
cursor = { x: 0, y: lines.length, visible: true },
|
||||
): Frame {
|
||||
const width = lines.reduce((max, line) => Math.max(max, line.length), 0)
|
||||
const screen = createScreen(width, lines.length, stylePool, charPool, hyperlinkPool)
|
||||
|
||||
for (const [y, line] of lines.entries()) {
|
||||
for (const [x, char] of [...line].entries()) {
|
||||
setCellAt(screen, x, y, {
|
||||
char,
|
||||
styleId: stylePool.none,
|
||||
width: CellWidth.Narrow,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
return {
|
||||
screen,
|
||||
viewport: {
|
||||
width: Math.max(width, 1),
|
||||
height: 10,
|
||||
},
|
||||
cursor,
|
||||
}
|
||||
}
|
||||
|
||||
test('ghostty main-screen rewrite paints prompt content without full terminal reset when width is stable', () => {
|
||||
const { stylePool, charPool, hyperlinkPool, log } = createHarness()
|
||||
const prev = frameFromLines(stylePool, charPool, hyperlinkPool, [' '])
|
||||
const next = frameFromLines(stylePool, charPool, hyperlinkPool, ['prompt'])
|
||||
|
||||
const diff = log.render(prev, next, false, true, true)
|
||||
const stdout = collectStdout(diff)
|
||||
|
||||
expect(diff.some(patch => patch.type === 'clearTerminal')).toBe(false)
|
||||
expect(diff.some(patch => patch.type === 'clear' && patch.count === 1)).toBe(
|
||||
true,
|
||||
)
|
||||
expect(stdout).toContain('prompt')
|
||||
})
|
||||
|
||||
test('ghostty main-screen rewrite clears only the changed prompt tail before repainting', () => {
|
||||
const { stylePool, charPool, hyperlinkPool, log } = createHarness()
|
||||
const prev = frameFromLines(
|
||||
stylePool,
|
||||
charPool,
|
||||
hyperlinkPool,
|
||||
['status', '> abc'],
|
||||
)
|
||||
const next = frameFromLines(
|
||||
stylePool,
|
||||
charPool,
|
||||
hyperlinkPool,
|
||||
['status', '> abcd'],
|
||||
)
|
||||
|
||||
const diff = log.render(prev, next, false, true, true)
|
||||
const stdout = collectStdout(diff)
|
||||
|
||||
expect(diff.some(patch => patch.type === 'clearTerminal')).toBe(false)
|
||||
expect(diff.some(patch => patch.type === 'clear' && patch.count === 1)).toBe(
|
||||
true,
|
||||
)
|
||||
expect(stdout).toContain('abcd')
|
||||
})
|
||||
|
||||
test('ghostty main-screen rewrite falls back to incremental diff for larger changes', () => {
|
||||
const { stylePool, charPool, hyperlinkPool, log } = createHarness()
|
||||
const prev = frameFromLines(
|
||||
stylePool,
|
||||
charPool,
|
||||
hyperlinkPool,
|
||||
['row 0', 'row 1', 'row 2', 'row 3', 'row 4', '> abc'],
|
||||
)
|
||||
const next = frameFromLines(
|
||||
stylePool,
|
||||
charPool,
|
||||
hyperlinkPool,
|
||||
['row 0 updated', 'row 1', 'row 2', 'row 3', 'row 4', '> abcd'],
|
||||
)
|
||||
|
||||
const diff = log.render(prev, next, false, true, true)
|
||||
const stdout = collectStdout(diff)
|
||||
|
||||
expect(diff.some(patch => patch.type === 'clear')).toBe(false)
|
||||
expect(stdout).toContain('updated')
|
||||
expect(stdout).toContain('abcd')
|
||||
})
|
||||
@@ -125,7 +125,6 @@ export class LogUpdate {
|
||||
next: Frame,
|
||||
altScreen = false,
|
||||
decstbmSafe = true,
|
||||
rewriteMainScreen = false,
|
||||
): Diff {
|
||||
if (!this.options.isTTY) {
|
||||
return this.renderFullFrame(next)
|
||||
@@ -147,13 +146,6 @@ export class LogUpdate {
|
||||
return fullResetSequence_CAUSES_FLICKER(next, 'resize', stylePool)
|
||||
}
|
||||
|
||||
if (!altScreen && rewriteMainScreen) {
|
||||
const rewriteStartY = findMainScreenRewriteStart(prev.screen, next.screen)
|
||||
if (rewriteStartY !== null) {
|
||||
return rewriteMainScreenFrame(prev, next, stylePool, rewriteStartY)
|
||||
}
|
||||
}
|
||||
|
||||
// DECSTBM scroll optimization: when a ScrollBox's scrollTop changed,
|
||||
// shift content with a hardware scroll (CSI top;bot r + CSI n S/T)
|
||||
// instead of rewriting the whole scroll region. The shiftRows on
|
||||
@@ -428,8 +420,34 @@ export class LogUpdate {
|
||||
// Main screen: if cursor needs to be past the last line of content
|
||||
// (typical: cursor.y = screen.height), emit \n to create that line
|
||||
// since cursor movement can't create new lines.
|
||||
if (!altScreen) {
|
||||
restoreMainScreenCursor(screen, next)
|
||||
if (altScreen) {
|
||||
// no-op; next frame's CSI H anchors cursor
|
||||
} else if (next.cursor.y >= next.screen.height) {
|
||||
// Move to column 0 of current line, then emit newlines to reach target row
|
||||
screen.txn(prev => {
|
||||
const rowsToCreate = next.cursor.y - prev.y
|
||||
if (rowsToCreate > 0) {
|
||||
// Use CR to resolve pending wrap (if any) without advancing
|
||||
// to the next line, then LF to create each new row.
|
||||
const patches: Diff = new Array<Diff[number]>(1 + rowsToCreate)
|
||||
patches[0] = CARRIAGE_RETURN
|
||||
for (let i = 0; i < rowsToCreate; i++) {
|
||||
patches[1 + i] = NEWLINE
|
||||
}
|
||||
return [patches, { dx: -prev.x, dy: rowsToCreate }]
|
||||
}
|
||||
// At or past target row - need to move cursor to correct position
|
||||
const dy = next.cursor.y - prev.y
|
||||
if (dy !== 0 || prev.x !== next.cursor.x) {
|
||||
// Use CR to clear pending wrap (if any), then cursor move
|
||||
const patches: Diff = [CARRIAGE_RETURN]
|
||||
patches.push({ type: 'cursorMove', x: next.cursor.x, y: dy })
|
||||
return [patches, { dx: next.cursor.x - prev.x, dy }]
|
||||
}
|
||||
return [[], { dx: 0, dy: 0 }]
|
||||
})
|
||||
} else {
|
||||
moveCursorTo(screen, next.cursor.x, next.cursor.y)
|
||||
}
|
||||
|
||||
const elapsed = performance.now() - startTime
|
||||
@@ -449,77 +467,6 @@ export class LogUpdate {
|
||||
}
|
||||
}
|
||||
|
||||
function rewriteMainScreenFrame(
|
||||
prev: Frame,
|
||||
next: Frame,
|
||||
stylePool: StylePool,
|
||||
startY: number,
|
||||
): Diff {
|
||||
const diff: Diff = []
|
||||
const clearCount = prev.screen.height - startY
|
||||
|
||||
if (clearCount > 0) {
|
||||
const clearStartY = prev.screen.height - 1
|
||||
const clearCursor = new VirtualScreen(prev.cursor, next.viewport.width)
|
||||
moveCursorTo(clearCursor, 0, clearStartY)
|
||||
diff.push(...clearCursor.diff)
|
||||
diff.push({ type: 'clear', count: clearCount })
|
||||
}
|
||||
|
||||
const screen = new VirtualScreen(
|
||||
clearCount > 0 ? { x: 0, y: startY } : prev.cursor,
|
||||
next.viewport.width,
|
||||
)
|
||||
renderFrameSlice(screen, next, startY, next.screen.height, stylePool)
|
||||
restoreMainScreenCursor(screen, next)
|
||||
|
||||
return [...diff, ...screen.diff]
|
||||
}
|
||||
|
||||
const MAX_MAIN_SCREEN_REWRITE_ROWS = 6
|
||||
|
||||
function findMainScreenRewriteStart(prev: Screen, next: Screen): number | null {
|
||||
const commonHeight = Math.min(prev.height, next.height)
|
||||
let firstChangedY = commonHeight
|
||||
|
||||
for (let y = 0; y < commonHeight; y += 1) {
|
||||
if (!rowsEqual(prev, next, y)) {
|
||||
firstChangedY = y
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
const rewriteRows = Math.max(prev.height, next.height) - firstChangedY
|
||||
if (rewriteRows <= 0) {
|
||||
return null
|
||||
}
|
||||
|
||||
return rewriteRows <= MAX_MAIN_SCREEN_REWRITE_ROWS ? firstChangedY : null
|
||||
}
|
||||
|
||||
function rowsEqual(prev: Screen, next: Screen, y: number): boolean {
|
||||
if (prev.width !== next.width) {
|
||||
return false
|
||||
}
|
||||
|
||||
if (prev.softWrap[y] !== next.softWrap[y]) {
|
||||
return false
|
||||
}
|
||||
|
||||
const rowStart = y * prev.width
|
||||
const rowEnd = rowStart + prev.width
|
||||
for (let index = rowStart; index < rowEnd; index += 1) {
|
||||
if (
|
||||
prev.cells64[index] !== next.cells64[index] ||
|
||||
prev.noSelect[index] !== next.noSelect[index]
|
||||
) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
function transitionHyperlink(
|
||||
diff: Diff,
|
||||
current: Hyperlink,
|
||||
@@ -675,37 +622,6 @@ function renderFrameSlice(
|
||||
return screen
|
||||
}
|
||||
|
||||
function restoreMainScreenCursor(screen: VirtualScreen, next: Frame): void {
|
||||
if (next.cursor.y >= next.screen.height) {
|
||||
// Move to column 0 of current line, then emit newlines to reach target row
|
||||
screen.txn(prev => {
|
||||
const rowsToCreate = next.cursor.y - prev.y
|
||||
if (rowsToCreate > 0) {
|
||||
// Use CR to resolve pending wrap (if any) without advancing
|
||||
// to the next line, then LF to create each new row.
|
||||
const patches: Diff = new Array<Diff[number]>(1 + rowsToCreate)
|
||||
patches[0] = CARRIAGE_RETURN
|
||||
for (let i = 0; i < rowsToCreate; i++) {
|
||||
patches[1 + i] = NEWLINE
|
||||
}
|
||||
return [patches, { dx: -prev.x, dy: rowsToCreate }]
|
||||
}
|
||||
// At or past target row - need to move cursor to correct position
|
||||
const dy = next.cursor.y - prev.y
|
||||
if (dy !== 0 || prev.x !== next.cursor.x) {
|
||||
// Use CR to clear pending wrap (if any), then cursor move
|
||||
const patches: Diff = [CARRIAGE_RETURN]
|
||||
patches.push({ type: 'cursorMove', x: next.cursor.x, y: dy })
|
||||
return [patches, { dx: next.cursor.x - prev.x, dy }]
|
||||
}
|
||||
return [[], { dx: 0, dy: 0 }]
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
moveCursorTo(screen, next.cursor.x, next.cursor.y)
|
||||
}
|
||||
|
||||
type Delta = { dx: number; dy: number }
|
||||
|
||||
/**
|
||||
|
||||
@@ -1,369 +0,0 @@
|
||||
import { PassThrough } from 'node:stream'
|
||||
|
||||
import { expect, test } from 'bun:test'
|
||||
import React from 'react'
|
||||
|
||||
import type { DOMElement, ElementNames } from './dom.ts'
|
||||
import instances from './instances.ts'
|
||||
import { LayoutEdge } from './layout/node.ts'
|
||||
import type { ParsedKey } from './parse-keypress.ts'
|
||||
import { createRoot } from './root.ts'
|
||||
|
||||
type TestStdin = PassThrough & {
|
||||
isTTY: boolean
|
||||
setRawMode: (mode: boolean) => void
|
||||
ref: () => void
|
||||
unref: () => void
|
||||
}
|
||||
|
||||
const RAW_TEXT_STYLE = {
|
||||
flexDirection: 'row',
|
||||
flexGrow: 0,
|
||||
flexShrink: 1,
|
||||
textWrap: 'wrap',
|
||||
} as const
|
||||
|
||||
function createTestStreams(): {
|
||||
stdout: PassThrough
|
||||
stdin: TestStdin
|
||||
} {
|
||||
const stdout = new PassThrough()
|
||||
const stdin = new PassThrough() as TestStdin
|
||||
|
||||
stdin.isTTY = true
|
||||
stdin.setRawMode = () => {}
|
||||
stdin.ref = () => {}
|
||||
stdin.unref = () => {}
|
||||
|
||||
;(stdout as unknown as { columns: number }).columns = 120
|
||||
;(stdout as unknown as { rows: number }).rows = 24
|
||||
;(stdout as unknown as { isTTY: boolean }).isTTY = true
|
||||
|
||||
return { stdout, stdin }
|
||||
}
|
||||
|
||||
async function waitForCondition(
|
||||
predicate: () => boolean,
|
||||
errorMessage: string,
|
||||
timeoutMs = 2000,
|
||||
): Promise<void> {
|
||||
const startedAt = Date.now()
|
||||
|
||||
while (Date.now() - startedAt < timeoutMs) {
|
||||
if (predicate()) {
|
||||
return
|
||||
}
|
||||
|
||||
await Bun.sleep(10)
|
||||
}
|
||||
|
||||
throw new Error(errorMessage)
|
||||
}
|
||||
|
||||
function getRootNode(stdout: PassThrough): DOMElement {
|
||||
const instance = getInkInstance(stdout)
|
||||
|
||||
if (!instance.rootNode) {
|
||||
throw new Error('Ink instance root node not found')
|
||||
}
|
||||
|
||||
return instance.rootNode
|
||||
}
|
||||
|
||||
function getInkInstance(stdout: PassThrough): {
|
||||
rootNode?: DOMElement
|
||||
dispatchKeyboardEvent: (parsedKey: ParsedKey) => void
|
||||
} {
|
||||
const instance = instances.get(
|
||||
stdout as unknown as NodeJS.WriteStream,
|
||||
) as
|
||||
| {
|
||||
rootNode?: DOMElement
|
||||
dispatchKeyboardEvent: (parsedKey: ParsedKey) => void
|
||||
}
|
||||
| undefined
|
||||
|
||||
if (!instance) {
|
||||
throw new Error('Ink instance not found')
|
||||
}
|
||||
|
||||
return instance
|
||||
}
|
||||
|
||||
function findElement(
|
||||
node: DOMElement,
|
||||
nodeName: ElementNames,
|
||||
): DOMElement | undefined {
|
||||
if (node.nodeName === nodeName) {
|
||||
return node
|
||||
}
|
||||
|
||||
for (const child of node.childNodes) {
|
||||
if (child.nodeName === '#text') {
|
||||
continue
|
||||
}
|
||||
|
||||
const found = findElement(child, nodeName)
|
||||
if (found) {
|
||||
return found
|
||||
}
|
||||
}
|
||||
|
||||
return undefined
|
||||
}
|
||||
|
||||
function requireElement(stdout: PassThrough, nodeName: ElementNames): DOMElement {
|
||||
const found = findElement(getRootNode(stdout), nodeName)
|
||||
|
||||
if (!found) {
|
||||
throw new Error(`Expected to find ${nodeName} in Ink root tree`)
|
||||
}
|
||||
|
||||
return found
|
||||
}
|
||||
|
||||
async function createHarness(): Promise<{
|
||||
stdout: PassThrough
|
||||
stdin: TestStdin
|
||||
root: Awaited<ReturnType<typeof createRoot>>
|
||||
dispose: () => Promise<void>
|
||||
}> {
|
||||
const { stdout, stdin } = createTestStreams()
|
||||
const root = await createRoot({
|
||||
stdout: stdout as unknown as NodeJS.WriteStream,
|
||||
stdin: stdin as unknown as NodeJS.ReadStream,
|
||||
patchConsole: false,
|
||||
})
|
||||
|
||||
return {
|
||||
stdout,
|
||||
stdin,
|
||||
root,
|
||||
dispose: async () => {
|
||||
root.unmount()
|
||||
stdin.end()
|
||||
stdout.end()
|
||||
await Bun.sleep(25)
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
test('raw ink-box updates keyboard handlers and attributes in place across rerenders', async () => {
|
||||
const calls: string[] = []
|
||||
const firstHandler = () => calls.push('first')
|
||||
const secondHandler = () => calls.push('second')
|
||||
const harness = await createHarness()
|
||||
|
||||
try {
|
||||
harness.root.render(
|
||||
React.createElement(
|
||||
'ink-box',
|
||||
{
|
||||
autoFocus: true,
|
||||
onKeyDown: firstHandler,
|
||||
tabIndex: 0,
|
||||
},
|
||||
'first render',
|
||||
),
|
||||
)
|
||||
|
||||
await Bun.sleep(25)
|
||||
|
||||
const firstBox = requireElement(harness.stdout, 'ink-box')
|
||||
expect(firstBox.attributes.tabIndex).toBe(0)
|
||||
expect(firstBox._eventHandlers?.onKeyDown).toBe(firstHandler)
|
||||
|
||||
harness.root.render(
|
||||
React.createElement(
|
||||
'ink-box',
|
||||
{
|
||||
autoFocus: true,
|
||||
onKeyDown: secondHandler,
|
||||
tabIndex: 1,
|
||||
},
|
||||
'second render',
|
||||
),
|
||||
)
|
||||
|
||||
await Bun.sleep(25)
|
||||
|
||||
const secondBox = requireElement(harness.stdout, 'ink-box')
|
||||
expect(secondBox).toBe(firstBox)
|
||||
expect(secondBox.attributes.tabIndex).toBe(1)
|
||||
expect(secondBox._eventHandlers?.onKeyDown).toBe(secondHandler)
|
||||
|
||||
getInkInstance(harness.stdout).dispatchKeyboardEvent({
|
||||
kind: 'key',
|
||||
name: 'a',
|
||||
fn: false,
|
||||
ctrl: false,
|
||||
meta: false,
|
||||
shift: false,
|
||||
option: false,
|
||||
super: false,
|
||||
sequence: 'a',
|
||||
raw: 'a',
|
||||
isPasted: false,
|
||||
})
|
||||
|
||||
await waitForCondition(
|
||||
() => calls.length === 1,
|
||||
'Timed out waiting for rerendered onKeyDown handler to fire',
|
||||
)
|
||||
|
||||
expect(calls).toEqual(['second'])
|
||||
} finally {
|
||||
await harness.dispose()
|
||||
}
|
||||
})
|
||||
|
||||
test('raw ink-text updates textStyles in place across rerenders', async () => {
|
||||
const harness = await createHarness()
|
||||
|
||||
try {
|
||||
harness.root.render(
|
||||
React.createElement(
|
||||
'ink-text',
|
||||
{
|
||||
style: RAW_TEXT_STYLE,
|
||||
textStyles: { color: 'ansi:red' },
|
||||
},
|
||||
'host text',
|
||||
),
|
||||
)
|
||||
|
||||
await Bun.sleep(25)
|
||||
|
||||
const firstText = requireElement(harness.stdout, 'ink-text')
|
||||
expect(firstText.textStyles).toEqual({ color: 'ansi:red' })
|
||||
|
||||
harness.root.render(
|
||||
React.createElement(
|
||||
'ink-text',
|
||||
{
|
||||
style: RAW_TEXT_STYLE,
|
||||
textStyles: { color: 'ansi:blue' },
|
||||
},
|
||||
'host text',
|
||||
),
|
||||
)
|
||||
|
||||
await Bun.sleep(25)
|
||||
|
||||
const secondText = requireElement(harness.stdout, 'ink-text')
|
||||
expect(secondText).toBe(firstText)
|
||||
expect(secondText.textStyles).toEqual({ color: 'ansi:blue' })
|
||||
} finally {
|
||||
await harness.dispose()
|
||||
}
|
||||
})
|
||||
|
||||
test('raw ink-box removes event handler when set to undefined', async () => {
|
||||
const calls: string[] = []
|
||||
const handler = () => calls.push('fired')
|
||||
const harness = await createHarness()
|
||||
|
||||
try {
|
||||
harness.root.render(
|
||||
React.createElement(
|
||||
'ink-box',
|
||||
{
|
||||
autoFocus: true,
|
||||
onKeyDown: handler,
|
||||
tabIndex: 0,
|
||||
},
|
||||
'with handler',
|
||||
),
|
||||
)
|
||||
|
||||
await Bun.sleep(25)
|
||||
|
||||
const box = requireElement(harness.stdout, 'ink-box')
|
||||
expect(box._eventHandlers?.onKeyDown).toBe(handler)
|
||||
|
||||
// Remove the handler
|
||||
harness.root.render(
|
||||
React.createElement(
|
||||
'ink-box',
|
||||
{
|
||||
autoFocus: true,
|
||||
tabIndex: 0,
|
||||
},
|
||||
'without handler',
|
||||
),
|
||||
)
|
||||
|
||||
await Bun.sleep(25)
|
||||
|
||||
const sameBox = requireElement(harness.stdout, 'ink-box')
|
||||
expect(sameBox).toBe(box)
|
||||
expect(sameBox._eventHandlers?.onKeyDown).toBeUndefined()
|
||||
|
||||
// Dispatch a key event and verify the removed handler is NOT called
|
||||
getInkInstance(harness.stdout).dispatchKeyboardEvent({
|
||||
kind: 'key',
|
||||
name: 'a',
|
||||
fn: false,
|
||||
ctrl: false,
|
||||
meta: false,
|
||||
shift: false,
|
||||
option: false,
|
||||
super: false,
|
||||
sequence: 'a',
|
||||
raw: 'a',
|
||||
isPasted: false,
|
||||
})
|
||||
|
||||
await Bun.sleep(50)
|
||||
expect(calls).toEqual([])
|
||||
} finally {
|
||||
await harness.dispose()
|
||||
}
|
||||
})
|
||||
|
||||
test('raw ink-box updates layout style in place across rerenders', async () => {
|
||||
const harness = await createHarness()
|
||||
|
||||
try {
|
||||
harness.root.render(
|
||||
React.createElement(
|
||||
'ink-box',
|
||||
{
|
||||
style: { flexDirection: 'row', paddingLeft: 1 },
|
||||
},
|
||||
'styled box',
|
||||
),
|
||||
)
|
||||
|
||||
await Bun.sleep(25)
|
||||
|
||||
const box = requireElement(harness.stdout, 'ink-box')
|
||||
expect(box.style.flexDirection).toBe('row')
|
||||
expect(box.style.paddingLeft).toBe(1)
|
||||
|
||||
harness.root.render(
|
||||
React.createElement(
|
||||
'ink-box',
|
||||
{
|
||||
style: { flexDirection: 'column', paddingLeft: 2 },
|
||||
},
|
||||
'styled box',
|
||||
),
|
||||
)
|
||||
|
||||
await Bun.sleep(25)
|
||||
|
||||
const sameBox = requireElement(harness.stdout, 'ink-box')
|
||||
expect(sameBox).toBe(box)
|
||||
expect(sameBox.style.flexDirection).toBe('column')
|
||||
expect(sameBox.style.paddingLeft).toBe(2)
|
||||
|
||||
// Verify the update reached the layout engine, not just the style object
|
||||
const yogaNode = sameBox.yogaNode!
|
||||
expect(yogaNode).toBeDefined()
|
||||
yogaNode.calculateLayout(120)
|
||||
expect(yogaNode.getComputedPadding(LayoutEdge.Left)).toBe(2)
|
||||
} finally {
|
||||
await harness.dispose()
|
||||
}
|
||||
})
|
||||
@@ -449,25 +449,17 @@ const reconciler = createReconciler<
|
||||
},
|
||||
commitUpdate(
|
||||
node: DOMElement,
|
||||
updatePayload: UpdatePayload | null,
|
||||
_type: ElementNames,
|
||||
oldProps: Props,
|
||||
newProps: Props,
|
||||
_oldProps: Props,
|
||||
_newProps: Props,
|
||||
): void {
|
||||
// React 19 mutation mode calls commitUpdate as
|
||||
// (instance, type, oldProps, newProps, fiber) and does not pass the
|
||||
// prepareUpdate() payload here. This renderer used to treat the second
|
||||
// argument as updatePayload, which left mounted ink-* nodes with stale
|
||||
// attributes, event handlers, and textStyles until something forced a
|
||||
// remount. Recompute the prop/style diff here so host nodes update
|
||||
// correctly in place on rerender.
|
||||
const props = diff(oldProps, newProps)
|
||||
const style = diff(oldProps['style'] as Styles, newProps['style'] as Styles)
|
||||
const nextStyle = newProps['style'] as Styles | undefined
|
||||
|
||||
if (!props && !style) {
|
||||
if (!updatePayload) {
|
||||
return
|
||||
}
|
||||
|
||||
const { props, style, nextStyle } = updatePayload
|
||||
|
||||
if (props) {
|
||||
for (const [key, value] of Object.entries(props)) {
|
||||
if (key === 'style') {
|
||||
|
||||
@@ -135,13 +135,6 @@ export function setXtversionName(name: string): void {
|
||||
if (xtversionName === undefined) xtversionName = name
|
||||
}
|
||||
|
||||
export function isGhosttyTerminal(): boolean {
|
||||
if (process.env.NODE_ENV === 'test') return false
|
||||
if (process.env.TERM_PROGRAM === 'ghostty') return true
|
||||
if (process.env.TERM === 'xterm-ghostty') return true
|
||||
return xtversionName?.toLowerCase().startsWith('ghostty') ?? false
|
||||
}
|
||||
|
||||
/** True if running in an xterm.js-based terminal (VS Code, Cursor, Windsurf
|
||||
* integrated terminals). Combines TERM_PROGRAM env check (fast, sync, but
|
||||
* not forwarded over SSH) with the XTVERSION probe result (async, survives
|
||||
@@ -152,20 +145,6 @@ export function isXtermJs(): boolean {
|
||||
return xtversionName?.startsWith('xterm.js') ?? false
|
||||
}
|
||||
|
||||
/** Ghostty currently repaints main-screen prompt updates more reliably
|
||||
* without DEC 2026 synchronized output. Prefer explicit terminal identity
|
||||
* (TERM_PROGRAM/TERM or XTVERSION) in real sessions, but keep tests
|
||||
* deterministic by disabling the env-based detection under NODE_ENV=test. */
|
||||
export function shouldSkipMainScreenSyncMarkers(): boolean {
|
||||
return isGhosttyTerminal()
|
||||
}
|
||||
|
||||
/** Ghostty's main-screen prompt updates are currently more reliable when we
|
||||
* bypass the incremental diff path and rewrite the visible prompt block. */
|
||||
export function shouldUseMainScreenRewrite(): boolean {
|
||||
return isGhosttyTerminal()
|
||||
}
|
||||
|
||||
// Terminals known to correctly implement the Kitty keyboard protocol
|
||||
// (CSI >1u) and/or xterm modifyOtherKeys (CSI >4;2m) for ctrl+shift+<letter>
|
||||
// disambiguation. We previously enabled unconditionally (#23350), assuming
|
||||
|
||||
@@ -13,7 +13,6 @@ const execFileNoThrowMock = mock(
|
||||
|
||||
mock.module('../../utils/execFileNoThrow.js', () => ({
|
||||
execFileNoThrow: execFileNoThrowMock,
|
||||
execFileNoThrowWithCwd: execFileNoThrowMock,
|
||||
}))
|
||||
|
||||
mock.module('../../utils/tempfile.js', () => ({
|
||||
|
||||
@@ -1,62 +0,0 @@
|
||||
import { afterEach, describe, expect, test } from 'bun:test'
|
||||
import { mkdir, mkdtemp, rm, writeFile } from 'node:fs/promises'
|
||||
import { tmpdir } from 'node:os'
|
||||
import { join } from 'node:path'
|
||||
|
||||
import {
|
||||
getSteps,
|
||||
isProjectOnboardingComplete,
|
||||
} from './projectOnboardingSteps.js'
|
||||
import { runWithCwdOverride } from './utils/cwd.js'
|
||||
|
||||
let tempDir: string | undefined
|
||||
|
||||
afterEach(async () => {
|
||||
if (tempDir) {
|
||||
await rm(tempDir, { recursive: true, force: true })
|
||||
tempDir = undefined
|
||||
}
|
||||
})
|
||||
|
||||
describe('project onboarding completion', () => {
|
||||
test('is incomplete when neither AGENTS.md nor CLAUDE.md exists', async () => {
|
||||
tempDir = await mkdtemp(join(tmpdir(), 'project-onboarding-'))
|
||||
|
||||
await runWithCwdOverride(tempDir, async () => {
|
||||
expect(isProjectOnboardingComplete()).toBe(false)
|
||||
expect(getSteps()[1]?.text).toContain('/init')
|
||||
expect(getSteps()[1]?.text).toContain('AGENTS.md')
|
||||
expect(getSteps()[1]?.text).toContain('CLAUDE.md')
|
||||
})
|
||||
})
|
||||
|
||||
test('is complete when only CLAUDE.md exists', async () => {
|
||||
tempDir = await mkdtemp(join(tmpdir(), 'project-onboarding-'))
|
||||
await writeFile(join(tempDir, 'CLAUDE.md'), '# CLAUDE.md\n')
|
||||
|
||||
await runWithCwdOverride(tempDir, async () => {
|
||||
expect(isProjectOnboardingComplete()).toBe(true)
|
||||
})
|
||||
})
|
||||
|
||||
test('is complete when only AGENTS.md exists', async () => {
|
||||
tempDir = await mkdtemp(join(tmpdir(), 'project-onboarding-'))
|
||||
await writeFile(join(tempDir, 'AGENTS.md'), '# AGENTS.md\n')
|
||||
|
||||
await runWithCwdOverride(tempDir, async () => {
|
||||
expect(isProjectOnboardingComplete()).toBe(true)
|
||||
})
|
||||
})
|
||||
|
||||
test('is complete from a nested cwd when repo instructions exist in an ancestor directory', async () => {
|
||||
tempDir = await mkdtemp(join(tmpdir(), 'project-onboarding-'))
|
||||
const nestedDir = join(tempDir, 'packages', 'app')
|
||||
await writeFile(join(tempDir, 'AGENTS.md'), '# AGENTS.md\n')
|
||||
await mkdir(nestedDir, { recursive: true })
|
||||
await writeFile(join(nestedDir, 'index.ts'), 'export {}\n')
|
||||
|
||||
await runWithCwdOverride(nestedDir, async () => {
|
||||
expect(isProjectOnboardingComplete()).toBe(true)
|
||||
})
|
||||
})
|
||||
})
|
||||
@@ -1,14 +1,50 @@
|
||||
import memoize from 'lodash-es/memoize.js'
|
||||
import { join } from 'path'
|
||||
import {
|
||||
getCurrentProjectConfig,
|
||||
saveCurrentProjectConfig,
|
||||
} from './utils/config.js'
|
||||
export {
|
||||
getSteps,
|
||||
isProjectOnboardingComplete,
|
||||
type Step,
|
||||
} from './projectOnboardingSteps.js'
|
||||
import { isProjectOnboardingComplete } from './projectOnboardingSteps.js'
|
||||
import { getCwd } from './utils/cwd.js'
|
||||
import { isDirEmpty } from './utils/file.js'
|
||||
import { getFsImplementation } from './utils/fsOperations.js'
|
||||
|
||||
export type Step = {
|
||||
key: string
|
||||
text: string
|
||||
isComplete: boolean
|
||||
isCompletable: boolean
|
||||
isEnabled: boolean
|
||||
}
|
||||
|
||||
export function getSteps(): Step[] {
|
||||
const hasClaudeMd = getFsImplementation().existsSync(
|
||||
join(getCwd(), 'CLAUDE.md'),
|
||||
)
|
||||
const isWorkspaceDirEmpty = isDirEmpty(getCwd())
|
||||
|
||||
return [
|
||||
{
|
||||
key: 'workspace',
|
||||
text: 'Ask Claude to create a new app or clone a repository',
|
||||
isComplete: false,
|
||||
isCompletable: true,
|
||||
isEnabled: isWorkspaceDirEmpty,
|
||||
},
|
||||
{
|
||||
key: 'claudemd',
|
||||
text: 'Run /init to create a CLAUDE.md file with instructions for Claude',
|
||||
isComplete: hasClaudeMd,
|
||||
isCompletable: true,
|
||||
isEnabled: !isWorkspaceDirEmpty,
|
||||
},
|
||||
]
|
||||
}
|
||||
|
||||
export function isProjectOnboardingComplete(): boolean {
|
||||
return getSteps()
|
||||
.filter(({ isCompletable, isEnabled }) => isCompletable && isEnabled)
|
||||
.every(({ isComplete }) => isComplete)
|
||||
}
|
||||
|
||||
export function maybeMarkProjectOnboardingComplete(): void {
|
||||
// Short-circuit on cached config — isProjectOnboardingComplete() hits
|
||||
|
||||
@@ -1,44 +0,0 @@
|
||||
import { getCwd } from './utils/cwd.js'
|
||||
import { isDirEmpty } from './utils/file.js'
|
||||
import { getFsImplementation } from './utils/fsOperations.js'
|
||||
import { findProjectInstructionFilePathInAncestors } from './utils/projectInstructions.js'
|
||||
|
||||
export type Step = {
|
||||
key: string
|
||||
text: string
|
||||
isComplete: boolean
|
||||
isCompletable: boolean
|
||||
isEnabled: boolean
|
||||
}
|
||||
|
||||
export function getSteps(): Step[] {
|
||||
const hasRepoInstructions =
|
||||
findProjectInstructionFilePathInAncestors(
|
||||
getCwd(),
|
||||
getFsImplementation().existsSync,
|
||||
) !== null
|
||||
const isWorkspaceDirEmpty = isDirEmpty(getCwd())
|
||||
|
||||
return [
|
||||
{
|
||||
key: 'workspace',
|
||||
text: 'Ask Claude to create a new app or clone a repository',
|
||||
isComplete: false,
|
||||
isCompletable: true,
|
||||
isEnabled: isWorkspaceDirEmpty,
|
||||
},
|
||||
{
|
||||
key: 'claudemd',
|
||||
text: 'Set up repo instructions (/init creates AGENTS.md or updates existing CLAUDE.md; either file counts)',
|
||||
isComplete: hasRepoInstructions,
|
||||
isCompletable: true,
|
||||
isEnabled: !isWorkspaceDirEmpty,
|
||||
},
|
||||
]
|
||||
}
|
||||
|
||||
export function isProjectOnboardingComplete(): boolean {
|
||||
return getSteps()
|
||||
.filter(({ isCompletable, isEnabled }) => isCompletable && isEnabled)
|
||||
.every(({ isComplete }) => isComplete)
|
||||
}
|
||||
@@ -617,6 +617,7 @@ export function REPL({
|
||||
const toolPermissionContext = useAppState(s => s.toolPermissionContext);
|
||||
const verbose = useAppState(s => s.verbose);
|
||||
const mcp = useAppState(s => s.mcp);
|
||||
const plugins = useAppState(s => s.plugins);
|
||||
const agentDefinitions = useAppState(s => s.agentDefinitions);
|
||||
const fileHistory = useAppState(s => s.fileHistory);
|
||||
const initialMessage = useAppState(s => s.initialMessage);
|
||||
@@ -779,7 +780,7 @@ export function REPL({
|
||||
}, [localTools, initialTools]);
|
||||
|
||||
// Initialize plugin management
|
||||
const pluginCommands = useManagePlugins({
|
||||
useManagePlugins({
|
||||
enabled: !isRemoteSession
|
||||
});
|
||||
const tasksV2 = useTasksV2WithCollapseEffect();
|
||||
@@ -825,16 +826,10 @@ export function REPL({
|
||||
}, [mainThreadAgentDefinition, mergedTools]);
|
||||
|
||||
// Merge commands from local state, plugins, and MCP
|
||||
const commandsWithPlugins = useMergedCommands(localCommands, pluginCommands as Command[]);
|
||||
const commandsWithPlugins = useMergedCommands(localCommands, plugins.commands as Command[]);
|
||||
const mergedCommands = useMergedCommands(commandsWithPlugins, mcp.commands as Command[]);
|
||||
// Keep plugin commands out of render-time command props. Feeding the full
|
||||
// execution set into PromptInput/Messages reintroduced the startup repaint
|
||||
// freeze, while transcript rendering still round-trips plugin skills via the
|
||||
// SkillTool's `skill` payload without needing plugin command objects here.
|
||||
const renderMergedCommands = useMergedCommands(localCommands, mcp.commands as Command[]);
|
||||
// Filter out all commands if disableSlashCommands is true
|
||||
const commands = useMemo(() => disableSlashCommands ? [] : mergedCommands, [disableSlashCommands, mergedCommands]);
|
||||
const renderCommands = useMemo(() => disableSlashCommands ? [] : renderMergedCommands, [disableSlashCommands, renderMergedCommands]);
|
||||
useIdeLogging(isRemoteSession ? EMPTY_MCP_CLIENTS : mcp.clients);
|
||||
useIdeSelection(isRemoteSession ? EMPTY_MCP_CLIENTS : mcp.clients, setIDESelection);
|
||||
const [streamMode, setStreamMode] = useState<SpinnerMode>('responding');
|
||||
@@ -4432,7 +4427,7 @@ export function REPL({
|
||||
// and transcript-mode are mutually exclusive (this early return), so
|
||||
// only one ScrollBox is ever mounted at a time.
|
||||
const transcriptScrollRef = isFullscreenEnvEnabled() && !disableVirtualScroll && !dumpMode ? scrollRef : undefined;
|
||||
const transcriptMessagesElement = <Messages messages={transcriptMessages} tools={tools} commands={renderCommands} verbose={true} toolJSX={null} toolUseConfirmQueue={[]} inProgressToolUseIDs={inProgressToolUseIDs} isMessageSelectorVisible={false} conversationId={conversationId} screen={screen} agentDefinitions={agentDefinitions} streamingToolUses={transcriptStreamingToolUses} showAllInTranscript={showAllInTranscript} onOpenRateLimitOptions={handleOpenRateLimitOptions} isLoading={isLoading} hidePastThinking={true} streamingThinking={streamingThinking} scrollRef={transcriptScrollRef} jumpRef={jumpRef} onSearchMatchesChange={onSearchMatchesChange} scanElement={scanElement} setPositions={setPositions} disableRenderCap={dumpMode} />;
|
||||
const transcriptMessagesElement = <Messages messages={transcriptMessages} tools={tools} commands={commands} verbose={true} toolJSX={null} toolUseConfirmQueue={[]} inProgressToolUseIDs={inProgressToolUseIDs} isMessageSelectorVisible={false} conversationId={conversationId} screen={screen} agentDefinitions={agentDefinitions} streamingToolUses={transcriptStreamingToolUses} showAllInTranscript={showAllInTranscript} onOpenRateLimitOptions={handleOpenRateLimitOptions} isLoading={isLoading} hidePastThinking={true} streamingThinking={streamingThinking} scrollRef={transcriptScrollRef} jumpRef={jumpRef} onSearchMatchesChange={onSearchMatchesChange} scanElement={scanElement} setPositions={setPositions} disableRenderCap={dumpMode} />;
|
||||
const transcriptToolJSX = toolJSX && <Box flexDirection="column" width="100%">
|
||||
{toolJSX.jsx}
|
||||
</Box>;
|
||||
@@ -4600,7 +4595,7 @@ export function REPL({
|
||||
jumpToNew(scrollRef.current);
|
||||
}} scrollable={<>
|
||||
<TeammateViewHeader />
|
||||
<Messages messages={displayedMessages} tools={tools} commands={renderCommands} verbose={verbose} toolJSX={toolJSX} toolUseConfirmQueue={toolUseConfirmQueue} inProgressToolUseIDs={viewedTeammateTask ? viewedTeammateTask.inProgressToolUseIDs ?? new Set() : inProgressToolUseIDs} isMessageSelectorVisible={isMessageSelectorVisible} conversationId={conversationId} screen={screen} streamingToolUses={streamingToolUses} showAllInTranscript={showAllInTranscript} agentDefinitions={agentDefinitions} onOpenRateLimitOptions={handleOpenRateLimitOptions} isLoading={isLoading} streamingText={isLoading && !viewedAgentTask ? visibleStreamingText : null} isBriefOnly={viewedAgentTask ? false : isBriefOnly} unseenDivider={viewedAgentTask ? undefined : unseenDivider} scrollRef={isFullscreenEnvEnabled() ? scrollRef : undefined} trackStickyPrompt={isFullscreenEnvEnabled() ? true : undefined} cursor={cursor} setCursor={setCursor} cursorNavRef={cursorNavRef} />
|
||||
<Messages messages={displayedMessages} tools={tools} commands={commands} verbose={verbose} toolJSX={toolJSX} toolUseConfirmQueue={toolUseConfirmQueue} inProgressToolUseIDs={viewedTeammateTask ? viewedTeammateTask.inProgressToolUseIDs ?? new Set() : inProgressToolUseIDs} isMessageSelectorVisible={isMessageSelectorVisible} conversationId={conversationId} screen={screen} streamingToolUses={streamingToolUses} showAllInTranscript={showAllInTranscript} agentDefinitions={agentDefinitions} onOpenRateLimitOptions={handleOpenRateLimitOptions} isLoading={isLoading} streamingText={isLoading && !viewedAgentTask ? visibleStreamingText : null} isBriefOnly={viewedAgentTask ? false : isBriefOnly} unseenDivider={viewedAgentTask ? undefined : unseenDivider} scrollRef={isFullscreenEnvEnabled() ? scrollRef : undefined} trackStickyPrompt={isFullscreenEnvEnabled() ? true : undefined} cursor={cursor} setCursor={setCursor} cursorNavRef={cursorNavRef} />
|
||||
<AwsAuthStatusBox />
|
||||
{/* Hide the processing placeholder while a modal is showing —
|
||||
it would sit at the last visible transcript row right above
|
||||
@@ -4933,7 +4928,7 @@ export function REPL({
|
||||
{"external" === 'ant' && skillImprovementSurvey.suggestion && <SkillImprovementSurvey isOpen={skillImprovementSurvey.isOpen} skillName={skillImprovementSurvey.suggestion.skillName} updates={skillImprovementSurvey.suggestion.updates} handleSelect={skillImprovementSurvey.handleSelect} inputValue={inputValue} setInputValue={setInputValue} />}
|
||||
{showIssueFlagBanner && <IssueFlagBanner />}
|
||||
{ }
|
||||
<PromptInput debug={debug} ideSelection={ideSelection} hasSuppressedDialogs={!!hasSuppressedDialogs} isLocalJSXCommandActive={isShowingLocalJSXCommand} getToolUseContext={getToolUseContext} toolPermissionContext={toolPermissionContext} setToolPermissionContext={setToolPermissionContext} apiKeyStatus={apiKeyStatus} commands={renderCommands} agents={agentDefinitions.activeAgents} isLoading={isLoading} onExit={handleExit} verbose={verbose} messages={messages} onAutoUpdaterResult={setAutoUpdaterResult} autoUpdaterResult={autoUpdaterResult} input={inputValue} onInputChange={setInputValue} mode={inputMode} onModeChange={setInputMode} stashedPrompt={stashedPrompt} setStashedPrompt={setStashedPrompt} submitCount={submitCount} onShowMessageSelector={handleShowMessageSelector} onMessageActionsEnter={
|
||||
<PromptInput debug={debug} ideSelection={ideSelection} hasSuppressedDialogs={!!hasSuppressedDialogs} isLocalJSXCommandActive={isShowingLocalJSXCommand} getToolUseContext={getToolUseContext} toolPermissionContext={toolPermissionContext} setToolPermissionContext={setToolPermissionContext} apiKeyStatus={apiKeyStatus} commands={commands} agents={agentDefinitions.activeAgents} isLoading={isLoading} onExit={handleExit} verbose={verbose} messages={messages} onAutoUpdaterResult={setAutoUpdaterResult} autoUpdaterResult={autoUpdaterResult} input={inputValue} onInputChange={setInputValue} mode={inputMode} onModeChange={setInputMode} stashedPrompt={stashedPrompt} setStashedPrompt={setStashedPrompt} submitCount={submitCount} onShowMessageSelector={handleShowMessageSelector} onMessageActionsEnter={
|
||||
// Works during isLoading — edit cancels first; uuid selection survives appends.
|
||||
feature('MESSAGE_ACTIONS') && isFullscreenEnvEnabled() && !disableMessageActions ? enterMessageActions : undefined} mcpClients={mcpClients} pastedContents={pastedContents} setPastedContents={setPastedContents} vimMode={vimMode} setVimMode={setVimMode} showBashesDialog={showBashesDialog} setShowBashesDialog={setShowBashesDialog} onSubmit={onSubmit} onAgentSubmit={onAgentSubmit} isSearchingHistory={isSearchingHistory} setIsSearchingHistory={setIsSearchingHistory} helpOpen={isHelpOpen} setHelpOpen={setIsHelpOpen} insertTextRef={feature('VOICE_MODE') ? insertTextRef : undefined} voiceInterimRange={voice.interimRange} />
|
||||
<SessionBackgroundHint onBackgroundSession={handleBackgroundSession} isLoading={isLoading} />
|
||||
|
||||
@@ -14,7 +14,6 @@ type ShimClient = {
|
||||
const originalFetch = globalThis.fetch
|
||||
const originalMacro = (globalThis as Record<string, unknown>).MACRO
|
||||
const originalEnv = {
|
||||
CLAUDE_CODE_USE_OPENAI: process.env.CLAUDE_CODE_USE_OPENAI,
|
||||
CLAUDE_CODE_USE_GEMINI: process.env.CLAUDE_CODE_USE_GEMINI,
|
||||
GEMINI_API_KEY: process.env.GEMINI_API_KEY,
|
||||
GEMINI_MODEL: process.env.GEMINI_MODEL,
|
||||
@@ -26,15 +25,6 @@ const originalEnv = {
|
||||
OPENAI_MODEL: process.env.OPENAI_MODEL,
|
||||
ANTHROPIC_API_KEY: process.env.ANTHROPIC_API_KEY,
|
||||
ANTHROPIC_AUTH_TOKEN: process.env.ANTHROPIC_AUTH_TOKEN,
|
||||
ANTHROPIC_CUSTOM_HEADERS: process.env.ANTHROPIC_CUSTOM_HEADERS,
|
||||
}
|
||||
|
||||
function restoreEnv(key: string, value: string | undefined): void {
|
||||
if (value === undefined) {
|
||||
delete process.env[key]
|
||||
} else {
|
||||
process.env[key] = value
|
||||
}
|
||||
}
|
||||
|
||||
beforeEach(() => {
|
||||
@@ -45,31 +35,27 @@ beforeEach(() => {
|
||||
process.env.GEMINI_BASE_URL = 'https://gemini.example/v1beta/openai'
|
||||
process.env.GEMINI_AUTH_MODE = 'api-key'
|
||||
|
||||
delete process.env.CLAUDE_CODE_USE_OPENAI
|
||||
delete process.env.GOOGLE_API_KEY
|
||||
delete process.env.OPENAI_API_KEY
|
||||
delete process.env.OPENAI_BASE_URL
|
||||
delete process.env.OPENAI_MODEL
|
||||
delete process.env.ANTHROPIC_API_KEY
|
||||
delete process.env.ANTHROPIC_AUTH_TOKEN
|
||||
delete process.env.ANTHROPIC_CUSTOM_HEADERS
|
||||
})
|
||||
|
||||
afterEach(() => {
|
||||
;(globalThis as Record<string, unknown>).MACRO = originalMacro
|
||||
restoreEnv('CLAUDE_CODE_USE_OPENAI', originalEnv.CLAUDE_CODE_USE_OPENAI)
|
||||
restoreEnv('CLAUDE_CODE_USE_GEMINI', originalEnv.CLAUDE_CODE_USE_GEMINI)
|
||||
restoreEnv('GEMINI_API_KEY', originalEnv.GEMINI_API_KEY)
|
||||
restoreEnv('GEMINI_MODEL', originalEnv.GEMINI_MODEL)
|
||||
restoreEnv('GEMINI_BASE_URL', originalEnv.GEMINI_BASE_URL)
|
||||
restoreEnv('GEMINI_AUTH_MODE', originalEnv.GEMINI_AUTH_MODE)
|
||||
restoreEnv('GOOGLE_API_KEY', originalEnv.GOOGLE_API_KEY)
|
||||
restoreEnv('OPENAI_API_KEY', originalEnv.OPENAI_API_KEY)
|
||||
restoreEnv('OPENAI_BASE_URL', originalEnv.OPENAI_BASE_URL)
|
||||
restoreEnv('OPENAI_MODEL', originalEnv.OPENAI_MODEL)
|
||||
restoreEnv('ANTHROPIC_API_KEY', originalEnv.ANTHROPIC_API_KEY)
|
||||
restoreEnv('ANTHROPIC_AUTH_TOKEN', originalEnv.ANTHROPIC_AUTH_TOKEN)
|
||||
restoreEnv('ANTHROPIC_CUSTOM_HEADERS', originalEnv.ANTHROPIC_CUSTOM_HEADERS)
|
||||
process.env.CLAUDE_CODE_USE_GEMINI = originalEnv.CLAUDE_CODE_USE_GEMINI
|
||||
process.env.GEMINI_API_KEY = originalEnv.GEMINI_API_KEY
|
||||
process.env.GEMINI_MODEL = originalEnv.GEMINI_MODEL
|
||||
process.env.GEMINI_BASE_URL = originalEnv.GEMINI_BASE_URL
|
||||
process.env.GEMINI_AUTH_MODE = originalEnv.GEMINI_AUTH_MODE
|
||||
process.env.GOOGLE_API_KEY = originalEnv.GOOGLE_API_KEY
|
||||
process.env.OPENAI_API_KEY = originalEnv.OPENAI_API_KEY
|
||||
process.env.OPENAI_BASE_URL = originalEnv.OPENAI_BASE_URL
|
||||
process.env.OPENAI_MODEL = originalEnv.OPENAI_MODEL
|
||||
process.env.ANTHROPIC_API_KEY = originalEnv.ANTHROPIC_API_KEY
|
||||
process.env.ANTHROPIC_AUTH_TOKEN = originalEnv.ANTHROPIC_AUTH_TOKEN
|
||||
globalThis.fetch = originalFetch
|
||||
})
|
||||
|
||||
@@ -136,135 +122,3 @@ test('routes Gemini provider requests through the OpenAI-compatible shim', async
|
||||
model: 'gemini-2.0-flash',
|
||||
})
|
||||
})
|
||||
|
||||
test('strips Anthropic-specific custom headers before sending OpenAI-compatible shim requests', async () => {
|
||||
let capturedHeaders: Headers | undefined
|
||||
|
||||
process.env.CLAUDE_CODE_USE_OPENAI = '1'
|
||||
process.env.OPENAI_API_KEY = 'openai-test-key'
|
||||
process.env.OPENAI_BASE_URL = 'http://example.test/v1'
|
||||
process.env.OPENAI_MODEL = 'gpt-4o'
|
||||
process.env.ANTHROPIC_CUSTOM_HEADERS = [
|
||||
'anthropic-version: 2023-06-01',
|
||||
'anthropic-beta: prompt-caching-2024-07-31',
|
||||
'x-anthropic-additional-protection: true',
|
||||
'x-claude-remote-session-id: remote-123',
|
||||
'x-app: cli',
|
||||
'x-safe-header: keep-me',
|
||||
].join('\n')
|
||||
|
||||
globalThis.fetch = (async (_input, init) => {
|
||||
capturedHeaders = new Headers(init?.headers)
|
||||
|
||||
return new Response(
|
||||
JSON.stringify({
|
||||
id: 'chatcmpl-openai',
|
||||
model: 'gpt-4o',
|
||||
choices: [
|
||||
{
|
||||
message: {
|
||||
role: 'assistant',
|
||||
content: 'ok',
|
||||
},
|
||||
finish_reason: 'stop',
|
||||
},
|
||||
],
|
||||
usage: {
|
||||
prompt_tokens: 8,
|
||||
completion_tokens: 3,
|
||||
total_tokens: 11,
|
||||
},
|
||||
}),
|
||||
{
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
},
|
||||
},
|
||||
)
|
||||
}) as FetchType
|
||||
|
||||
const client = (await getAnthropicClient({
|
||||
maxRetries: 0,
|
||||
model: 'gpt-4o',
|
||||
})) as unknown as ShimClient
|
||||
|
||||
await client.beta.messages.create({
|
||||
model: 'gpt-4o',
|
||||
system: 'test system',
|
||||
messages: [{ role: 'user', content: 'hello' }],
|
||||
max_tokens: 64,
|
||||
stream: false,
|
||||
})
|
||||
|
||||
expect(capturedHeaders?.get('anthropic-version')).toBeNull()
|
||||
expect(capturedHeaders?.get('anthropic-beta')).toBeNull()
|
||||
expect(capturedHeaders?.get('x-anthropic-additional-protection')).toBeNull()
|
||||
expect(capturedHeaders?.get('x-claude-remote-session-id')).toBeNull()
|
||||
expect(capturedHeaders?.get('x-app')).toBeNull()
|
||||
expect(capturedHeaders?.get('x-safe-header')).toBe('keep-me')
|
||||
expect(capturedHeaders?.get('authorization')).toBe('Bearer openai-test-key')
|
||||
})
|
||||
|
||||
test('strips Anthropic-specific custom headers on providerOverride shim requests too', async () => {
|
||||
let capturedHeaders: Headers | undefined
|
||||
|
||||
process.env.ANTHROPIC_CUSTOM_HEADERS = [
|
||||
'anthropic-version: 2023-06-01',
|
||||
'anthropic-beta: prompt-caching-2024-07-31',
|
||||
'x-claude-remote-session-id: remote-123',
|
||||
'x-safe-header: keep-me',
|
||||
].join('\n')
|
||||
|
||||
globalThis.fetch = (async (_input, init) => {
|
||||
capturedHeaders = new Headers(init?.headers)
|
||||
|
||||
return new Response(
|
||||
JSON.stringify({
|
||||
id: 'chatcmpl-provider-override',
|
||||
model: 'gpt-4o',
|
||||
choices: [
|
||||
{
|
||||
message: {
|
||||
role: 'assistant',
|
||||
content: 'ok',
|
||||
},
|
||||
finish_reason: 'stop',
|
||||
},
|
||||
],
|
||||
usage: {
|
||||
prompt_tokens: 8,
|
||||
completion_tokens: 3,
|
||||
total_tokens: 11,
|
||||
},
|
||||
}),
|
||||
{
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
},
|
||||
},
|
||||
)
|
||||
}) as FetchType
|
||||
|
||||
const client = (await getAnthropicClient({
|
||||
maxRetries: 0,
|
||||
providerOverride: {
|
||||
model: 'gpt-4o',
|
||||
baseURL: 'http://example.test/v1',
|
||||
apiKey: 'provider-test-key',
|
||||
},
|
||||
})) as unknown as ShimClient
|
||||
|
||||
await client.beta.messages.create({
|
||||
model: 'unused',
|
||||
system: 'test system',
|
||||
messages: [{ role: 'user', content: 'hello' }],
|
||||
max_tokens: 64,
|
||||
stream: false,
|
||||
})
|
||||
|
||||
expect(capturedHeaders?.get('anthropic-version')).toBeNull()
|
||||
expect(capturedHeaders?.get('anthropic-beta')).toBeNull()
|
||||
expect(capturedHeaders?.get('x-claude-remote-session-id')).toBeNull()
|
||||
expect(capturedHeaders?.get('x-safe-header')).toBe('keep-me')
|
||||
expect(capturedHeaders?.get('authorization')).toBe('Bearer provider-test-key')
|
||||
})
|
||||
|
||||
@@ -177,8 +177,7 @@ export async function getAnthropicClient({
|
||||
if (
|
||||
isEnvTruthy(process.env.CLAUDE_CODE_USE_OPENAI) ||
|
||||
isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB) ||
|
||||
isEnvTruthy(process.env.CLAUDE_CODE_USE_GEMINI) ||
|
||||
isEnvTruthy(process.env.CLAUDE_CODE_USE_MISTRAL)
|
||||
isEnvTruthy(process.env.CLAUDE_CODE_USE_GEMINI)
|
||||
) {
|
||||
const { createOpenAIShimClient } = await import('./openaiShim.js')
|
||||
return createOpenAIShimClient({
|
||||
|
||||
@@ -465,37 +465,6 @@ describe('Codex request translation', () => {
|
||||
])
|
||||
})
|
||||
|
||||
test('strips leaked reasoning preamble from completed Codex text responses', () => {
|
||||
const message = convertCodexResponseToAnthropicMessage(
|
||||
{
|
||||
id: 'resp_1',
|
||||
model: 'gpt-5.4',
|
||||
output: [
|
||||
{
|
||||
type: 'message',
|
||||
role: 'assistant',
|
||||
content: [
|
||||
{
|
||||
type: 'output_text',
|
||||
text:
|
||||
'The user just said "hey" - a simple greeting. I should respond briefly and friendly.\n\nHey! How can I help you today?',
|
||||
},
|
||||
],
|
||||
},
|
||||
],
|
||||
usage: { input_tokens: 12, output_tokens: 4 },
|
||||
},
|
||||
'gpt-5.4',
|
||||
)
|
||||
|
||||
expect(message.content).toEqual([
|
||||
{
|
||||
type: 'text',
|
||||
text: 'Hey! How can I help you today?',
|
||||
},
|
||||
])
|
||||
})
|
||||
|
||||
test('translates Codex SSE text stream into Anthropic events', async () => {
|
||||
const responseText = [
|
||||
'event: response.output_item.added',
|
||||
@@ -526,44 +495,4 @@ describe('Codex request translation', () => {
|
||||
'message_stop',
|
||||
])
|
||||
})
|
||||
|
||||
test('strips leaked reasoning preamble from Codex SSE text stream', async () => {
|
||||
const responseText = [
|
||||
'event: response.output_item.added',
|
||||
'data: {"type":"response.output_item.added","item":{"id":"msg_1","type":"message","status":"in_progress","content":[],"role":"assistant"},"output_index":0,"sequence_number":0}',
|
||||
'',
|
||||
'event: response.content_part.added',
|
||||
'data: {"type":"response.content_part.added","content_index":0,"item_id":"msg_1","output_index":0,"part":{"type":"output_text","text":""},"sequence_number":1}',
|
||||
'',
|
||||
'event: response.output_text.delta',
|
||||
'data: {"type":"response.output_text.delta","content_index":0,"delta":"The user just said \\"hey\\" - a simple greeting. I should respond briefly and friendly.\\n\\nHey! How can I help you today?","item_id":"msg_1","output_index":0,"sequence_number":2}',
|
||||
'',
|
||||
'event: response.output_item.done',
|
||||
'data: {"type":"response.output_item.done","item":{"id":"msg_1","type":"message","status":"completed","content":[{"type":"output_text","text":"The user just said \\"hey\\" - a simple greeting. I should respond briefly and friendly.\\n\\nHey! How can I help you today?"}],"role":"assistant"},"output_index":0,"sequence_number":3}',
|
||||
'',
|
||||
'event: response.completed',
|
||||
'data: {"type":"response.completed","response":{"id":"resp_1","status":"completed","model":"gpt-5.4","output":[{"type":"message","role":"assistant","content":[{"type":"output_text","text":"The user just said \\"hey\\" - a simple greeting. I should respond briefly and friendly.\\n\\nHey! How can I help you today?"}]}],"usage":{"input_tokens":2,"output_tokens":1}},"sequence_number":4}',
|
||||
'',
|
||||
].join('\n')
|
||||
|
||||
const stream = new ReadableStream({
|
||||
start(controller) {
|
||||
controller.enqueue(new TextEncoder().encode(responseText))
|
||||
controller.close()
|
||||
},
|
||||
})
|
||||
|
||||
const textDeltas: string[] = []
|
||||
for await (const event of codexStreamToAnthropic(
|
||||
new Response(stream),
|
||||
'gpt-5.4',
|
||||
)) {
|
||||
const delta = (event as { delta?: { type?: string; text?: string } }).delta
|
||||
if (delta?.type === 'text_delta' && typeof delta.text === 'string') {
|
||||
textDeltas.push(delta.text)
|
||||
}
|
||||
}
|
||||
|
||||
expect(textDeltas).toEqual(['Hey! How can I help you today?'])
|
||||
})
|
||||
})
|
||||
|
||||
@@ -4,11 +4,6 @@ import type {
|
||||
ResolvedProviderRequest,
|
||||
} from './providerConfig.js'
|
||||
import { sanitizeSchemaForOpenAICompat } from './openaiSchemaSanitizer.js'
|
||||
import {
|
||||
looksLikeLeakedReasoningPrefix,
|
||||
shouldBufferPotentialReasoningPrefix,
|
||||
stripLeakedReasoningPreamble,
|
||||
} from './reasoningLeakSanitizer.js'
|
||||
|
||||
export interface AnthropicUsage {
|
||||
input_tokens: number
|
||||
@@ -80,17 +75,12 @@ type CodexSseEvent = {
|
||||
function makeUsage(usage?: {
|
||||
input_tokens?: number
|
||||
output_tokens?: number
|
||||
input_tokens_details?: { cached_tokens?: number }
|
||||
prompt_tokens_details?: { cached_tokens?: number }
|
||||
}): AnthropicUsage {
|
||||
return {
|
||||
input_tokens: usage?.input_tokens ?? 0,
|
||||
output_tokens: usage?.output_tokens ?? 0,
|
||||
cache_creation_input_tokens: 0,
|
||||
cache_read_input_tokens:
|
||||
usage?.input_tokens_details?.cached_tokens ??
|
||||
usage?.prompt_tokens_details?.cached_tokens ??
|
||||
0,
|
||||
cache_read_input_tokens: 0,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -688,34 +678,17 @@ export async function* codexStreamToAnthropic(
|
||||
{ index: number; toolUseId: string }
|
||||
>()
|
||||
let activeTextBlockIndex: number | null = null
|
||||
let activeTextBuffer = ''
|
||||
let textBufferMode: 'none' | 'pending' | 'strip' = 'none'
|
||||
let nextContentBlockIndex = 0
|
||||
let sawToolUse = false
|
||||
let finalResponse: Record<string, any> | undefined
|
||||
|
||||
const closeActiveTextBlock = async function* () {
|
||||
if (activeTextBlockIndex === null) return
|
||||
if (textBufferMode !== 'none') {
|
||||
const sanitized = stripLeakedReasoningPreamble(activeTextBuffer)
|
||||
if (sanitized) {
|
||||
yield {
|
||||
type: 'content_block_delta',
|
||||
index: activeTextBlockIndex,
|
||||
delta: {
|
||||
type: 'text_delta',
|
||||
text: sanitized,
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
yield {
|
||||
type: 'content_block_stop',
|
||||
index: activeTextBlockIndex,
|
||||
}
|
||||
activeTextBlockIndex = null
|
||||
activeTextBuffer = ''
|
||||
textBufferMode = 'none'
|
||||
}
|
||||
|
||||
const startTextBlockIfNeeded = async function* () {
|
||||
@@ -791,36 +764,7 @@ export async function* codexStreamToAnthropic(
|
||||
|
||||
if (event.event === 'response.output_text.delta') {
|
||||
yield* startTextBlockIfNeeded()
|
||||
activeTextBuffer += payload.delta ?? ''
|
||||
if (activeTextBlockIndex !== null) {
|
||||
if (
|
||||
textBufferMode === 'strip' ||
|
||||
looksLikeLeakedReasoningPrefix(activeTextBuffer)
|
||||
) {
|
||||
textBufferMode = 'strip'
|
||||
continue
|
||||
}
|
||||
|
||||
if (textBufferMode === 'pending') {
|
||||
if (shouldBufferPotentialReasoningPrefix(activeTextBuffer)) {
|
||||
continue
|
||||
}
|
||||
yield {
|
||||
type: 'content_block_delta',
|
||||
index: activeTextBlockIndex,
|
||||
delta: {
|
||||
type: 'text_delta',
|
||||
text: activeTextBuffer,
|
||||
},
|
||||
}
|
||||
textBufferMode = 'none'
|
||||
continue
|
||||
}
|
||||
|
||||
if (shouldBufferPotentialReasoningPrefix(activeTextBuffer)) {
|
||||
textBufferMode = 'pending'
|
||||
continue
|
||||
}
|
||||
yield {
|
||||
type: 'content_block_delta',
|
||||
index: activeTextBlockIndex,
|
||||
@@ -895,16 +839,8 @@ export async function* codexStreamToAnthropic(
|
||||
stop_sequence: null,
|
||||
},
|
||||
usage: {
|
||||
// Subtract cached tokens: OpenAI includes them in input_tokens,
|
||||
// but Anthropic convention treats input_tokens as non-cached only.
|
||||
input_tokens: (finalResponse?.usage?.input_tokens ?? 0) -
|
||||
(finalResponse?.usage?.input_tokens_details?.cached_tokens ??
|
||||
finalResponse?.usage?.prompt_tokens_details?.cached_tokens ?? 0),
|
||||
input_tokens: finalResponse?.usage?.input_tokens ?? 0,
|
||||
output_tokens: finalResponse?.usage?.output_tokens ?? 0,
|
||||
cache_read_input_tokens:
|
||||
finalResponse?.usage?.input_tokens_details?.cached_tokens ??
|
||||
finalResponse?.usage?.prompt_tokens_details?.cached_tokens ??
|
||||
0,
|
||||
},
|
||||
}
|
||||
yield { type: 'message_stop' }
|
||||
@@ -923,7 +859,7 @@ export function convertCodexResponseToAnthropicMessage(
|
||||
if (part?.type === 'output_text') {
|
||||
content.push({
|
||||
type: 'text',
|
||||
text: stripLeakedReasoningPreamble(part.text ?? ''),
|
||||
text: part.text ?? '',
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -7,10 +7,6 @@ const originalEnv = {
|
||||
OPENAI_BASE_URL: process.env.OPENAI_BASE_URL,
|
||||
OPENAI_API_KEY: process.env.OPENAI_API_KEY,
|
||||
OPENAI_MODEL: process.env.OPENAI_MODEL,
|
||||
CLAUDE_CODE_USE_GITHUB: process.env.CLAUDE_CODE_USE_GITHUB,
|
||||
GITHUB_TOKEN: process.env.GITHUB_TOKEN,
|
||||
GH_TOKEN: process.env.GH_TOKEN,
|
||||
CLAUDE_CODE_USE_OPENAI: process.env.CLAUDE_CODE_USE_OPENAI,
|
||||
CLAUDE_CODE_USE_GEMINI: process.env.CLAUDE_CODE_USE_GEMINI,
|
||||
GEMINI_API_KEY: process.env.GEMINI_API_KEY,
|
||||
GOOGLE_API_KEY: process.env.GOOGLE_API_KEY,
|
||||
@@ -19,7 +15,6 @@ const originalEnv = {
|
||||
GEMINI_BASE_URL: process.env.GEMINI_BASE_URL,
|
||||
GEMINI_MODEL: process.env.GEMINI_MODEL,
|
||||
GOOGLE_CLOUD_PROJECT: process.env.GOOGLE_CLOUD_PROJECT,
|
||||
ANTHROPIC_CUSTOM_HEADERS: process.env.ANTHROPIC_CUSTOM_HEADERS,
|
||||
}
|
||||
|
||||
const originalFetch = globalThis.fetch
|
||||
@@ -75,10 +70,6 @@ beforeEach(() => {
|
||||
process.env.OPENAI_BASE_URL = 'http://example.test/v1'
|
||||
process.env.OPENAI_API_KEY = 'test-key'
|
||||
delete process.env.OPENAI_MODEL
|
||||
delete process.env.CLAUDE_CODE_USE_GITHUB
|
||||
delete process.env.GITHUB_TOKEN
|
||||
delete process.env.GH_TOKEN
|
||||
delete process.env.CLAUDE_CODE_USE_OPENAI
|
||||
delete process.env.CLAUDE_CODE_USE_GEMINI
|
||||
delete process.env.GEMINI_API_KEY
|
||||
delete process.env.GOOGLE_API_KEY
|
||||
@@ -87,17 +78,12 @@ beforeEach(() => {
|
||||
delete process.env.GEMINI_BASE_URL
|
||||
delete process.env.GEMINI_MODEL
|
||||
delete process.env.GOOGLE_CLOUD_PROJECT
|
||||
delete process.env.ANTHROPIC_CUSTOM_HEADERS
|
||||
})
|
||||
|
||||
afterEach(() => {
|
||||
restoreEnv('OPENAI_BASE_URL', originalEnv.OPENAI_BASE_URL)
|
||||
restoreEnv('OPENAI_API_KEY', originalEnv.OPENAI_API_KEY)
|
||||
restoreEnv('OPENAI_MODEL', originalEnv.OPENAI_MODEL)
|
||||
restoreEnv('CLAUDE_CODE_USE_GITHUB', originalEnv.CLAUDE_CODE_USE_GITHUB)
|
||||
restoreEnv('GITHUB_TOKEN', originalEnv.GITHUB_TOKEN)
|
||||
restoreEnv('GH_TOKEN', originalEnv.GH_TOKEN)
|
||||
restoreEnv('CLAUDE_CODE_USE_OPENAI', originalEnv.CLAUDE_CODE_USE_OPENAI)
|
||||
restoreEnv('CLAUDE_CODE_USE_GEMINI', originalEnv.CLAUDE_CODE_USE_GEMINI)
|
||||
restoreEnv('GEMINI_API_KEY', originalEnv.GEMINI_API_KEY)
|
||||
restoreEnv('GOOGLE_API_KEY', originalEnv.GOOGLE_API_KEY)
|
||||
@@ -106,227 +92,9 @@ afterEach(() => {
|
||||
restoreEnv('GEMINI_BASE_URL', originalEnv.GEMINI_BASE_URL)
|
||||
restoreEnv('GEMINI_MODEL', originalEnv.GEMINI_MODEL)
|
||||
restoreEnv('GOOGLE_CLOUD_PROJECT', originalEnv.GOOGLE_CLOUD_PROJECT)
|
||||
restoreEnv('ANTHROPIC_CUSTOM_HEADERS', originalEnv.ANTHROPIC_CUSTOM_HEADERS)
|
||||
globalThis.fetch = originalFetch
|
||||
})
|
||||
|
||||
test('strips canonical Anthropic headers from direct shim defaultHeaders', async () => {
|
||||
let capturedHeaders: Headers | undefined
|
||||
|
||||
globalThis.fetch = (async (_input, init) => {
|
||||
capturedHeaders = new Headers(init?.headers)
|
||||
|
||||
return new Response(
|
||||
JSON.stringify({
|
||||
id: 'chatcmpl-1',
|
||||
model: 'gpt-4o',
|
||||
choices: [
|
||||
{
|
||||
message: {
|
||||
role: 'assistant',
|
||||
content: 'ok',
|
||||
},
|
||||
finish_reason: 'stop',
|
||||
},
|
||||
],
|
||||
usage: {
|
||||
prompt_tokens: 8,
|
||||
completion_tokens: 3,
|
||||
total_tokens: 11,
|
||||
},
|
||||
}),
|
||||
{
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
},
|
||||
},
|
||||
)
|
||||
}) as FetchType
|
||||
|
||||
const client = createOpenAIShimClient({
|
||||
defaultHeaders: {
|
||||
'anthropic-version': '2023-06-01',
|
||||
'anthropic-beta': 'prompt-caching-2024-07-31',
|
||||
'x-anthropic-additional-protection': 'true',
|
||||
'x-claude-remote-session-id': 'remote-123',
|
||||
'x-app': 'cli',
|
||||
'x-client-app': 'sdk',
|
||||
'x-safe-header': 'keep-me',
|
||||
},
|
||||
}) as OpenAIShimClient
|
||||
|
||||
await client.beta.messages.create({
|
||||
model: 'gpt-4o',
|
||||
system: 'test system',
|
||||
messages: [{ role: 'user', content: 'hello' }],
|
||||
max_tokens: 64,
|
||||
stream: false,
|
||||
})
|
||||
|
||||
expect(capturedHeaders?.get('anthropic-version')).toBeNull()
|
||||
expect(capturedHeaders?.get('anthropic-beta')).toBeNull()
|
||||
expect(capturedHeaders?.get('x-anthropic-additional-protection')).toBeNull()
|
||||
expect(capturedHeaders?.get('x-claude-remote-session-id')).toBeNull()
|
||||
expect(capturedHeaders?.get('x-app')).toBeNull()
|
||||
expect(capturedHeaders?.get('x-client-app')).toBeNull()
|
||||
expect(capturedHeaders?.get('x-safe-header')).toBe('keep-me')
|
||||
})
|
||||
|
||||
test('strips canonical Anthropic headers from per-request shim headers too', async () => {
|
||||
let capturedHeaders: Headers | undefined
|
||||
|
||||
globalThis.fetch = (async (_input, init) => {
|
||||
capturedHeaders = new Headers(init?.headers)
|
||||
|
||||
return new Response(
|
||||
JSON.stringify({
|
||||
id: 'chatcmpl-1',
|
||||
model: 'gpt-4o',
|
||||
choices: [
|
||||
{
|
||||
message: {
|
||||
role: 'assistant',
|
||||
content: 'ok',
|
||||
},
|
||||
finish_reason: 'stop',
|
||||
},
|
||||
],
|
||||
usage: {
|
||||
prompt_tokens: 8,
|
||||
completion_tokens: 3,
|
||||
total_tokens: 11,
|
||||
},
|
||||
}),
|
||||
{
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
},
|
||||
},
|
||||
)
|
||||
}) as FetchType
|
||||
|
||||
const client = createOpenAIShimClient({}) as OpenAIShimClient
|
||||
|
||||
await client.beta.messages.create(
|
||||
{
|
||||
model: 'gpt-4o',
|
||||
system: 'test system',
|
||||
messages: [{ role: 'user', content: 'hello' }],
|
||||
max_tokens: 64,
|
||||
stream: false,
|
||||
},
|
||||
{
|
||||
headers: {
|
||||
'anthropic-version': '2023-06-01',
|
||||
'anthropic-beta': 'prompt-caching-2024-07-31',
|
||||
'x-safe-header': 'keep-me',
|
||||
},
|
||||
},
|
||||
)
|
||||
|
||||
expect(capturedHeaders?.get('anthropic-version')).toBeNull()
|
||||
expect(capturedHeaders?.get('anthropic-beta')).toBeNull()
|
||||
expect(capturedHeaders?.get('x-safe-header')).toBe('keep-me')
|
||||
})
|
||||
|
||||
test('strips Anthropic-specific headers on GitHub Codex transport requests', async () => {
|
||||
let capturedHeaders: Headers | undefined
|
||||
|
||||
process.env.CLAUDE_CODE_USE_GITHUB = '1'
|
||||
process.env.OPENAI_API_KEY = 'github-test-key'
|
||||
delete process.env.OPENAI_BASE_URL
|
||||
delete process.env.OPENAI_MODEL
|
||||
|
||||
globalThis.fetch = (async (_input, init) => {
|
||||
capturedHeaders = new Headers(init?.headers)
|
||||
|
||||
return new Response('', {
|
||||
status: 200,
|
||||
headers: {
|
||||
'Content-Type': 'text/event-stream',
|
||||
},
|
||||
})
|
||||
}) as FetchType
|
||||
|
||||
const client = createOpenAIShimClient({}) as OpenAIShimClient
|
||||
|
||||
await client.beta.messages.create(
|
||||
{
|
||||
model: 'github:gpt-5-codex',
|
||||
system: 'test system',
|
||||
messages: [{ role: 'user', content: 'hello' }],
|
||||
max_tokens: 64,
|
||||
stream: true,
|
||||
},
|
||||
{
|
||||
headers: {
|
||||
'anthropic-version': '2023-06-01',
|
||||
'anthropic-beta': 'prompt-caching-2024-07-31',
|
||||
'x-anthropic-additional-protection': 'true',
|
||||
'x-safe-header': 'keep-me',
|
||||
},
|
||||
},
|
||||
)
|
||||
|
||||
expect(capturedHeaders?.get('anthropic-version')).toBeNull()
|
||||
expect(capturedHeaders?.get('anthropic-beta')).toBeNull()
|
||||
expect(capturedHeaders?.get('x-anthropic-additional-protection')).toBeNull()
|
||||
expect(capturedHeaders?.get('x-safe-header')).toBe('keep-me')
|
||||
expect(capturedHeaders?.get('authorization')).toBe('Bearer github-test-key')
|
||||
expect(capturedHeaders?.get('editor-plugin-version')).toBe('copilot-chat/0.26.7')
|
||||
})
|
||||
|
||||
test('strips Anthropic-specific headers on GitHub Codex transport with providerOverride API key', async () => {
|
||||
let capturedHeaders: Headers | undefined
|
||||
|
||||
process.env.CLAUDE_CODE_USE_GITHUB = '1'
|
||||
process.env.OPENAI_API_KEY = 'env-should-not-win'
|
||||
delete process.env.OPENAI_BASE_URL
|
||||
delete process.env.OPENAI_MODEL
|
||||
|
||||
globalThis.fetch = (async (_input, init) => {
|
||||
capturedHeaders = new Headers(init?.headers)
|
||||
|
||||
return new Response('', {
|
||||
status: 200,
|
||||
headers: {
|
||||
'Content-Type': 'text/event-stream',
|
||||
},
|
||||
})
|
||||
}) as FetchType
|
||||
|
||||
const client = createOpenAIShimClient({
|
||||
providerOverride: {
|
||||
model: 'github:gpt-5-codex',
|
||||
baseURL: 'https://api.githubcopilot.com',
|
||||
apiKey: 'provider-override-key',
|
||||
},
|
||||
}) as OpenAIShimClient
|
||||
|
||||
await client.beta.messages.create(
|
||||
{
|
||||
model: 'ignored',
|
||||
system: 'test system',
|
||||
messages: [{ role: 'user', content: 'hello' }],
|
||||
max_tokens: 64,
|
||||
stream: true,
|
||||
},
|
||||
{
|
||||
headers: {
|
||||
'anthropic-version': '2023-06-01',
|
||||
'x-claude-remote-session-id': 'remote-123',
|
||||
'x-safe-header': 'keep-me',
|
||||
},
|
||||
},
|
||||
)
|
||||
|
||||
expect(capturedHeaders?.get('anthropic-version')).toBeNull()
|
||||
expect(capturedHeaders?.get('x-claude-remote-session-id')).toBeNull()
|
||||
expect(capturedHeaders?.get('x-safe-header')).toBe('keep-me')
|
||||
expect(capturedHeaders?.get('authorization')).toBe('Bearer provider-override-key')
|
||||
expect(capturedHeaders?.get('editor-plugin-version')).toBe('copilot-chat/0.26.7')
|
||||
})
|
||||
|
||||
test('preserves usage from final OpenAI stream chunk with empty choices', async () => {
|
||||
globalThis.fetch = (async (_input, init) => {
|
||||
const url = typeof _input === 'string' ? _input : _input.url
|
||||
@@ -2178,7 +1946,7 @@ test('coalesces consecutive assistant messages preserving tool_calls (issue #202
|
||||
expect(assistantMsgs?.[0]?.tool_calls?.length).toBeGreaterThan(0)
|
||||
})
|
||||
|
||||
test('non-streaming: reasoning_content emitted as thinking block only when content is null', async () => {
|
||||
test('non-streaming: reasoning_content emitted as thinking block, used as text when content is null', async () => {
|
||||
globalThis.fetch = (async (_input, _init) => {
|
||||
return new Response(
|
||||
JSON.stringify({
|
||||
@@ -2220,6 +1988,7 @@ test('non-streaming: reasoning_content emitted as thinking block only when conte
|
||||
|
||||
expect(result.content).toEqual([
|
||||
{ type: 'thinking', thinking: 'Let me think about this step by step.' },
|
||||
{ type: 'text', text: 'Let me think about this step by step.' },
|
||||
])
|
||||
})
|
||||
|
||||
@@ -2265,6 +2034,7 @@ test('non-streaming: empty string content does not fall through to reasoning_con
|
||||
|
||||
expect(result.content).toEqual([
|
||||
{ type: 'thinking', thinking: 'Chain of thought here.' },
|
||||
{ type: 'text', text: 'Chain of thought here.' },
|
||||
])
|
||||
})
|
||||
|
||||
@@ -2314,46 +2084,6 @@ test('non-streaming: real content takes precedence over reasoning_content', asyn
|
||||
])
|
||||
})
|
||||
|
||||
test('non-streaming: strips leaked reasoning preamble from assistant content', async () => {
|
||||
globalThis.fetch = (async () => {
|
||||
return new Response(
|
||||
JSON.stringify({
|
||||
id: 'chatcmpl-1',
|
||||
model: 'gpt-5-mini',
|
||||
choices: [
|
||||
{
|
||||
message: {
|
||||
role: 'assistant',
|
||||
content:
|
||||
'The user just said "hey" - a simple greeting. I should respond briefly and friendly.\n\nHey! How can I help you today?',
|
||||
},
|
||||
finish_reason: 'stop',
|
||||
},
|
||||
],
|
||||
usage: {
|
||||
prompt_tokens: 10,
|
||||
completion_tokens: 20,
|
||||
total_tokens: 30,
|
||||
},
|
||||
}),
|
||||
{ headers: { 'Content-Type': 'application/json' } },
|
||||
)
|
||||
}) as FetchType
|
||||
|
||||
const client = createOpenAIShimClient({}) as OpenAIShimClient
|
||||
const result = (await client.beta.messages.create({
|
||||
model: 'gpt-5-mini',
|
||||
system: 'test system',
|
||||
messages: [{ role: 'user', content: 'hey' }],
|
||||
max_tokens: 64,
|
||||
stream: false,
|
||||
})) as { content: Array<Record<string, unknown>> }
|
||||
|
||||
expect(result.content).toEqual([
|
||||
{ type: 'text', text: 'Hey! How can I help you today?' },
|
||||
])
|
||||
})
|
||||
|
||||
test('streaming: thinking block closed before tool call', async () => {
|
||||
globalThis.fetch = (async (_input, _init) => {
|
||||
const chunks = makeStreamChunks([
|
||||
@@ -2445,134 +2175,3 @@ test('streaming: thinking block closed before tool call', async () => {
|
||||
}
|
||||
expect(thinkingStart?.content_block?.type).toBe('thinking')
|
||||
})
|
||||
|
||||
test('streaming: strips leaked reasoning preamble from assistant content deltas', async () => {
|
||||
globalThis.fetch = (async () => {
|
||||
const chunks = makeStreamChunks([
|
||||
{
|
||||
id: 'chatcmpl-1',
|
||||
object: 'chat.completion.chunk',
|
||||
model: 'gpt-5-mini',
|
||||
choices: [
|
||||
{
|
||||
index: 0,
|
||||
delta: {
|
||||
role: 'assistant',
|
||||
content:
|
||||
'The user just said "hey" - a simple greeting. I should respond briefly and friendly.\n\nHey! How can I help you today?',
|
||||
},
|
||||
finish_reason: null,
|
||||
},
|
||||
],
|
||||
},
|
||||
{
|
||||
id: 'chatcmpl-1',
|
||||
object: 'chat.completion.chunk',
|
||||
model: 'gpt-5-mini',
|
||||
choices: [
|
||||
{
|
||||
index: 0,
|
||||
delta: {},
|
||||
finish_reason: 'stop',
|
||||
},
|
||||
],
|
||||
},
|
||||
])
|
||||
|
||||
return makeSseResponse(chunks)
|
||||
}) as FetchType
|
||||
|
||||
const client = createOpenAIShimClient({}) as OpenAIShimClient
|
||||
const result = await client.beta.messages
|
||||
.create({
|
||||
model: 'gpt-5-mini',
|
||||
system: 'test system',
|
||||
messages: [{ role: 'user', content: 'hey' }],
|
||||
max_tokens: 64,
|
||||
stream: true,
|
||||
})
|
||||
.withResponse()
|
||||
|
||||
const textDeltas: string[] = []
|
||||
for await (const event of result.data) {
|
||||
const delta = (event as { delta?: { type?: string; text?: string } }).delta
|
||||
if (delta?.type === 'text_delta' && typeof delta.text === 'string') {
|
||||
textDeltas.push(delta.text)
|
||||
}
|
||||
}
|
||||
|
||||
expect(textDeltas).toEqual(['Hey! How can I help you today?'])
|
||||
})
|
||||
|
||||
test('streaming: strips leaked reasoning preamble when split across multiple content chunks', async () => {
|
||||
globalThis.fetch = (async () => {
|
||||
const chunks = makeStreamChunks([
|
||||
{
|
||||
id: 'chatcmpl-1',
|
||||
object: 'chat.completion.chunk',
|
||||
model: 'gpt-5-mini',
|
||||
choices: [
|
||||
{
|
||||
index: 0,
|
||||
delta: {
|
||||
role: 'assistant',
|
||||
content: 'The user said "hey" - this is a simple greeting. ',
|
||||
},
|
||||
finish_reason: null,
|
||||
},
|
||||
],
|
||||
},
|
||||
{
|
||||
id: 'chatcmpl-1',
|
||||
object: 'chat.completion.chunk',
|
||||
model: 'gpt-5-mini',
|
||||
choices: [
|
||||
{
|
||||
index: 0,
|
||||
delta: {
|
||||
content:
|
||||
'I should respond in a friendly, concise way.\n\nHey! How can I help you today?',
|
||||
},
|
||||
finish_reason: null,
|
||||
},
|
||||
],
|
||||
},
|
||||
{
|
||||
id: 'chatcmpl-1',
|
||||
object: 'chat.completion.chunk',
|
||||
model: 'gpt-5-mini',
|
||||
choices: [
|
||||
{
|
||||
index: 0,
|
||||
delta: {},
|
||||
finish_reason: 'stop',
|
||||
},
|
||||
],
|
||||
},
|
||||
])
|
||||
|
||||
return makeSseResponse(chunks)
|
||||
}) as FetchType
|
||||
|
||||
const client = createOpenAIShimClient({}) as OpenAIShimClient
|
||||
|
||||
const result = await client.beta.messages
|
||||
.create({
|
||||
model: 'gpt-5-mini',
|
||||
system: 'test system',
|
||||
messages: [{ role: 'user', content: 'hey' }],
|
||||
max_tokens: 64,
|
||||
stream: true,
|
||||
})
|
||||
.withResponse()
|
||||
|
||||
const textDeltas: string[] = []
|
||||
for await (const event of result.data) {
|
||||
const delta = (event as { delta?: { type?: string; text?: string } }).delta
|
||||
if (delta?.type === 'text_delta' && typeof delta.text === 'string') {
|
||||
textDeltas.push(delta.text)
|
||||
}
|
||||
}
|
||||
|
||||
expect(textDeltas).toEqual(['Hey! How can I help you today?'])
|
||||
})
|
||||
|
||||
@@ -26,11 +26,6 @@ import { isEnvTruthy } from '../../utils/envUtils.js'
|
||||
import { resolveGeminiCredential } from '../../utils/geminiAuth.js'
|
||||
import { hydrateGeminiAccessTokenFromSecureStorage } from '../../utils/geminiCredentials.js'
|
||||
import { hydrateGithubModelsTokenFromSecureStorage } from '../../utils/githubModelsCredentials.js'
|
||||
import {
|
||||
looksLikeLeakedReasoningPrefix,
|
||||
shouldBufferPotentialReasoningPrefix,
|
||||
stripLeakedReasoningPreamble,
|
||||
} from './reasoningLeakSanitizer.js'
|
||||
import {
|
||||
codexStreamToAnthropic,
|
||||
collectCodexCompletedResponse,
|
||||
@@ -61,7 +56,6 @@ type SecretValueSource = Partial<{
|
||||
GEMINI_API_KEY: string
|
||||
GOOGLE_API_KEY: string
|
||||
GEMINI_ACCESS_TOKEN: string
|
||||
MISTRAL_API_KEY: string
|
||||
}>
|
||||
|
||||
const GITHUB_COPILOT_BASE = 'https://api.githubcopilot.com'
|
||||
@@ -81,36 +75,6 @@ function isGithubModelsMode(): boolean {
|
||||
return isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB)
|
||||
}
|
||||
|
||||
function isMistralMode(): boolean {
|
||||
return isEnvTruthy(process.env.CLAUDE_CODE_USE_MISTRAL)
|
||||
}
|
||||
|
||||
function filterAnthropicHeaders(
|
||||
headers: Record<string, string> | undefined,
|
||||
): Record<string, string> {
|
||||
if (!headers) return {}
|
||||
|
||||
const filtered: Record<string, string> = {}
|
||||
for (const [key, value] of Object.entries(headers)) {
|
||||
const lower = key.toLowerCase()
|
||||
if (
|
||||
lower.startsWith('x-anthropic') ||
|
||||
lower.startsWith('anthropic-') ||
|
||||
lower.startsWith('x-claude') ||
|
||||
lower === 'x-app' ||
|
||||
lower === 'x-client-app' ||
|
||||
lower === 'authorization' ||
|
||||
lower === 'x-api-key' ||
|
||||
lower === 'api-key'
|
||||
) {
|
||||
continue
|
||||
}
|
||||
filtered[key] = value
|
||||
}
|
||||
|
||||
return filtered
|
||||
}
|
||||
|
||||
function hasGeminiApiHost(baseUrl: string | undefined): boolean {
|
||||
if (!baseUrl) return false
|
||||
|
||||
@@ -569,14 +533,11 @@ function convertChunkUsage(
|
||||
): Partial<AnthropicUsage> | undefined {
|
||||
if (!usage) return undefined
|
||||
|
||||
const cached = usage.prompt_tokens_details?.cached_tokens ?? 0
|
||||
return {
|
||||
// Subtract cached tokens: OpenAI includes them in prompt_tokens,
|
||||
// but Anthropic convention treats input_tokens as non-cached only.
|
||||
input_tokens: (usage.prompt_tokens ?? 0) - cached,
|
||||
input_tokens: usage.prompt_tokens ?? 0,
|
||||
output_tokens: usage.completion_tokens ?? 0,
|
||||
cache_creation_input_tokens: 0,
|
||||
cache_read_input_tokens: cached,
|
||||
cache_read_input_tokens: usage.prompt_tokens_details?.cached_tokens ?? 0,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -627,8 +588,6 @@ async function* openaiStreamToAnthropic(
|
||||
let hasEmittedContentStart = false
|
||||
let hasEmittedThinkingStart = false
|
||||
let hasClosedThinking = false
|
||||
let activeTextBuffer = ''
|
||||
let textBufferMode: 'none' | 'pending' | 'strip' = 'none'
|
||||
let lastStopReason: 'tool_use' | 'max_tokens' | 'end_turn' | null = null
|
||||
let hasEmittedFinalUsage = false
|
||||
let hasProcessedFinishReason = false
|
||||
@@ -659,30 +618,6 @@ async function* openaiStreamToAnthropic(
|
||||
const decoder = new TextDecoder()
|
||||
let buffer = ''
|
||||
|
||||
const closeActiveContentBlock = async function* () {
|
||||
if (!hasEmittedContentStart) return
|
||||
|
||||
if (textBufferMode !== 'none') {
|
||||
const sanitized = stripLeakedReasoningPreamble(activeTextBuffer)
|
||||
if (sanitized) {
|
||||
yield {
|
||||
type: 'content_block_delta',
|
||||
index: contentBlockIndex,
|
||||
delta: { type: 'text_delta', text: sanitized },
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
yield {
|
||||
type: 'content_block_stop',
|
||||
index: contentBlockIndex,
|
||||
}
|
||||
contentBlockIndex++
|
||||
hasEmittedContentStart = false
|
||||
activeTextBuffer = ''
|
||||
textBufferMode = 'none'
|
||||
}
|
||||
|
||||
try {
|
||||
while (true) {
|
||||
const { done, value } = await reader.read()
|
||||
@@ -737,7 +672,6 @@ async function* openaiStreamToAnthropic(
|
||||
contentBlockIndex++
|
||||
hasClosedThinking = true
|
||||
}
|
||||
activeTextBuffer += delta.content
|
||||
if (!hasEmittedContentStart) {
|
||||
yield {
|
||||
type: 'content_block_start',
|
||||
@@ -746,35 +680,6 @@ async function* openaiStreamToAnthropic(
|
||||
}
|
||||
hasEmittedContentStart = true
|
||||
}
|
||||
|
||||
if (
|
||||
textBufferMode === 'strip' ||
|
||||
looksLikeLeakedReasoningPrefix(activeTextBuffer)
|
||||
) {
|
||||
textBufferMode = 'strip'
|
||||
continue
|
||||
}
|
||||
|
||||
if (textBufferMode === 'pending') {
|
||||
if (shouldBufferPotentialReasoningPrefix(activeTextBuffer)) {
|
||||
continue
|
||||
}
|
||||
yield {
|
||||
type: 'content_block_delta',
|
||||
index: contentBlockIndex,
|
||||
delta: {
|
||||
type: 'text_delta',
|
||||
text: activeTextBuffer,
|
||||
},
|
||||
}
|
||||
textBufferMode = 'none'
|
||||
continue
|
||||
}
|
||||
|
||||
if (shouldBufferPotentialReasoningPrefix(activeTextBuffer)) {
|
||||
textBufferMode = 'pending'
|
||||
continue
|
||||
}
|
||||
yield {
|
||||
type: 'content_block_delta',
|
||||
index: contentBlockIndex,
|
||||
@@ -793,7 +698,12 @@ async function* openaiStreamToAnthropic(
|
||||
hasClosedThinking = true
|
||||
}
|
||||
if (hasEmittedContentStart) {
|
||||
yield* closeActiveContentBlock()
|
||||
yield {
|
||||
type: 'content_block_stop',
|
||||
index: contentBlockIndex,
|
||||
}
|
||||
contentBlockIndex++
|
||||
hasEmittedContentStart = false
|
||||
}
|
||||
|
||||
const toolBlockIndex = contentBlockIndex
|
||||
@@ -876,7 +786,10 @@ async function* openaiStreamToAnthropic(
|
||||
}
|
||||
// Close any open content blocks
|
||||
if (hasEmittedContentStart) {
|
||||
yield* closeActiveContentBlock()
|
||||
yield {
|
||||
type: 'content_block_stop',
|
||||
index: contentBlockIndex,
|
||||
}
|
||||
}
|
||||
// Close active tool calls
|
||||
for (const [, tc] of activeToolCalls) {
|
||||
@@ -1023,7 +936,7 @@ class OpenAIShimMessages {
|
||||
private providerOverride?: { model: string; baseURL: string; apiKey: string }
|
||||
|
||||
constructor(defaultHeaders: Record<string, string>, reasoningEffort?: 'low' | 'medium' | 'high' | 'xhigh', providerOverride?: { model: string; baseURL: string; apiKey: string }) {
|
||||
this.defaultHeaders = filterAnthropicHeaders(defaultHeaders)
|
||||
this.defaultHeaders = defaultHeaders
|
||||
this.reasoningEffort = reasoningEffort
|
||||
this.providerOverride = providerOverride
|
||||
}
|
||||
@@ -1133,7 +1046,7 @@ class OpenAIShimMessages {
|
||||
params,
|
||||
defaultHeaders: {
|
||||
...this.defaultHeaders,
|
||||
...filterAnthropicHeaders(options?.headers),
|
||||
...(options?.headers ?? {}),
|
||||
...COPILOT_HEADERS,
|
||||
},
|
||||
signal: options?.signal,
|
||||
@@ -1165,7 +1078,7 @@ class OpenAIShimMessages {
|
||||
params,
|
||||
defaultHeaders: {
|
||||
...this.defaultHeaders,
|
||||
...filterAnthropicHeaders(options?.headers),
|
||||
...(options?.headers ?? {}),
|
||||
},
|
||||
signal: options?.signal,
|
||||
})
|
||||
@@ -1192,7 +1105,6 @@ class OpenAIShimMessages {
|
||||
model: request.resolvedModel,
|
||||
messages: openaiMessages,
|
||||
stream: params.stream ?? false,
|
||||
store: false,
|
||||
}
|
||||
// Convert max_tokens to max_completion_tokens for OpenAI API compatibility.
|
||||
// Azure OpenAI requires max_completion_tokens and does not accept max_tokens.
|
||||
@@ -1215,22 +1127,15 @@ class OpenAIShimMessages {
|
||||
}
|
||||
|
||||
const isGithub = isGithubModelsMode()
|
||||
const isMistral = isMistralMode()
|
||||
|
||||
const githubEndpointType = getGithubEndpointType(request.baseUrl)
|
||||
const isGithubCopilot = isGithub && githubEndpointType === 'copilot'
|
||||
const isGithubModels = isGithub && (githubEndpointType === 'models' || githubEndpointType === 'custom')
|
||||
|
||||
if ((isGithub || isMistral) && body.max_completion_tokens !== undefined) {
|
||||
if (isGithub && body.max_completion_tokens !== undefined) {
|
||||
body.max_tokens = body.max_completion_tokens
|
||||
delete body.max_completion_tokens
|
||||
}
|
||||
|
||||
// mistral also doesn't recognize body.store
|
||||
if (isMistral) {
|
||||
delete body.store
|
||||
}
|
||||
|
||||
if (params.temperature !== undefined) body.temperature = params.temperature
|
||||
if (params.top_p !== undefined) body.top_p = params.top_p
|
||||
|
||||
@@ -1265,11 +1170,12 @@ class OpenAIShimMessages {
|
||||
const headers: Record<string, string> = {
|
||||
'Content-Type': 'application/json',
|
||||
...this.defaultHeaders,
|
||||
...filterAnthropicHeaders(options?.headers),
|
||||
...(options?.headers ?? {}),
|
||||
}
|
||||
|
||||
const isGemini = isEnvTruthy(process.env.CLAUDE_CODE_USE_GEMINI)
|
||||
const apiKey = this.providerOverride?.apiKey ?? process.env.OPENAI_API_KEY ?? ''
|
||||
const isGemini = isGeminiMode()
|
||||
const apiKey =
|
||||
this.providerOverride?.apiKey ?? process.env.OPENAI_API_KEY ?? ''
|
||||
// Detect Azure endpoints by hostname (not raw URL) to prevent bypass via
|
||||
// path segments like https://evil.com/cognitiveservices.azure.com/
|
||||
let isAzure = false
|
||||
@@ -1373,7 +1279,6 @@ class OpenAIShimMessages {
|
||||
}>,
|
||||
),
|
||||
stream: params.stream ?? false,
|
||||
store: false,
|
||||
}
|
||||
|
||||
if (!Array.isArray(responsesBody.input) || responsesBody.input.length === 0) {
|
||||
@@ -1478,9 +1383,9 @@ class OpenAIShimMessages {
|
||||
const choice = data.choices?.[0]
|
||||
const content: Array<Record<string, unknown>> = []
|
||||
|
||||
// Some reasoning models (e.g. GLM-5) put their chain-of-thought in
|
||||
// reasoning_content while content stays null. Preserve it as a thinking
|
||||
// block, but do not surface it as visible assistant text.
|
||||
// Some reasoning models (e.g. GLM-5) put their reply in reasoning_content
|
||||
// while content stays null — emit reasoning as a thinking block, then
|
||||
// fall back to it for visible text if content is empty.
|
||||
const reasoningText = choice?.message?.reasoning_content
|
||||
if (typeof reasoningText === 'string' && reasoningText) {
|
||||
content.push({ type: 'thinking', thinking: reasoningText })
|
||||
@@ -1488,12 +1393,9 @@ class OpenAIShimMessages {
|
||||
const rawContent =
|
||||
choice?.message?.content !== '' && choice?.message?.content != null
|
||||
? choice?.message?.content
|
||||
: null
|
||||
: choice?.message?.reasoning_content
|
||||
if (typeof rawContent === 'string' && rawContent) {
|
||||
content.push({
|
||||
type: 'text',
|
||||
text: stripLeakedReasoningPreamble(rawContent),
|
||||
})
|
||||
content.push({ type: 'text', text: rawContent })
|
||||
} else if (Array.isArray(rawContent) && rawContent.length > 0) {
|
||||
const parts: string[] = []
|
||||
for (const part of rawContent) {
|
||||
@@ -1508,10 +1410,7 @@ class OpenAIShimMessages {
|
||||
}
|
||||
const joined = parts.join('\n')
|
||||
if (joined) {
|
||||
content.push({
|
||||
type: 'text',
|
||||
text: stripLeakedReasoningPreamble(joined),
|
||||
})
|
||||
content.push({ type: 'text', text: joined })
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1601,13 +1500,6 @@ export function createOpenAIShimClient(options: {
|
||||
if (process.env.GEMINI_MODEL && !process.env.OPENAI_MODEL) {
|
||||
process.env.OPENAI_MODEL = process.env.GEMINI_MODEL
|
||||
}
|
||||
} else if (isEnvTruthy(process.env.CLAUDE_CODE_USE_MISTRAL)) {
|
||||
process.env.OPENAI_BASE_URL =
|
||||
process.env.MISTRAL_BASE_URL ?? 'https://api.mistral.ai/v1'
|
||||
process.env.OPENAI_API_KEY = process.env.MISTRAL_API_KEY
|
||||
if (process.env.MISTRAL_MODEL) {
|
||||
process.env.OPENAI_MODEL = process.env.MISTRAL_MODEL
|
||||
}
|
||||
} else if (isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB)) {
|
||||
process.env.OPENAI_BASE_URL ??= GITHUB_COPILOT_BASE
|
||||
process.env.OPENAI_API_KEY ??=
|
||||
|
||||
@@ -7,7 +7,6 @@ import { isEnvTruthy } from '../../utils/envUtils.js'
|
||||
|
||||
export const DEFAULT_OPENAI_BASE_URL = 'https://api.openai.com/v1'
|
||||
export const DEFAULT_CODEX_BASE_URL = 'https://chatgpt.com/backend-api/codex'
|
||||
export const DEFAULT_MISTRAL_BASE_URL = 'https://api.mistral.ai/v1'
|
||||
/** Default GitHub Copilot API model when user selects copilot / github:copilot */
|
||||
export const DEFAULT_GITHUB_MODELS_API_MODEL = 'gpt-4o'
|
||||
|
||||
@@ -358,20 +357,15 @@ export function resolveProviderRequest(options?: {
|
||||
reasoningEffortOverride?: ReasoningEffort
|
||||
}): ResolvedProviderRequest {
|
||||
const isGithubMode = isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB)
|
||||
const isMistralMode = isEnvTruthy(process.env.CLAUDE_CODE_USE_MISTRAL)
|
||||
const requestedModel =
|
||||
options?.model?.trim() ||
|
||||
(isMistralMode
|
||||
? process.env.MISTRAL_MODEL?.trim()
|
||||
: process.env.OPENAI_MODEL?.trim()) ||
|
||||
process.env.OPENAI_MODEL?.trim() ||
|
||||
options?.fallbackModel?.trim() ||
|
||||
(isGithubMode ? 'github:copilot' : 'gpt-4o')
|
||||
const descriptor = parseModelDescriptor(requestedModel)
|
||||
const rawBaseUrl =
|
||||
asEnvUrl(options?.baseUrl) ??
|
||||
asEnvUrl(
|
||||
isMistralMode ? (process.env.MISTRAL_BASE_URL ?? DEFAULT_MISTRAL_BASE_URL) : process.env.OPENAI_BASE_URL,
|
||||
) ??
|
||||
asEnvUrl(process.env.OPENAI_BASE_URL) ??
|
||||
asEnvUrl(process.env.OPENAI_API_BASE)
|
||||
|
||||
const githubEndpointType = isGithubMode
|
||||
@@ -424,7 +418,6 @@ export function resolveProviderRequest(options?: {
|
||||
export function getAdditionalModelOptionsCacheScope(): string | null {
|
||||
if (!isEnvTruthy(process.env.CLAUDE_CODE_USE_OPENAI)) {
|
||||
if (!isEnvTruthy(process.env.CLAUDE_CODE_USE_GEMINI) &&
|
||||
!isEnvTruthy(process.env.CLAUDE_CODE_USE_MISTRAL) &&
|
||||
!isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB) &&
|
||||
!isEnvTruthy(process.env.CLAUDE_CODE_USE_BEDROCK) &&
|
||||
!isEnvTruthy(process.env.CLAUDE_CODE_USE_VERTEX) &&
|
||||
|
||||
@@ -1,46 +0,0 @@
|
||||
import { describe, expect, test } from 'bun:test'
|
||||
|
||||
import {
|
||||
looksLikeLeakedReasoningPrefix,
|
||||
shouldBufferPotentialReasoningPrefix,
|
||||
stripLeakedReasoningPreamble,
|
||||
} from './reasoningLeakSanitizer.ts'
|
||||
|
||||
describe('reasoning leak sanitizer', () => {
|
||||
test('strips explicit internal reasoning preambles', () => {
|
||||
const text =
|
||||
'The user just said "hey" - a simple greeting. I should respond briefly and friendly.\n\nHey! How can I help you today?'
|
||||
|
||||
expect(looksLikeLeakedReasoningPrefix(text)).toBe(true)
|
||||
expect(stripLeakedReasoningPreamble(text)).toBe(
|
||||
'Hey! How can I help you today?',
|
||||
)
|
||||
})
|
||||
|
||||
test('does not strip normal user-facing advice that mentions "the user should"', () => {
|
||||
const text =
|
||||
'The user should reset their password immediately.\n\nHere are the steps...'
|
||||
|
||||
expect(looksLikeLeakedReasoningPrefix(text)).toBe(false)
|
||||
expect(shouldBufferPotentialReasoningPrefix(text)).toBe(false)
|
||||
expect(stripLeakedReasoningPreamble(text)).toBe(text)
|
||||
})
|
||||
|
||||
test('does not strip legitimate first-person advice about responding to an incident', () => {
|
||||
const text =
|
||||
'I need to respond to this security incident immediately. The system is compromised.\n\nHere are the remediation steps...'
|
||||
|
||||
expect(looksLikeLeakedReasoningPrefix(text)).toBe(false)
|
||||
expect(shouldBufferPotentialReasoningPrefix(text)).toBe(false)
|
||||
expect(stripLeakedReasoningPreamble(text)).toBe(text)
|
||||
})
|
||||
|
||||
test('does not strip legitimate first-person advice about answering a support ticket', () => {
|
||||
const text =
|
||||
'I need to answer the support ticket before end of day. The customer is waiting.\n\nHere is the response I drafted...'
|
||||
|
||||
expect(looksLikeLeakedReasoningPrefix(text)).toBe(false)
|
||||
expect(shouldBufferPotentialReasoningPrefix(text)).toBe(false)
|
||||
expect(stripLeakedReasoningPreamble(text)).toBe(text)
|
||||
})
|
||||
})
|
||||
@@ -1,54 +0,0 @@
|
||||
const EXPLICIT_REASONING_START_RE =
|
||||
/^\s*(i should\b|i need to\b|let me think\b|the task\b|the request\b)/i
|
||||
|
||||
const EXPLICIT_REASONING_META_RE =
|
||||
/\b(user|request|question|prompt|message|task|greeting|small talk|briefly|friendly|concise)\b/i
|
||||
|
||||
const USER_META_START_RE =
|
||||
/^\s*the user\s+(just\s+)?(said|asked|is asking|wants|wanted|mentioned|seems|appears)\b/i
|
||||
|
||||
const USER_REASONING_RE =
|
||||
/^\s*the user\s+(just\s+)?(said|asked|is asking|wants|wanted|mentioned|seems|appears)\b[\s\S]*\b(i should|i need to|let me think|respond|reply|answer|greeting|small talk|briefly|friendly|concise)\b/i
|
||||
|
||||
export function shouldBufferPotentialReasoningPrefix(text: string): boolean {
|
||||
const normalized = text.trim()
|
||||
if (!normalized) return false
|
||||
|
||||
if (looksLikeLeakedReasoningPrefix(normalized)) {
|
||||
return true
|
||||
}
|
||||
|
||||
const hasParagraphBoundary = /\n\s*\n/.test(normalized)
|
||||
if (hasParagraphBoundary) {
|
||||
return false
|
||||
}
|
||||
|
||||
return (
|
||||
EXPLICIT_REASONING_START_RE.test(normalized) ||
|
||||
USER_META_START_RE.test(normalized)
|
||||
)
|
||||
}
|
||||
|
||||
export function looksLikeLeakedReasoningPrefix(text: string): boolean {
|
||||
const normalized = text.trim()
|
||||
if (!normalized) return false
|
||||
return (
|
||||
(EXPLICIT_REASONING_START_RE.test(normalized) &&
|
||||
EXPLICIT_REASONING_META_RE.test(normalized)) ||
|
||||
USER_REASONING_RE.test(normalized)
|
||||
)
|
||||
}
|
||||
|
||||
export function stripLeakedReasoningPreamble(text: string): string {
|
||||
const normalized = text.replace(/\r\n/g, '\n')
|
||||
const parts = normalized.split(/\n\s*\n/)
|
||||
if (parts.length < 2) return text
|
||||
|
||||
const first = parts[0]?.trim() ?? ''
|
||||
if (!looksLikeLeakedReasoningPrefix(first)) {
|
||||
return text
|
||||
}
|
||||
|
||||
const remainder = parts.slice(1).join('\n\n').trim()
|
||||
return remainder || text
|
||||
}
|
||||
@@ -1,46 +0,0 @@
|
||||
import { describe, expect, test } from 'bun:test'
|
||||
import {
|
||||
getEffectiveContextWindowSize,
|
||||
getAutoCompactThreshold,
|
||||
} from './autoCompact.ts'
|
||||
|
||||
describe('getEffectiveContextWindowSize', () => {
|
||||
test('returns positive value for known models with large context windows', () => {
|
||||
// claude-sonnet-4 has 200k context
|
||||
const effective = getEffectiveContextWindowSize('claude-sonnet-4')
|
||||
expect(effective).toBeGreaterThan(0)
|
||||
})
|
||||
|
||||
test('never returns negative even for unknown 3P models (issue #635)', () => {
|
||||
// Previously, unknown 3P models got 8k context → effective context was
|
||||
// 8k minus 20k summary reservation = -12k, causing infinite auto-compact.
|
||||
// Now the fallback is 128k and there's a floor, so effective is always
|
||||
// at least reservedTokensForSummary + buffer.
|
||||
process.env.CLAUDE_CODE_USE_OPENAI = '1'
|
||||
try {
|
||||
const effective = getEffectiveContextWindowSize('some-unknown-3p-model')
|
||||
expect(effective).toBeGreaterThan(0)
|
||||
// Must be at least summary reservation (20k) + buffer (13k) = 33k
|
||||
expect(effective).toBeGreaterThanOrEqual(33_000)
|
||||
} finally {
|
||||
delete process.env.CLAUDE_CODE_USE_OPENAI
|
||||
}
|
||||
})
|
||||
})
|
||||
|
||||
describe('getAutoCompactThreshold', () => {
|
||||
test('returns positive threshold for known models', () => {
|
||||
const threshold = getAutoCompactThreshold('claude-sonnet-4')
|
||||
expect(threshold).toBeGreaterThan(0)
|
||||
})
|
||||
|
||||
test('never returns negative threshold even for unknown 3P models (issue #635)', () => {
|
||||
process.env.CLAUDE_CODE_USE_OPENAI = '1'
|
||||
try {
|
||||
const threshold = getAutoCompactThreshold('some-unknown-3p-model')
|
||||
expect(threshold).toBeGreaterThan(0)
|
||||
} finally {
|
||||
delete process.env.CLAUDE_CODE_USE_OPENAI
|
||||
}
|
||||
})
|
||||
})
|
||||
@@ -45,12 +45,7 @@ export function getEffectiveContextWindowSize(model: string): number {
|
||||
}
|
||||
}
|
||||
|
||||
// Floor: effective context must be at least the summary reservation plus a
|
||||
// usable buffer. If it goes lower, the auto-compact threshold becomes
|
||||
// negative and fires on every message (issue #635).
|
||||
const autocompactBuffer = 13_000 // must match AUTOCOMPACT_BUFFER_TOKENS
|
||||
const effectiveContext = contextWindow - reservedTokensForSummary
|
||||
return Math.max(effectiveContext, reservedTokensForSummary + autocompactBuffer)
|
||||
return contextWindow - reservedTokensForSummary
|
||||
}
|
||||
|
||||
export type AutoCompactTrackingState = {
|
||||
|
||||
@@ -9,10 +9,7 @@ const sessionTranscriptModule = feature('KAIROS')
|
||||
|
||||
import { APIUserAbortError } from '@anthropic-ai/sdk'
|
||||
import { markPostCompaction } from 'src/bootstrap/state.js'
|
||||
import {
|
||||
getInvokedSkillsForAgent,
|
||||
getOriginalCwd,
|
||||
} from '../../bootstrap/state.js'
|
||||
import { getInvokedSkillsForAgent } from '../../bootstrap/state.js'
|
||||
import type { QuerySource } from '../../constants/querySource.js'
|
||||
import type { CanUseToolFn } from '../../hooks/useCanUseTool.js'
|
||||
import type { Tool, ToolUseContext } from '../../Tool.js'
|
||||
@@ -71,7 +68,6 @@ import {
|
||||
} from '../../utils/messages.js'
|
||||
import { expandPath } from '../../utils/path.js'
|
||||
import { getPlan, getPlanFilePath } from '../../utils/plans.js'
|
||||
import { getProjectInstructionFilePaths } from '../../utils/projectInstructions.js'
|
||||
import {
|
||||
isSessionActivityTrackingActive,
|
||||
sendSessionActivitySignal,
|
||||
@@ -1693,13 +1689,8 @@ function shouldExcludeFromPostCompactRestore(
|
||||
// and to also match child directory memory files (.claude/rules/*.md, etc.)
|
||||
try {
|
||||
const normalizedMemoryPaths = new Set(
|
||||
MEMORY_TYPE_VALUES.filter(type => type !== 'Project').map(type =>
|
||||
expandPath(getMemoryPath(type)),
|
||||
),
|
||||
MEMORY_TYPE_VALUES.map(type => expandPath(getMemoryPath(type))),
|
||||
)
|
||||
for (const path of getProjectInstructionFilePaths(getOriginalCwd())) {
|
||||
normalizedMemoryPaths.add(expandPath(path))
|
||||
}
|
||||
|
||||
if (normalizedMemoryPaths.has(normalizedFilename)) {
|
||||
return true
|
||||
|
||||
@@ -1,68 +0,0 @@
|
||||
import { readdir, readFile, writeFile } from 'fs/promises'
|
||||
import { basename, relative } from 'path'
|
||||
import { getWikiPaths } from './paths.js'
|
||||
|
||||
async function listMarkdownFiles(dir: string): Promise<string[]> {
|
||||
const entries = await readdir(dir, { withFileTypes: true })
|
||||
const files: string[] = []
|
||||
|
||||
for (const entry of entries) {
|
||||
const fullPath = `${dir}/${entry.name}`
|
||||
if (entry.isDirectory()) {
|
||||
files.push(...(await listMarkdownFiles(fullPath)))
|
||||
} else if (entry.isFile() && entry.name.endsWith('.md')) {
|
||||
files.push(fullPath)
|
||||
}
|
||||
}
|
||||
|
||||
return files.sort()
|
||||
}
|
||||
|
||||
async function getPageTitle(path: string): Promise<string> {
|
||||
const content = await readFile(path, 'utf8')
|
||||
const titleLine = content
|
||||
.split('\n')
|
||||
.map(line => line.trim())
|
||||
.find(line => line.startsWith('# '))
|
||||
|
||||
return titleLine ? titleLine.replace(/^#\s+/, '') : basename(path, '.md')
|
||||
}
|
||||
|
||||
export async function rebuildWikiIndex(cwd: string): Promise<void> {
|
||||
const paths = getWikiPaths(cwd)
|
||||
const pageFiles = await listMarkdownFiles(paths.pagesDir)
|
||||
const sourceFiles = await listMarkdownFiles(paths.sourcesDir)
|
||||
|
||||
const pageLinks = await Promise.all(
|
||||
pageFiles.map(async file => {
|
||||
const rel = relative(paths.root, file)
|
||||
const title = await getPageTitle(file)
|
||||
return `- [${title}](./${rel.replace(/\\/g, '/')})`
|
||||
}),
|
||||
)
|
||||
|
||||
const sourceLinks = sourceFiles.map(file => {
|
||||
const rel = relative(paths.root, file).replace(/\\/g, '/')
|
||||
const title = basename(file, '.md')
|
||||
return `- [${title}](./${rel})`
|
||||
})
|
||||
|
||||
const content = `# ${basename(cwd)} Wiki
|
||||
|
||||
This wiki is maintained by OpenClaude as a durable project knowledge layer.
|
||||
|
||||
## Core Pages
|
||||
|
||||
${pageLinks.length > 0 ? pageLinks.join('\n') : '- No pages yet'}
|
||||
|
||||
## Sources
|
||||
|
||||
${sourceLinks.length > 0 ? sourceLinks.join('\n') : '- No sources yet'}
|
||||
|
||||
## Recent Updates
|
||||
|
||||
- See [log.md](./log.md)
|
||||
`
|
||||
|
||||
await writeFile(paths.indexFile, content, 'utf8')
|
||||
}
|
||||
@@ -1,48 +0,0 @@
|
||||
import { afterEach, expect, test } from 'bun:test'
|
||||
import { mkdtemp, readFile, rm, writeFile } from 'fs/promises'
|
||||
import { tmpdir } from 'os'
|
||||
import { join } from 'path'
|
||||
import { ingestLocalWikiSource } from './ingest.js'
|
||||
import { getWikiPaths } from './paths.js'
|
||||
|
||||
const tempDirs: string[] = []
|
||||
|
||||
afterEach(async () => {
|
||||
await Promise.all(
|
||||
tempDirs.splice(0).map(dir => rm(dir, { recursive: true, force: true })),
|
||||
)
|
||||
})
|
||||
|
||||
async function makeProjectDir(): Promise<string> {
|
||||
const dir = await mkdtemp(join(tmpdir(), 'openclaude-wiki-ingest-'))
|
||||
tempDirs.push(dir)
|
||||
return dir
|
||||
}
|
||||
|
||||
test('ingestLocalWikiSource creates a source note and updates log/index', async () => {
|
||||
const cwd = await makeProjectDir()
|
||||
const sourcePath = join(cwd, 'notes.md')
|
||||
await writeFile(
|
||||
sourcePath,
|
||||
'# Design Notes\n\nThis subsystem coordinates provider routing and session state.\nIt should be documented for future contributors.\n',
|
||||
'utf8',
|
||||
)
|
||||
|
||||
const result = await ingestLocalWikiSource(cwd, 'notes.md')
|
||||
const paths = getWikiPaths(cwd)
|
||||
|
||||
expect(result.sourceFile).toBe('notes.md')
|
||||
expect(result.title).toBe('Design Notes')
|
||||
expect(result.sourceNote.startsWith('.openclaude/wiki/sources/')).toBe(true)
|
||||
|
||||
const sourceNote = await readFile(join(cwd, result.sourceNote), 'utf8')
|
||||
expect(sourceNote).toContain('# Design Notes')
|
||||
expect(sourceNote).toContain('Path: `notes.md`')
|
||||
|
||||
const log = await readFile(paths.logFile, 'utf8')
|
||||
expect(log).toContain('Ingested `notes.md`')
|
||||
|
||||
const index = await readFile(paths.indexFile, 'utf8')
|
||||
expect(index).toContain('./sources/')
|
||||
expect(index).toContain(result.sourceNote.replace('.openclaude/wiki/', './'))
|
||||
})
|
||||
@@ -1,93 +0,0 @@
|
||||
import { appendFile, readFile, stat, writeFile } from 'fs/promises'
|
||||
import { basename, extname, isAbsolute, relative, resolve } from 'path'
|
||||
import { initializeWiki } from './init.js'
|
||||
import { rebuildWikiIndex } from './indexBuilder.js'
|
||||
import { getWikiPaths } from './paths.js'
|
||||
import type { WikiIngestResult } from './types.js'
|
||||
import {
|
||||
extractTitleFromText,
|
||||
sanitizeWikiSlug,
|
||||
summarizeText,
|
||||
} from './utils.js'
|
||||
|
||||
function buildSourceNote(params: {
|
||||
title: string
|
||||
sourcePath: string
|
||||
ingestedAt: string
|
||||
summary: string
|
||||
excerpt: string
|
||||
}): string {
|
||||
const { title, sourcePath, ingestedAt, summary, excerpt } = params
|
||||
|
||||
return `# ${title}
|
||||
|
||||
## Source
|
||||
|
||||
- Path: \`${sourcePath}\`
|
||||
- Ingested at: ${ingestedAt}
|
||||
|
||||
## Summary
|
||||
|
||||
${summary}
|
||||
|
||||
## Excerpt
|
||||
|
||||
\`\`\`
|
||||
${excerpt}
|
||||
\`\`\`
|
||||
|
||||
## Linked Pages
|
||||
|
||||
- [Architecture](../pages/architecture.md)
|
||||
`
|
||||
}
|
||||
|
||||
function buildLogEntry(sourcePath: string, title: string, ingestedAt: string): string {
|
||||
return `- ${ingestedAt}: Ingested \`${sourcePath}\` into source note "${title}"`
|
||||
}
|
||||
|
||||
export async function ingestLocalWikiSource(
|
||||
cwd: string,
|
||||
rawPath: string,
|
||||
): Promise<WikiIngestResult> {
|
||||
await initializeWiki(cwd)
|
||||
|
||||
const resolvedPath = isAbsolute(rawPath) ? rawPath : resolve(cwd, rawPath)
|
||||
const fileInfo = await stat(resolvedPath)
|
||||
if (!fileInfo.isFile()) {
|
||||
throw new Error(`Not a file: ${resolvedPath}`)
|
||||
}
|
||||
|
||||
const content = await readFile(resolvedPath, 'utf8')
|
||||
const relSourcePath = relative(cwd, resolvedPath).replace(/\\/g, '/')
|
||||
const ingestedAt = new Date().toISOString()
|
||||
const baseName = basename(resolvedPath, extname(resolvedPath))
|
||||
const title = extractTitleFromText(baseName, content)
|
||||
const summary = summarizeText(content)
|
||||
const excerpt = content.split('\n').slice(0, 20).join('\n').trim()
|
||||
const slug = sanitizeWikiSlug(`${baseName}-${Date.now()}`) || `source-${Date.now()}`
|
||||
|
||||
const paths = getWikiPaths(cwd)
|
||||
const sourceNotePath = `${paths.sourcesDir}/${slug}.md`
|
||||
|
||||
await writeFile(
|
||||
sourceNotePath,
|
||||
buildSourceNote({
|
||||
title,
|
||||
sourcePath: relSourcePath,
|
||||
ingestedAt,
|
||||
summary,
|
||||
excerpt,
|
||||
}),
|
||||
'utf8',
|
||||
)
|
||||
await appendFile(paths.logFile, `${buildLogEntry(relSourcePath, title, ingestedAt)}\n`, 'utf8')
|
||||
await rebuildWikiIndex(cwd)
|
||||
|
||||
return {
|
||||
sourceFile: relSourcePath,
|
||||
sourceNote: relative(cwd, sourceNotePath).replace(/\\/g, '/'),
|
||||
summary,
|
||||
title,
|
||||
}
|
||||
}
|
||||
@@ -1,54 +0,0 @@
|
||||
import { afterEach, expect, test } from 'bun:test'
|
||||
import { mkdtemp, readFile, rm } from 'fs/promises'
|
||||
import { tmpdir } from 'os'
|
||||
import { join } from 'path'
|
||||
import { initializeWiki } from './init.js'
|
||||
import { getWikiPaths } from './paths.js'
|
||||
|
||||
const tempDirs: string[] = []
|
||||
|
||||
afterEach(async () => {
|
||||
await Promise.all(
|
||||
tempDirs.splice(0).map(dir => rm(dir, { recursive: true, force: true })),
|
||||
)
|
||||
})
|
||||
|
||||
async function makeProjectDir(): Promise<string> {
|
||||
const dir = await mkdtemp(join(tmpdir(), 'openclaude-wiki-init-'))
|
||||
tempDirs.push(dir)
|
||||
return dir
|
||||
}
|
||||
|
||||
test('initializeWiki creates the expected wiki scaffold', async () => {
|
||||
const cwd = await makeProjectDir()
|
||||
const result = await initializeWiki(cwd)
|
||||
const paths = getWikiPaths(cwd)
|
||||
|
||||
expect(result.alreadyExisted).toBe(false)
|
||||
expect(result.createdFiles).toEqual([
|
||||
'.openclaude/wiki/schema.md',
|
||||
'.openclaude/wiki/index.md',
|
||||
'.openclaude/wiki/log.md',
|
||||
'.openclaude/wiki/pages/architecture.md',
|
||||
])
|
||||
expect(await readFile(paths.schemaFile, 'utf8')).toContain(
|
||||
'# OpenClaude Wiki Schema',
|
||||
)
|
||||
expect(await readFile(paths.indexFile, 'utf8')).toContain('Wiki')
|
||||
expect(await readFile(paths.logFile, 'utf8')).toContain(
|
||||
'Wiki initialized by OpenClaude',
|
||||
)
|
||||
expect(await readFile(join(paths.pagesDir, 'architecture.md'), 'utf8')).toContain(
|
||||
'# Architecture',
|
||||
)
|
||||
})
|
||||
|
||||
test('initializeWiki is idempotent and preserves existing files', async () => {
|
||||
const cwd = await makeProjectDir()
|
||||
|
||||
await initializeWiki(cwd)
|
||||
const second = await initializeWiki(cwd)
|
||||
|
||||
expect(second.alreadyExisted).toBe(true)
|
||||
expect(second.createdFiles).toEqual([])
|
||||
})
|
||||
@@ -1,140 +0,0 @@
|
||||
import { mkdir, writeFile } from 'fs/promises'
|
||||
import { basename, relative } from 'path'
|
||||
import { getWikiPaths } from './paths.js'
|
||||
import type { WikiInitResult } from './types.js'
|
||||
|
||||
function buildSchemaTemplate(projectName: string): string {
|
||||
return `# OpenClaude Wiki Schema
|
||||
|
||||
This wiki stores durable, human-readable project knowledge for ${projectName}.
|
||||
|
||||
## Goals
|
||||
|
||||
- Keep useful project knowledge in markdown, not only in chat history
|
||||
- Prefer synthesized facts over raw copy-paste
|
||||
- Keep source attribution explicit
|
||||
- Make pages easy for both humans and agents to update
|
||||
|
||||
## Structure
|
||||
|
||||
- \`index.md\`: top-level navigation and major topics
|
||||
- \`log.md\`: append-only update log
|
||||
- \`pages/\`: durable topic and architecture pages
|
||||
- \`sources/\`: source ingestion notes and summaries
|
||||
|
||||
## Page Rules
|
||||
|
||||
- Keep pages focused on one topic
|
||||
- Use stable headings such as:
|
||||
- \`## Summary\`
|
||||
- \`## Key Facts\`
|
||||
- \`## Relationships\`
|
||||
- \`## Open Questions\`
|
||||
- \`## Sources\`
|
||||
- Add or update facts only when they are grounded in project files or explicit source notes
|
||||
- Prefer editing an existing page over creating duplicates
|
||||
`
|
||||
}
|
||||
|
||||
function buildIndexTemplate(projectName: string): string {
|
||||
return `# ${projectName} Wiki
|
||||
|
||||
This wiki is maintained by OpenClaude as a durable project knowledge layer.
|
||||
|
||||
## Core Pages
|
||||
|
||||
- [Architecture](./pages/architecture.md)
|
||||
|
||||
## Sources
|
||||
|
||||
- Source notes live in [sources/](./sources/)
|
||||
|
||||
## Recent Updates
|
||||
|
||||
- See [log.md](./log.md)
|
||||
`
|
||||
}
|
||||
|
||||
function buildLogTemplate(timestamp: string): string {
|
||||
return `# Wiki Update Log
|
||||
|
||||
- ${timestamp}: Wiki initialized by OpenClaude
|
||||
`
|
||||
}
|
||||
|
||||
function buildArchitectureTemplate(projectName: string): string {
|
||||
return `# Architecture
|
||||
|
||||
## Summary
|
||||
|
||||
High-level architecture notes for ${projectName}.
|
||||
|
||||
## Key Facts
|
||||
|
||||
- This page is the starting point for durable architecture knowledge.
|
||||
|
||||
## Relationships
|
||||
|
||||
- Link this page to major subsystems as the wiki grows.
|
||||
|
||||
## Open Questions
|
||||
|
||||
- What are the most important runtime subsystems?
|
||||
- Which files best represent the system architecture?
|
||||
|
||||
## Sources
|
||||
|
||||
- Wiki bootstrap
|
||||
`
|
||||
}
|
||||
|
||||
async function ensureFile(
|
||||
filePath: string,
|
||||
content: string,
|
||||
createdFiles: string[],
|
||||
): Promise<void> {
|
||||
try {
|
||||
await writeFile(filePath, content, { encoding: 'utf8', flag: 'wx' })
|
||||
createdFiles.push(filePath)
|
||||
} catch (error: unknown) {
|
||||
if (
|
||||
typeof error === 'object' &&
|
||||
error !== null &&
|
||||
'code' in error &&
|
||||
error.code === 'EEXIST'
|
||||
) {
|
||||
return
|
||||
}
|
||||
throw error
|
||||
}
|
||||
}
|
||||
|
||||
export async function initializeWiki(cwd: string): Promise<WikiInitResult> {
|
||||
const paths = getWikiPaths(cwd)
|
||||
const createdDirectories: string[] = []
|
||||
const createdFiles: string[] = []
|
||||
|
||||
for (const dir of [paths.root, paths.pagesDir, paths.sourcesDir]) {
|
||||
await mkdir(dir, { recursive: true })
|
||||
createdDirectories.push(dir)
|
||||
}
|
||||
|
||||
const projectName = basename(cwd)
|
||||
const timestamp = new Date().toISOString()
|
||||
|
||||
await ensureFile(paths.schemaFile, buildSchemaTemplate(projectName), createdFiles)
|
||||
await ensureFile(paths.indexFile, buildIndexTemplate(projectName), createdFiles)
|
||||
await ensureFile(paths.logFile, buildLogTemplate(timestamp), createdFiles)
|
||||
await ensureFile(
|
||||
`${paths.pagesDir}/architecture.md`,
|
||||
buildArchitectureTemplate(projectName),
|
||||
createdFiles,
|
||||
)
|
||||
|
||||
return {
|
||||
root: paths.root,
|
||||
createdFiles: createdFiles.map(file => relative(cwd, file)),
|
||||
createdDirectories: createdDirectories.map(dir => relative(cwd, dir)),
|
||||
alreadyExisted: createdFiles.length === 0,
|
||||
}
|
||||
}
|
||||
@@ -1,18 +0,0 @@
|
||||
import { join } from 'path'
|
||||
import type { WikiPaths } from './types.js'
|
||||
|
||||
export const OPENCLAUDE_DIRNAME = '.openclaude'
|
||||
export const WIKI_DIRNAME = 'wiki'
|
||||
|
||||
export function getWikiPaths(cwd: string): WikiPaths {
|
||||
const root = join(cwd, OPENCLAUDE_DIRNAME, WIKI_DIRNAME)
|
||||
|
||||
return {
|
||||
root,
|
||||
pagesDir: join(root, 'pages'),
|
||||
sourcesDir: join(root, 'sources'),
|
||||
schemaFile: join(root, 'schema.md'),
|
||||
indexFile: join(root, 'index.md'),
|
||||
logFile: join(root, 'log.md'),
|
||||
}
|
||||
}
|
||||
@@ -1,55 +0,0 @@
|
||||
import { afterEach, expect, test } from 'bun:test'
|
||||
import { mkdtemp, mkdir, rm, writeFile } from 'fs/promises'
|
||||
import { tmpdir } from 'os'
|
||||
import { join } from 'path'
|
||||
import { initializeWiki } from './init.js'
|
||||
import { getWikiPaths } from './paths.js'
|
||||
import { getWikiStatus } from './status.js'
|
||||
|
||||
const tempDirs: string[] = []
|
||||
|
||||
afterEach(async () => {
|
||||
await Promise.all(
|
||||
tempDirs.splice(0).map(dir => rm(dir, { recursive: true, force: true })),
|
||||
)
|
||||
})
|
||||
|
||||
async function makeProjectDir(): Promise<string> {
|
||||
const dir = await mkdtemp(join(tmpdir(), 'openclaude-wiki-status-'))
|
||||
tempDirs.push(dir)
|
||||
return dir
|
||||
}
|
||||
|
||||
test('getWikiStatus reports uninitialized wiki state', async () => {
|
||||
const cwd = await makeProjectDir()
|
||||
const status = await getWikiStatus(cwd)
|
||||
|
||||
expect(status.initialized).toBe(false)
|
||||
expect(status.pageCount).toBe(0)
|
||||
expect(status.sourceCount).toBe(0)
|
||||
expect(status.lastUpdatedAt).toBeNull()
|
||||
})
|
||||
|
||||
test('getWikiStatus counts pages and sources for initialized wiki', async () => {
|
||||
const cwd = await makeProjectDir()
|
||||
await initializeWiki(cwd)
|
||||
const paths = getWikiPaths(cwd)
|
||||
|
||||
await writeFile(join(paths.pagesDir, 'commands.md'), '# Commands\n', 'utf8')
|
||||
await mkdir(join(paths.sourcesDir, 'external'), { recursive: true })
|
||||
await writeFile(
|
||||
join(paths.sourcesDir, 'external', 'spec.md'),
|
||||
'# Spec\n',
|
||||
'utf8',
|
||||
)
|
||||
|
||||
const status = await getWikiStatus(cwd)
|
||||
|
||||
expect(status.initialized).toBe(true)
|
||||
expect(status.pageCount).toBe(2)
|
||||
expect(status.sourceCount).toBe(1)
|
||||
expect(status.hasSchema).toBe(true)
|
||||
expect(status.hasIndex).toBe(true)
|
||||
expect(status.hasLog).toBe(true)
|
||||
expect(status.lastUpdatedAt).not.toBeNull()
|
||||
})
|
||||
@@ -1,82 +0,0 @@
|
||||
import { readdir, stat } from 'fs/promises'
|
||||
import { getWikiPaths } from './paths.js'
|
||||
import type { WikiStatus } from './types.js'
|
||||
|
||||
async function pathExists(path: string): Promise<boolean> {
|
||||
try {
|
||||
await stat(path)
|
||||
return true
|
||||
} catch {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
async function listMarkdownFiles(dir: string): Promise<string[]> {
|
||||
if (!(await pathExists(dir))) {
|
||||
return []
|
||||
}
|
||||
|
||||
const entries = await readdir(dir, { withFileTypes: true })
|
||||
const files: string[] = []
|
||||
|
||||
for (const entry of entries) {
|
||||
const fullPath = `${dir}/${entry.name}`
|
||||
if (entry.isDirectory()) {
|
||||
files.push(...(await listMarkdownFiles(fullPath)))
|
||||
} else if (entry.isFile() && entry.name.endsWith('.md')) {
|
||||
files.push(fullPath)
|
||||
}
|
||||
}
|
||||
|
||||
return files
|
||||
}
|
||||
|
||||
async function getLastUpdatedAt(pathsToCheck: string[]): Promise<string | null> {
|
||||
const mtimes: number[] = []
|
||||
|
||||
for (const path of pathsToCheck) {
|
||||
try {
|
||||
const info = await stat(path)
|
||||
mtimes.push(info.mtimeMs)
|
||||
} catch {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if (mtimes.length === 0) {
|
||||
return null
|
||||
}
|
||||
|
||||
return new Date(Math.max(...mtimes)).toISOString()
|
||||
}
|
||||
|
||||
export async function getWikiStatus(cwd: string): Promise<WikiStatus> {
|
||||
const paths = getWikiPaths(cwd)
|
||||
|
||||
const [hasRoot, hasSchema, hasIndex, hasLog, pages, sources] =
|
||||
await Promise.all([
|
||||
pathExists(paths.root),
|
||||
pathExists(paths.schemaFile),
|
||||
pathExists(paths.indexFile),
|
||||
pathExists(paths.logFile),
|
||||
listMarkdownFiles(paths.pagesDir),
|
||||
listMarkdownFiles(paths.sourcesDir),
|
||||
])
|
||||
|
||||
return {
|
||||
initialized: hasRoot && hasSchema && hasIndex && hasLog,
|
||||
root: paths.root,
|
||||
pageCount: pages.length,
|
||||
sourceCount: sources.length,
|
||||
hasSchema,
|
||||
hasIndex,
|
||||
hasLog,
|
||||
lastUpdatedAt: await getLastUpdatedAt([
|
||||
paths.schemaFile,
|
||||
paths.indexFile,
|
||||
paths.logFile,
|
||||
...pages,
|
||||
...sources,
|
||||
]),
|
||||
}
|
||||
}
|
||||
@@ -1,33 +0,0 @@
|
||||
export type WikiPaths = {
|
||||
root: string
|
||||
pagesDir: string
|
||||
sourcesDir: string
|
||||
schemaFile: string
|
||||
indexFile: string
|
||||
logFile: string
|
||||
}
|
||||
|
||||
export type WikiInitResult = {
|
||||
root: string
|
||||
createdFiles: string[]
|
||||
createdDirectories: string[]
|
||||
alreadyExisted: boolean
|
||||
}
|
||||
|
||||
export type WikiStatus = {
|
||||
initialized: boolean
|
||||
root: string
|
||||
pageCount: number
|
||||
sourceCount: number
|
||||
hasSchema: boolean
|
||||
hasIndex: boolean
|
||||
hasLog: boolean
|
||||
lastUpdatedAt: string | null
|
||||
}
|
||||
|
||||
export type WikiIngestResult = {
|
||||
sourceFile: string
|
||||
sourceNote: string
|
||||
summary: string
|
||||
title: string
|
||||
}
|
||||
@@ -1,36 +0,0 @@
|
||||
export function sanitizeWikiSlug(value: string): string {
|
||||
return value
|
||||
.toLowerCase()
|
||||
.replace(/[^a-z0-9]+/g, '-')
|
||||
.replace(/^-+|-+$/g, '')
|
||||
.replace(/-{2,}/g, '-')
|
||||
}
|
||||
|
||||
export function summarizeText(input: string, maxLength = 280): string {
|
||||
const normalized = input.replace(/\s+/g, ' ').trim()
|
||||
if (!normalized) {
|
||||
return 'No summary available.'
|
||||
}
|
||||
|
||||
if (normalized.length <= maxLength) {
|
||||
return normalized
|
||||
}
|
||||
|
||||
return `${normalized.slice(0, maxLength - 1).trimEnd()}…`
|
||||
}
|
||||
|
||||
export function extractTitleFromText(
|
||||
fallbackName: string,
|
||||
content: string,
|
||||
): string {
|
||||
const firstNonEmptyLine = content
|
||||
.split('\n')
|
||||
.map(line => line.trim())
|
||||
.find(Boolean)
|
||||
|
||||
if (!firstNonEmptyLine) {
|
||||
return fallbackName
|
||||
}
|
||||
|
||||
return firstNonEmptyLine.replace(/^#+\s*/, '') || fallbackName
|
||||
}
|
||||
@@ -1,13 +0,0 @@
|
||||
import type { Command } from '../commands.js'
|
||||
import { createStore } from './store.js'
|
||||
|
||||
const pluginCommandsStore = createStore<Command[]>([])
|
||||
|
||||
export const getPluginCommandsState = (): Command[] =>
|
||||
pluginCommandsStore.getState()
|
||||
|
||||
export const subscribePluginCommands = pluginCommandsStore.subscribe
|
||||
|
||||
export function setPluginCommandsState(commands: Command[]): void {
|
||||
pluginCommandsStore.setState(() => [...commands])
|
||||
}
|
||||
@@ -27,19 +27,19 @@ function getClaudeCodeGuideBasePrompt(): string {
|
||||
? `${FILE_READ_TOOL_NAME}, \`find\`, and \`grep\``
|
||||
: `${FILE_READ_TOOL_NAME}, ${GLOB_TOOL_NAME}, and ${GREP_TOOL_NAME}`
|
||||
|
||||
return `You are the OpenClaude guide agent. Your primary responsibility is helping users understand and use OpenClaude, the Claude Agent SDK, and the Claude API (formerly the Anthropic API) effectively.
|
||||
return `You are the Claude guide agent. Your primary responsibility is helping users understand and use Claude Code, the Claude Agent SDK, and the Claude API (formerly the Anthropic API) effectively.
|
||||
|
||||
**Your expertise spans three domains:**
|
||||
|
||||
1. **OpenClaude** (the CLI tool): Installation, configuration, hooks, skills, MCP servers, keyboard shortcuts, IDE integrations, settings, and workflows.
|
||||
1. **Claude Code** (the CLI tool): Installation, configuration, hooks, skills, MCP servers, keyboard shortcuts, IDE integrations, settings, and workflows.
|
||||
|
||||
2. **Claude Agent SDK**: A framework for building custom AI agents. Available for Node.js/TypeScript and Python.
|
||||
2. **Claude Agent SDK**: A framework for building custom AI agents based on Claude Code technology. Available for Node.js/TypeScript and Python.
|
||||
|
||||
3. **Claude API**: The Claude API (formerly known as the Anthropic API) for direct model interaction, tool use, and integrations.
|
||||
|
||||
**Documentation sources:**
|
||||
|
||||
- **Claude Code docs** (${CLAUDE_CODE_DOCS_MAP_URL}): Use these as the compatibility reference for questions about the OpenClaude CLI tool, including:
|
||||
- **Claude Code docs** (${CLAUDE_CODE_DOCS_MAP_URL}): Fetch this for questions about the Claude Code CLI tool, including:
|
||||
- Installation, setup, and getting started
|
||||
- Hooks (pre/post command execution)
|
||||
- Custom skills
|
||||
@@ -97,7 +97,7 @@ function getFeedbackGuideline(): string {
|
||||
|
||||
export const CLAUDE_CODE_GUIDE_AGENT: BuiltInAgentDefinition = {
|
||||
agentType: CLAUDE_CODE_GUIDE_AGENT_TYPE,
|
||||
whenToUse: `Use this agent when the user asks questions ("Can OpenClaude...", "Does OpenClaude...", "How do I...") about: (1) OpenClaude (the CLI tool) - features, hooks, slash commands, MCP servers, settings, IDE integrations, keyboard shortcuts; (2) Claude Agent SDK - building custom agents; (3) Claude API (formerly Anthropic API) - API usage, tool use, Anthropic SDK usage. **IMPORTANT:** Before spawning a new agent, check if there is already a running or recently completed claude-code-guide agent that you can continue via ${SEND_MESSAGE_TOOL_NAME}.`,
|
||||
whenToUse: `Use this agent when the user asks questions ("Can Claude...", "Does Claude...", "How do I...") about: (1) Claude Code (the CLI tool) - features, hooks, slash commands, MCP servers, settings, IDE integrations, keyboard shortcuts; (2) Claude Agent SDK - building custom agents; (3) Claude API (formerly Anthropic API) - API usage, tool use, Anthropic SDK usage. **IMPORTANT:** Before spawning a new agent, check if there is already a running or recently completed claude-code-guide agent that you can continue via ${SEND_MESSAGE_TOOL_NAME}.`,
|
||||
// Ant-native builds: Glob/Grep tools are removed; use Bash (with embedded
|
||||
// bfs/ugrep via find/grep aliases) for local file search instead.
|
||||
tools: hasEmbeddedSearchTools()
|
||||
|
||||
@@ -21,7 +21,7 @@ function getExploreSystemPrompt(): string {
|
||||
? `- Use \`grep\` via ${BASH_TOOL_NAME} for searching file contents with regex`
|
||||
: `- Use ${GREP_TOOL_NAME} for searching file contents with regex`
|
||||
|
||||
return `You are a file search specialist for OpenClaude. You excel at thoroughly navigating and exploring codebases.
|
||||
return `You are a file search specialist for OpenClaude, an open-source fork of Claude Code. You excel at thoroughly navigating and exploring codebases.
|
||||
|
||||
=== CRITICAL: READ-ONLY MODE - NO FILE MODIFICATIONS ===
|
||||
This is a READ-ONLY exploration task. You are STRICTLY PROHIBITED from:
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
import type { BuiltInAgentDefinition } from '../loadAgentsDir.js'
|
||||
|
||||
const SHARED_PREFIX = `You are an agent for OpenClaude, an open-source coding agent and CLI. Given the user's message, you should use the tools available to complete the task. Complete the task fully—don't gold-plate, but don't leave it half-done.`
|
||||
const SHARED_PREFIX = `You are an agent for OpenClaude, an open-source fork of Claude Code. Given the user's message, you should use the tools available to complete the task. Complete the task fully—don't gold-plate, but don't leave it half-done.`
|
||||
|
||||
const SHARED_GUIDELINES = `Your strengths:
|
||||
- Searching for code, configurations, and patterns across large codebases
|
||||
|
||||
@@ -18,7 +18,7 @@ function getPlanV2SystemPrompt(): string {
|
||||
? `\`find\`, \`grep\`, and ${FILE_READ_TOOL_NAME}`
|
||||
: `${GLOB_TOOL_NAME}, ${GREP_TOOL_NAME}, and ${FILE_READ_TOOL_NAME}`
|
||||
|
||||
return `You are a software architect and planning specialist for OpenClaude. Your role is to explore the codebase and design implementation plans.
|
||||
return `You are a software architect and planning specialist for Claude Code. Your role is to explore the codebase and design implementation plans.
|
||||
|
||||
=== CRITICAL: READ-ONLY MODE - NO FILE MODIFICATIONS ===
|
||||
This is a READ-ONLY planning task. You are STRICTLY PROHIBITED from:
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
import type { BuiltInAgentDefinition } from '../loadAgentsDir.js'
|
||||
|
||||
const STATUSLINE_SYSTEM_PROMPT = `You are a status line setup agent for OpenClaude. Your job is to create or update the statusLine command in the user's OpenClaude settings.
|
||||
const STATUSLINE_SYSTEM_PROMPT = `You are a status line setup agent for Claude Code. Your job is to create or update the statusLine command in the user's Claude Code settings.
|
||||
|
||||
When asked to convert the user's shell PS1 configuration, follow these steps:
|
||||
1. Read the user's shell configuration files in this order of preference:
|
||||
@@ -47,7 +47,7 @@ How to use the statusLine command:
|
||||
"project_dir": "string", // Project root directory path
|
||||
"added_dirs": ["string"] // Directories added via /add-dir
|
||||
},
|
||||
"version": "string", // OpenClaude app version (e.g., "1.0.71")
|
||||
"version": "string", // Claude Code app version (e.g., "1.0.71")
|
||||
"output_style": {
|
||||
"name": "string", // Output style name (e.g., "default", "Explanatory", "Learning")
|
||||
},
|
||||
@@ -110,11 +110,10 @@ How to use the statusLine command:
|
||||
To display both 5-hour and 7-day limits when available:
|
||||
- input=$(cat); five=$(echo "$input" | jq -r '.rate_limits.five_hour.used_percentage // empty'); week=$(echo "$input" | jq -r '.rate_limits.seven_day.used_percentage // empty'); out=""; [ -n "$five" ] && out="5h:$(printf '%.0f' "$five")%"; [ -n "$week" ] && out="$out 7d:$(printf '%.0f' "$week")%"; echo "$out"
|
||||
|
||||
2. For longer commands, save a new file alongside the user's existing settings home instead of creating a new config directory.
|
||||
- Prefer the currently used config home if one already exists.
|
||||
- In most existing setups this will be ~/.claude/statusline-command.sh.
|
||||
2. For longer commands, you can save a new file in the user's ~/.claude directory, e.g.:
|
||||
- ~/.claude/statusline-command.sh and reference that file in the settings.
|
||||
|
||||
3. Update the user's resolved settings file with:
|
||||
3. Update the user's ~/.claude/settings.json with:
|
||||
{
|
||||
"statusLine": {
|
||||
"type": "command",
|
||||
@@ -122,23 +121,20 @@ How to use the statusLine command:
|
||||
}
|
||||
}
|
||||
|
||||
4. Use the existing config home already in use by the installation.
|
||||
- Prefer the settings file that already exists.
|
||||
- If the settings file is a symlink, update the target file instead.
|
||||
- Do not create a second parallel settings home just because another directory name also exists.
|
||||
4. If ~/.claude/settings.json is a symlink, update the target file instead.
|
||||
|
||||
Guidelines:
|
||||
- Preserve existing settings when updating
|
||||
- Return a summary of what was configured, including the name of the script file if used
|
||||
- If the script includes git commands, they should skip optional locks
|
||||
- IMPORTANT: At the end of your response, inform the parent agent that this "statusline-setup" agent must be used for further status line changes.
|
||||
Also ensure that the user is informed that they can ask OpenClaude to continue to make changes to the status line.
|
||||
Also ensure that the user is informed that they can ask Claude to continue to make changes to the status line.
|
||||
`
|
||||
|
||||
export const STATUSLINE_SETUP_AGENT: BuiltInAgentDefinition = {
|
||||
agentType: 'statusline-setup',
|
||||
whenToUse:
|
||||
"Use this agent to configure the user's OpenClaude status line setting.",
|
||||
"Use this agent to configure the user's Claude Code status line setting.",
|
||||
tools: ['Read', 'Edit'],
|
||||
source: 'built-in',
|
||||
baseDir: 'built-in',
|
||||
|
||||
@@ -525,10 +525,7 @@ export const FileEditTool = buildTool({
|
||||
})
|
||||
|
||||
// 7. Log events
|
||||
if (
|
||||
absoluteFilePath.endsWith(`${sep}AGENTS.md`) ||
|
||||
absoluteFilePath.endsWith(`${sep}CLAUDE.md`)
|
||||
) {
|
||||
if (absoluteFilePath.endsWith(`${sep}CLAUDE.md`)) {
|
||||
logEvent('tengu_write_claudemd', {})
|
||||
}
|
||||
countLinesChanged(patch)
|
||||
|
||||
@@ -11,7 +11,7 @@ export function getEditToolDescription(): string {
|
||||
|
||||
function getDefaultEditDescription(): string {
|
||||
const prefixFormat = isCompactLinePrefixEnabled()
|
||||
? 'line number + arrow'
|
||||
? 'line number + tab'
|
||||
: 'spaces + line number + arrow'
|
||||
const minimalUniquenessHint =
|
||||
process.env.USER_TYPE === 'ant'
|
||||
|
||||
@@ -336,11 +336,8 @@ export const FileWriteTool = buildTool({
|
||||
limit: undefined,
|
||||
})
|
||||
|
||||
// Log when writing to the root project instruction file
|
||||
if (
|
||||
fullFilePath.endsWith(`${sep}AGENTS.md`) ||
|
||||
fullFilePath.endsWith(`${sep}CLAUDE.md`)
|
||||
) {
|
||||
// Log when writing to CLAUDE.md
|
||||
if (fullFilePath.endsWith(`${sep}CLAUDE.md`)) {
|
||||
logEvent('tengu_write_claudemd', {})
|
||||
}
|
||||
|
||||
|
||||
@@ -14,21 +14,8 @@ import {
|
||||
export const inputSchema = lazySchema(() => z.object({}).passthrough())
|
||||
type InputSchema = ReturnType<typeof inputSchema>
|
||||
|
||||
// MCP tools can return either a plain string or an array of content blocks
|
||||
// (text, images, etc.). The outputSchema must reflect both shapes so the model
|
||||
// knows rich content is possible.
|
||||
export const outputSchema = lazySchema(() =>
|
||||
z.union([
|
||||
z.string().describe('MCP tool execution result as text'),
|
||||
z
|
||||
.array(
|
||||
z.object({
|
||||
type: z.string(),
|
||||
text: z.string().optional(),
|
||||
}),
|
||||
)
|
||||
.describe('MCP tool execution result as content blocks'),
|
||||
]),
|
||||
z.string().describe('MCP tool execution result'),
|
||||
)
|
||||
type OutputSchema = ReturnType<typeof outputSchema>
|
||||
|
||||
@@ -78,19 +65,7 @@ export const MCPTool = buildTool({
|
||||
renderToolUseProgressMessage,
|
||||
renderToolResultMessage,
|
||||
isResultTruncated(output: Output): boolean {
|
||||
if (typeof output === 'string') {
|
||||
return isOutputLineTruncated(output)
|
||||
}
|
||||
// Array of content blocks — check if any text block exceeds the display limit
|
||||
if (Array.isArray(output)) {
|
||||
return output.some(
|
||||
block =>
|
||||
block?.type === 'text' &&
|
||||
typeof block.text === 'string' &&
|
||||
isOutputLineTruncated(block.text),
|
||||
)
|
||||
}
|
||||
return false
|
||||
},
|
||||
mapToolResultToToolResultBlockParam(content, toolUseID) {
|
||||
return {
|
||||
|
||||
@@ -1,29 +1,6 @@
|
||||
import { describe, expect, test } from 'bun:test'
|
||||
|
||||
import type { Command } from '../../commands.js'
|
||||
import { SkillTool } from './SkillTool.js'
|
||||
import { renderToolUseMessage } from './UI.js'
|
||||
|
||||
function createPromptCommand(
|
||||
name: string,
|
||||
options: {
|
||||
source?: 'builtin' | 'plugin' | 'mcp' | 'bundled'
|
||||
loadedFrom?: Command['loadedFrom']
|
||||
} = {},
|
||||
): Command {
|
||||
return {
|
||||
type: 'prompt',
|
||||
name,
|
||||
description: `${name} description`,
|
||||
progressMessage: `${name} progress`,
|
||||
contentLength: 0,
|
||||
source: options.source ?? 'builtin',
|
||||
loadedFrom: options.loadedFrom,
|
||||
async getPromptForCommand() {
|
||||
return []
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
describe('SkillTool missing parameter handling', () => {
|
||||
test('missing skill stays required at the schema level', async () => {
|
||||
@@ -52,47 +29,3 @@ describe('SkillTool missing parameter handling', () => {
|
||||
expect(parsed.success).toBe(true)
|
||||
})
|
||||
})
|
||||
|
||||
describe('SkillTool renderToolUseMessage', () => {
|
||||
test('plugin skills render correctly without plugin command metadata', () => {
|
||||
const pluginSkillName = 'plugin:review-pr'
|
||||
|
||||
expect(
|
||||
renderToolUseMessage(
|
||||
{ skill: pluginSkillName },
|
||||
{
|
||||
commands: [],
|
||||
},
|
||||
),
|
||||
).toBe(pluginSkillName)
|
||||
|
||||
expect(
|
||||
renderToolUseMessage(
|
||||
{ skill: pluginSkillName },
|
||||
{
|
||||
commands: [
|
||||
createPromptCommand(pluginSkillName, {
|
||||
source: 'plugin',
|
||||
loadedFrom: 'plugin',
|
||||
}),
|
||||
],
|
||||
},
|
||||
),
|
||||
).toBe(pluginSkillName)
|
||||
})
|
||||
|
||||
test('legacy commands still render with a slash prefix when metadata is present', () => {
|
||||
expect(
|
||||
renderToolUseMessage(
|
||||
{ skill: 'legacy-command' },
|
||||
{
|
||||
commands: [
|
||||
createPromptCommand('legacy-command', {
|
||||
loadedFrom: 'commands_DEPRECATED',
|
||||
}),
|
||||
],
|
||||
},
|
||||
),
|
||||
).toBe('/legacy-command')
|
||||
})
|
||||
})
|
||||
|
||||
@@ -54,10 +54,7 @@ export function renderToolUseMessage({
|
||||
if (!skill) {
|
||||
return null;
|
||||
}
|
||||
// Only legacy /commands_DEPRECATED entries need the command lookup so we can
|
||||
// preserve the slash-prefixed display. Plugin skills already carry the
|
||||
// invoked skill name in `skill`, so transcript/history rendering does not
|
||||
// need plugin command metadata.
|
||||
// Look up the command to check if it came from the legacy /commands folder
|
||||
const command = commands?.find(c => c.name === skill);
|
||||
const displayName = command?.loadedFrom === 'commands_DEPRECATED' ? `/${skill}` : skill;
|
||||
return displayName;
|
||||
|
||||
@@ -1,518 +0,0 @@
|
||||
# Web Search Providers
|
||||
|
||||
OpenClaude supports multiple search backends through a provider adapter system.
|
||||
|
||||
## Supported Providers
|
||||
|
||||
| Provider | Env Var | Auth Header | Method |
|
||||
|---|---|---|---|
|
||||
| Custom API | `WEB_SEARCH_API` | Configurable | GET/POST |
|
||||
| SearXNG | `WEB_PROVIDER=searxng` | — | GET |
|
||||
| Google | `WEB_PROVIDER=google` | `Authorization: Bearer` | GET |
|
||||
| Brave | `WEB_PROVIDER=brave` | `X-Subscription-Token` | GET |
|
||||
| SerpAPI | `WEB_PROVIDER=serpapi` | `Authorization: Bearer` | GET |
|
||||
| Firecrawl | `FIRECRAWL_API_KEY` | Internal | SDK |
|
||||
| Tavily | `TAVILY_API_KEY` | `Authorization: Bearer` | POST |
|
||||
| Exa | `EXA_API_KEY` | `x-api-key` | POST |
|
||||
| You.com | `YOU_API_KEY` | `X-API-Key` | GET |
|
||||
| Jina | `JINA_API_KEY` | `Authorization: Bearer` | GET |
|
||||
| Bing | `BING_API_KEY` | `Ocp-Apim-Subscription-Key` | GET |
|
||||
| Mojeek | `MOJEEK_API_KEY` | `Authorization: Bearer` | GET |
|
||||
| Linkup | `LINKUP_API_KEY` | `Authorization: Bearer` | POST |
|
||||
| DuckDuckGo | *(default)* | — | SDK |
|
||||
|
||||
## Quick Start
|
||||
|
||||
```bash
|
||||
# Tavily (recommended for AI — fast, RAG-ready)
|
||||
export TAVILY_API_KEY=tvly-your-key
|
||||
|
||||
# Exa (neural search, semantic queries)
|
||||
export EXA_API_KEY=your-exa-key
|
||||
|
||||
# Brave (traditional web search, good coverage)
|
||||
export WEB_PROVIDER=brave
|
||||
export WEB_KEY=your-brave-key
|
||||
|
||||
# Bing
|
||||
export BING_API_KEY=your-bing-key
|
||||
|
||||
# Self-hosted SearXNG (free, private)
|
||||
export WEB_PROVIDER=searxng
|
||||
export WEB_SEARCH_API=https://search.example.com/search
|
||||
```
|
||||
|
||||
## Provider Selection Mode
|
||||
|
||||
`WEB_SEARCH_PROVIDER` controls fallback behavior:
|
||||
|
||||
| Mode | Behavior |
|
||||
|---|---|
|
||||
| `auto` (default) | Try all configured providers in order, fall through on failure |
|
||||
| `tavily` | Tavily only — throws on failure |
|
||||
| `exa` | Exa only — throws on failure |
|
||||
| `custom` | Custom API only — throws on failure. **Not in the auto chain** — must be explicitly selected |
|
||||
| `firecrawl` | Firecrawl only — throws on failure |
|
||||
| `ddg` | DuckDuckGo only — throws on failure |
|
||||
| `native` | Anthropic native / Codex only |
|
||||
|
||||
**Auto mode priority:** firecrawl → tavily → exa → you → jina → bing → mojeek → linkup → ddg
|
||||
|
||||
> **Note:** The `custom` provider is excluded from the `auto` chain. It is only used when `WEB_SEARCH_PROVIDER=custom` is explicitly set. This prevents the generic outbound provider from silently becoming the default backend.
|
||||
|
||||
```bash
|
||||
# Fail loudly if Tavily is down (don't silently switch backends)
|
||||
export WEB_SEARCH_PROVIDER=tavily
|
||||
|
||||
# Try everything, fall through gracefully
|
||||
export WEB_SEARCH_PROVIDER=auto
|
||||
```
|
||||
|
||||
## Provider Request & Response Formats
|
||||
|
||||
### Tavily
|
||||
|
||||
```bash
|
||||
export TAVILY_API_KEY=tvly-your-key
|
||||
```
|
||||
|
||||
**Request:**
|
||||
```
|
||||
POST https://api.tavily.com/search
|
||||
Authorization: Bearer tvly-your-key
|
||||
Content-Type: application/json
|
||||
|
||||
{"query": "search terms", "max_results": 10, "include_answer": false}
|
||||
```
|
||||
|
||||
**Response:**
|
||||
```json
|
||||
{
|
||||
"results": [
|
||||
{
|
||||
"title": "Result Title",
|
||||
"url": "https://example.com/page",
|
||||
"content": "Full text snippet from the page...",
|
||||
"score": 0.95
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
### Exa
|
||||
|
||||
```bash
|
||||
export EXA_API_KEY=your-exa-key
|
||||
```
|
||||
|
||||
**Request:**
|
||||
```
|
||||
POST https://api.exa.ai/search
|
||||
x-api-key: your-exa-key
|
||||
Content-Type: application/json
|
||||
|
||||
{"query": "search terms", "numResults": 10, "type": "auto"}
|
||||
```
|
||||
|
||||
**Response:**
|
||||
```json
|
||||
{
|
||||
"results": [
|
||||
{
|
||||
"title": "Result Title",
|
||||
"url": "https://example.com/page",
|
||||
"snippet": "A short summary of the page content...",
|
||||
"score": 0.89
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
### You.com
|
||||
|
||||
```bash
|
||||
export YOU_API_KEY=your-you-key
|
||||
```
|
||||
|
||||
**Request:**
|
||||
```
|
||||
GET https://api.ydc-index.io/v1/search?query=search+terms
|
||||
X-API-Key: your-you-key
|
||||
```
|
||||
|
||||
**Response:**
|
||||
```json
|
||||
{
|
||||
"results": {
|
||||
"web": [
|
||||
{
|
||||
"title": "Result Title",
|
||||
"url": "https://example.com/page",
|
||||
"snippets": ["First snippet from the page...", "Second snippet..."],
|
||||
"description": "Page description"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Jina
|
||||
|
||||
```bash
|
||||
export JINA_API_KEY=your-jina-key
|
||||
```
|
||||
|
||||
**Request:**
|
||||
```
|
||||
GET https://s.jina.ai/?q=search+terms
|
||||
Authorization: Bearer your-jina-key
|
||||
Accept: application/json
|
||||
```
|
||||
|
||||
**Response:**
|
||||
```json
|
||||
{
|
||||
"data": [
|
||||
{
|
||||
"title": "Result Title",
|
||||
"url": "https://example.com/page",
|
||||
"description": "Snippet from the page..."
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
### Bing
|
||||
|
||||
```bash
|
||||
export BING_API_KEY=your-bing-key
|
||||
```
|
||||
|
||||
**Request:**
|
||||
```
|
||||
GET https://api.bing.microsoft.com/v7.0/search?q=search+terms&count=10
|
||||
Ocp-Apim-Subscription-Key: your-bing-key
|
||||
```
|
||||
|
||||
**Response:**
|
||||
```json
|
||||
{
|
||||
"webPages": {
|
||||
"value": [
|
||||
{
|
||||
"name": "Result Title",
|
||||
"url": "https://example.com/page",
|
||||
"snippet": "A short excerpt from the page...",
|
||||
"displayUrl": "example.com/page"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Mojeek
|
||||
|
||||
```bash
|
||||
export MOJEEK_API_KEY=your-mojeek-key
|
||||
```
|
||||
|
||||
**Request:**
|
||||
```
|
||||
GET https://www.mojeek.com/search?q=search+terms&fmt=json
|
||||
Authorization: Bearer your-mojeek-key
|
||||
```
|
||||
|
||||
**Response:**
|
||||
```json
|
||||
{
|
||||
"response": {
|
||||
"results": [
|
||||
{
|
||||
"title": "Result Title",
|
||||
"url": "https://example.com/page",
|
||||
"snippet": "Excerpt from the page..."
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Linkup
|
||||
|
||||
```bash
|
||||
export LINKUP_API_KEY=your-linkup-key
|
||||
```
|
||||
|
||||
**Request:**
|
||||
```
|
||||
POST https://api.linkup.so/v1/search
|
||||
Authorization: Bearer your-linkup-key
|
||||
Content-Type: application/json
|
||||
|
||||
{"q": "search terms", "search_type": "standard"}
|
||||
```
|
||||
|
||||
**Response:**
|
||||
```json
|
||||
{
|
||||
"results": [
|
||||
{
|
||||
"name": "Result Title",
|
||||
"url": "https://example.com/page",
|
||||
"snippet": "A short description of the result..."
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
### SearXNG (Built-in Preset)
|
||||
|
||||
```bash
|
||||
export WEB_PROVIDER=searxng
|
||||
export WEB_SEARCH_API=https://search.example.com/search
|
||||
```
|
||||
|
||||
**Request:**
|
||||
```
|
||||
GET https://search.example.com/search?q=search+terms
|
||||
```
|
||||
|
||||
**Response:**
|
||||
```json
|
||||
{
|
||||
"results": [
|
||||
{
|
||||
"title": "Result Title",
|
||||
"url": "https://example.com/page",
|
||||
"content": "Snippet from the page...",
|
||||
"engine": "google"
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
### Google Custom Search (Built-in Preset)
|
||||
|
||||
```bash
|
||||
export WEB_PROVIDER=google
|
||||
export WEB_KEY=your-google-api-key
|
||||
```
|
||||
|
||||
**Request:**
|
||||
```
|
||||
GET https://www.googleapis.com/customsearch/v1?q=search+terms
|
||||
Authorization: Bearer your-google-api-key
|
||||
```
|
||||
|
||||
**Response:**
|
||||
```json
|
||||
{
|
||||
"items": [
|
||||
{
|
||||
"title": "Result Title",
|
||||
"link": "https://example.com/page",
|
||||
"snippet": "A short excerpt...",
|
||||
"displayLink": "example.com"
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
### Brave (Built-in Preset)
|
||||
|
||||
```bash
|
||||
export WEB_PROVIDER=brave
|
||||
export WEB_KEY=your-brave-key
|
||||
```
|
||||
|
||||
**Request:**
|
||||
```
|
||||
GET https://api.search.brave.com/res/v1/web/search?q=search+terms
|
||||
X-Subscription-Token: your-brave-key
|
||||
```
|
||||
|
||||
**Response:**
|
||||
```json
|
||||
{
|
||||
"web": {
|
||||
"results": [
|
||||
{
|
||||
"title": "Result Title",
|
||||
"url": "https://example.com/page",
|
||||
"description": "Page description..."
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### SerpAPI (Built-in Preset)
|
||||
|
||||
```bash
|
||||
export WEB_PROVIDER=serpapi
|
||||
export WEB_KEY=your-serpapi-key
|
||||
```
|
||||
|
||||
**Request:**
|
||||
```
|
||||
GET https://serpapi.com/search.json?q=search+terms
|
||||
Authorization: Bearer your-serpapi-key
|
||||
```
|
||||
|
||||
**Response:**
|
||||
```json
|
||||
{
|
||||
"organic_results": [
|
||||
{
|
||||
"title": "Result Title",
|
||||
"link": "https://example.com/page",
|
||||
"snippet": "A short excerpt...",
|
||||
"displayed_link": "example.com"
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
### DuckDuckGo (Default Fallback)
|
||||
|
||||
No configuration needed. Uses the `duck-duck-scrape` npm package.
|
||||
|
||||
```bash
|
||||
# Set as explicit-only backend
|
||||
export WEB_SEARCH_PROVIDER=ddg
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Custom API Configuration
|
||||
|
||||
### Standard GET
|
||||
|
||||
```
|
||||
GET https://api.example.com/search?q=hello
|
||||
```
|
||||
|
||||
```bash
|
||||
export WEB_SEARCH_API=https://api.example.com/search
|
||||
export WEB_QUERY_PARAM=q
|
||||
```
|
||||
|
||||
### Query in URL Path
|
||||
|
||||
```
|
||||
GET https://api.example.com/v2/search/hello
|
||||
```
|
||||
|
||||
```bash
|
||||
export WEB_URL_TEMPLATE=https://api.example.com/v2/search/{query}
|
||||
```
|
||||
|
||||
### POST with Custom Body
|
||||
|
||||
```
|
||||
POST https://api.example.com/v1/query
|
||||
Content-Type: application/json
|
||||
|
||||
{"input": {"text": "hello"}}
|
||||
```
|
||||
|
||||
```bash
|
||||
export WEB_SEARCH_API=https://api.example.com/v1/query
|
||||
export WEB_METHOD=POST
|
||||
export WEB_BODY_TEMPLATE='{"input":{"text":"{query}"}}'
|
||||
```
|
||||
|
||||
### Extra Static Params
|
||||
|
||||
```bash
|
||||
export WEB_PARAMS='{"lang":"en","count":"10"}'
|
||||
```
|
||||
|
||||
## Auth
|
||||
|
||||
API keys are sent in HTTP headers, **never** in query strings.
|
||||
|
||||
```bash
|
||||
# Default: Authorization: Bearer <key>
|
||||
export WEB_KEY=your-key
|
||||
|
||||
# Custom header
|
||||
export WEB_AUTH_HEADER=X-Api-Key
|
||||
export WEB_AUTH_SCHEME=""
|
||||
|
||||
# Extra headers
|
||||
export WEB_HEADERS="X-Tenant: acme; Accept: application/json"
|
||||
```
|
||||
|
||||
## Response Parsing
|
||||
|
||||
The tool auto-detects many response formats:
|
||||
|
||||
```jsonc
|
||||
{ "results": [{ "title": "...", "url": "..." }] } // flat array
|
||||
{ "items": [{ "title": "...", "link": "..." }] } // Google-style
|
||||
{ "results": { "engine": [{ "title": "...", "url": "..." }] } } // nested map
|
||||
[{ "title": "...", "url": "..." }] // bare array
|
||||
```
|
||||
|
||||
Field name aliases: `title`/`headline`/`name`, `url`/`link`/`href`, `description`/`snippet`/`content`
|
||||
|
||||
For deeply nested responses:
|
||||
```bash
|
||||
export WEB_JSON_PATH=response.payload.results
|
||||
```
|
||||
|
||||
## Retry
|
||||
|
||||
Failed requests (network errors, 5xx) are retried once after 500ms. Client errors (4xx) are not retried. Custom requests have a default 120s timeout.
|
||||
|
||||
## Custom Provider Security Guardrails
|
||||
|
||||
The custom provider enforces the following guardrails by default:
|
||||
|
||||
| Guardrail | Default | Override |
|
||||
|-----------|---------|----------|
|
||||
| HTTPS-only | ✅ | `WEB_CUSTOM_ALLOW_HTTP=true` |
|
||||
| Block private IPs / localhost | ✅ | `WEB_CUSTOM_ALLOW_PRIVATE=true` |
|
||||
| Header allowlist | ✅ | `WEB_CUSTOM_ALLOW_ARBITRARY_HEADERS=true` |
|
||||
| Max POST body | 300 KB | `WEB_CUSTOM_MAX_BODY_KB=<kb>` |
|
||||
| Request timeout | 120s | `WEB_CUSTOM_TIMEOUT_SEC=<seconds>` |
|
||||
| Audit log (one-time warning) | ✅ | — |
|
||||
|
||||
### Self-hosted SearXNG example
|
||||
|
||||
```bash
|
||||
export WEB_PROVIDER=searxng
|
||||
export WEB_SEARCH_API=https://search.mydomain.com/search
|
||||
export WEB_CUSTOM_ALLOW_PRIVATE=true # needed if SearXNG is on a private IP
|
||||
```
|
||||
|
||||
### Header allowlist
|
||||
|
||||
By default only these headers are permitted:
|
||||
`accept`, `accept-encoding`, `accept-language`, `authorization`, `cache-control`, `content-type`, `if-modified-since`, `if-none-match`, `ocp-apim-subscription-key`, `user-agent`, `x-api-key`, `x-subscription-token`, `x-tenant-id`
|
||||
|
||||
## Adding a Provider
|
||||
|
||||
1. Create `providers/myprovider.ts`:
|
||||
|
||||
```typescript
|
||||
import type { SearchInput, SearchProvider } from './types.js'
|
||||
import { applyDomainFilters, type ProviderOutput } from './types.js'
|
||||
|
||||
export const myProvider: SearchProvider = {
|
||||
name: 'myprovider',
|
||||
isConfigured() { return Boolean(process.env.MYPROVIDER_API_KEY) },
|
||||
async search(input: SearchInput): Promise<ProviderOutput> {
|
||||
const start = performance.now()
|
||||
// ... call API, map to SearchHit[] ...
|
||||
return {
|
||||
hits: applyDomainFilters(hits, input),
|
||||
providerName: 'myprovider',
|
||||
durationSeconds: (performance.now() - start) / 1000,
|
||||
}
|
||||
},
|
||||
}
|
||||
```
|
||||
|
||||
2. Register in `providers/index.ts` — add import and push to `ALL_PROVIDERS`.
|
||||
@@ -28,13 +28,6 @@ import {
|
||||
renderToolUseProgressMessage,
|
||||
} from './UI.js'
|
||||
|
||||
import {
|
||||
runSearch,
|
||||
getProviderMode,
|
||||
getAvailableProviders,
|
||||
type ProviderOutput,
|
||||
} from './providers/index.js'
|
||||
|
||||
const inputSchema = lazySchema(() =>
|
||||
z.strictObject({
|
||||
query: z.string().min(2).describe('The search query to use'),
|
||||
@@ -86,39 +79,6 @@ export type { WebSearchProgress } from '../../types/tools.js'
|
||||
|
||||
import type { WebSearchProgress } from '../../types/tools.js'
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Shared formatting: ProviderOutput → Output
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
function formatProviderOutput(po: ProviderOutput, query: string): Output {
|
||||
const results: (SearchResult | string)[] = []
|
||||
|
||||
const snippets = po.hits
|
||||
.filter(h => h.description)
|
||||
.map(h => `**${h.title}** — ${h.description} (${h.url})`)
|
||||
.join('\n')
|
||||
if (snippets) results.push(snippets)
|
||||
|
||||
if (po.hits.length > 0) {
|
||||
results.push({
|
||||
tool_use_id: `${po.providerName}-search`,
|
||||
content: po.hits.map(h => ({ title: h.title, url: h.url })),
|
||||
})
|
||||
}
|
||||
|
||||
if (results.length === 0) results.push('No results found.')
|
||||
|
||||
return {
|
||||
query,
|
||||
results,
|
||||
durationSeconds: po.durationSeconds,
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Native Anthropic + Codex paths (unchanged, tightly coupled to SDK)
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
function makeToolSchema(input: Input): BetaWebSearchTool20250305 {
|
||||
return {
|
||||
type: 'web_search_20250305',
|
||||
@@ -129,10 +89,161 @@ function makeToolSchema(input: Input): BetaWebSearchTool20250305 {
|
||||
}
|
||||
}
|
||||
|
||||
function isFirecrawlEnabled(): boolean {
|
||||
return Boolean(process.env.FIRECRAWL_API_KEY)
|
||||
}
|
||||
|
||||
function shouldUseFirecrawl(): boolean {
|
||||
if (!isFirecrawlEnabled()) return false
|
||||
// Don't override native search on providers that already have it
|
||||
if (isCodexResponsesWebSearchEnabled()) return false
|
||||
const provider = getAPIProvider()
|
||||
if (provider === 'firstParty' || provider === 'vertex' || provider === 'foundry') return false
|
||||
return true
|
||||
}
|
||||
|
||||
function isClaudeModel(model: string): boolean {
|
||||
return /claude/i.test(model)
|
||||
}
|
||||
|
||||
function shouldUseDuckDuckGo(): boolean {
|
||||
if (isCodexResponsesWebSearchEnabled()) return false
|
||||
|
||||
const provider = getAPIProvider()
|
||||
// Don't override providers/models that have native web search support.
|
||||
if (provider === 'firstParty' || provider === 'vertex' || provider === 'foundry') {
|
||||
return false
|
||||
}
|
||||
|
||||
// Use free DDG search for non-Claude models by default.
|
||||
return !isClaudeModel(getMainLoopModel())
|
||||
}
|
||||
|
||||
async function runDuckDuckGoSearch(input: Input): Promise<Output> {
|
||||
const startTime = performance.now()
|
||||
|
||||
try {
|
||||
const { search } = await import('duck-duck-scrape')
|
||||
|
||||
const response = await search(input.query, {
|
||||
safeSearch: 0,
|
||||
})
|
||||
|
||||
let hits = response.results.map(r => ({
|
||||
title: r.title || r.url,
|
||||
url: r.url,
|
||||
snippet: r.description,
|
||||
}))
|
||||
|
||||
if (input.blocked_domains?.length) {
|
||||
hits = hits.filter(h => {
|
||||
try {
|
||||
const host = new URL(h.url).hostname
|
||||
return !input.blocked_domains!.some(d => host.endsWith(d))
|
||||
} catch {
|
||||
return false
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
if (input.allowed_domains?.length) {
|
||||
hits = hits.filter(h => {
|
||||
try {
|
||||
const host = new URL(h.url).hostname
|
||||
return input.allowed_domains!.some(d => host.endsWith(d))
|
||||
} catch {
|
||||
return false
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
const snippets = hits
|
||||
.filter(h => h.snippet)
|
||||
.map(h => `**${h.title}** — ${h.snippet} (${h.url})`)
|
||||
.join('\n')
|
||||
|
||||
const results: Output['results'] = []
|
||||
if (snippets) results.push(snippets)
|
||||
results.push({
|
||||
tool_use_id: 'duckduckgo-search',
|
||||
content: hits.map(({ title, url }) => ({ title, url })),
|
||||
})
|
||||
|
||||
return {
|
||||
query: input.query,
|
||||
results,
|
||||
durationSeconds: (performance.now() - startTime) / 1000,
|
||||
}
|
||||
} catch (error) {
|
||||
const message = error instanceof Error ? error.message : String(error)
|
||||
const isRateLimited =
|
||||
message.includes('429') ||
|
||||
message.includes('rate') ||
|
||||
message.includes('CAPTCHA') ||
|
||||
message.includes('blocked')
|
||||
|
||||
if (isRateLimited && isFirecrawlEnabled()) {
|
||||
return runFirecrawlSearch(input)
|
||||
}
|
||||
|
||||
return {
|
||||
query: input.query,
|
||||
results: [
|
||||
'Web search temporarily unavailable — try again or add a Firecrawl API key for reliable results.',
|
||||
],
|
||||
durationSeconds: (performance.now() - startTime) / 1000,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async function runFirecrawlSearch(input: Input): Promise<Output> {
|
||||
const startTime = performance.now()
|
||||
const { FirecrawlClient } = await import('@mendable/firecrawl-js')
|
||||
const app = new FirecrawlClient({ apiKey: process.env.FIRECRAWL_API_KEY! })
|
||||
|
||||
let query = input.query
|
||||
if (input.blocked_domains?.length) {
|
||||
const exclusions = input.blocked_domains.map(d => `-site:${d}`).join(' ')
|
||||
query = `${query} ${exclusions}`
|
||||
}
|
||||
|
||||
const data = await app.search(query, { limit: 10 })
|
||||
|
||||
let hits = (data.web ?? []).map((r: { url: string; title?: string }) => ({
|
||||
title: r.title ?? r.url,
|
||||
url: r.url,
|
||||
}))
|
||||
|
||||
if (input.allowed_domains?.length) {
|
||||
hits = hits.filter(h =>
|
||||
input.allowed_domains!.some(d => {
|
||||
try {
|
||||
return new URL(h.url).hostname.endsWith(d)
|
||||
} catch {
|
||||
return false
|
||||
}
|
||||
}),
|
||||
)
|
||||
}
|
||||
|
||||
const snippets = (data.web ?? [])
|
||||
.filter((r: { description?: string }) => r.description)
|
||||
.map((r: { url: string; title?: string; description?: string }) =>
|
||||
`**${r.title ?? r.url}** — ${r.description} (${r.url})`,
|
||||
)
|
||||
.join('\n')
|
||||
|
||||
const results: Output['results'] = []
|
||||
if (snippets) results.push(snippets)
|
||||
results.push({ tool_use_id: 'firecrawl-search', content: hits })
|
||||
|
||||
return {
|
||||
query: input.query,
|
||||
results,
|
||||
durationSeconds: (performance.now() - startTime) / 1000,
|
||||
}
|
||||
}
|
||||
|
||||
function isCodexResponsesWebSearchEnabled(): boolean {
|
||||
if (getAPIProvider() !== 'openai') {
|
||||
return false
|
||||
@@ -406,60 +517,6 @@ function makeOutputFromSearchResponse(
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Helper: should we use adapter-based providers?
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/**
|
||||
* Returns true for transient errors that are safe to fall through on in auto mode
|
||||
* (network failures, timeouts, HTTP 5xx). Config and guardrail errors return false.
|
||||
*/
|
||||
function isTransientError(err: unknown): boolean {
|
||||
if (!(err instanceof Error)) return true
|
||||
const msg = err.message.toLowerCase()
|
||||
// Guardrail / config errors — must surface
|
||||
if (msg.includes('must use https')) return false
|
||||
if (msg.includes('private/reserved address')) return false
|
||||
if (msg.includes('not in the safe allowlist')) return false
|
||||
if (msg.includes('exceeds') && msg.includes('bytes')) return false
|
||||
if (msg.includes('not a valid url')) return false
|
||||
if (msg.includes('is not configured')) return false
|
||||
// Transient errors — safe to fall through
|
||||
if (err.name === 'AbortError') return true
|
||||
if (msg.includes('timed out')) return true
|
||||
if (msg.includes('fetch failed') || msg.includes('econnrefused') || msg.includes('enotfound')) return true
|
||||
if (msg.includes('returned 5')) return true // HTTP 5xx
|
||||
// Unknown — treat as transient to preserve auto-mode fallback semantics
|
||||
return true
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns true when we should use the adapter-based provider system.
|
||||
*
|
||||
* In auto mode: native/first-party/Codex paths take precedence.
|
||||
* → Only falls back to adapter if no native path is available.
|
||||
* In explicit adapter modes (tavily, ddg, custom, etc.): always true.
|
||||
* In native mode: never true.
|
||||
*/
|
||||
function shouldUseAdapterProvider(): boolean {
|
||||
const mode = getProviderMode()
|
||||
if (mode === 'native') return false
|
||||
if (mode !== 'auto') return true // explicit adapter mode (tavily, ddg, custom, etc.)
|
||||
|
||||
// Auto mode: native/first-party/Codex take precedence over adapter
|
||||
if (isCodexResponsesWebSearchEnabled()) return false
|
||||
const provider = getAPIProvider()
|
||||
if (provider === 'firstParty' || provider === 'vertex' || provider === 'foundry') {
|
||||
return false
|
||||
}
|
||||
// No native path available — fall back to adapter
|
||||
return getAvailableProviders().length > 0
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Tool export
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
export const WebSearchTool = buildTool({
|
||||
name: WEB_SEARCH_TOOL_NAME,
|
||||
searchHint: 'search the web for current information',
|
||||
@@ -477,20 +534,21 @@ export const WebSearchTool = buildTool({
|
||||
return summary ? `Searching for ${summary}` : 'Searching the web'
|
||||
},
|
||||
isEnabled() {
|
||||
const mode = getProviderMode()
|
||||
|
||||
// Specific provider mode: enabled if any adapter is configured
|
||||
if (mode !== 'auto' && mode !== 'native') {
|
||||
return getAvailableProviders().length > 0
|
||||
if (shouldUseFirecrawl()) {
|
||||
return true
|
||||
}
|
||||
|
||||
// Auto/native mode: check all paths
|
||||
if (getAvailableProviders().length > 0) return true
|
||||
if (isCodexResponsesWebSearchEnabled()) return true
|
||||
if (shouldUseDuckDuckGo()) {
|
||||
return true
|
||||
}
|
||||
|
||||
const provider = getAPIProvider()
|
||||
const model = getMainLoopModel()
|
||||
|
||||
if (isCodexResponsesWebSearchEnabled()) {
|
||||
return true
|
||||
}
|
||||
|
||||
// Enable for firstParty
|
||||
if (provider === 'firstParty') {
|
||||
return true
|
||||
@@ -543,8 +601,11 @@ export const WebSearchTool = buildTool({
|
||||
}
|
||||
},
|
||||
async prompt() {
|
||||
// Strip "US only" when using non-native backends
|
||||
if (shouldUseAdapterProvider() || isCodexResponsesWebSearchEnabled()) {
|
||||
if (
|
||||
shouldUseDuckDuckGo() ||
|
||||
shouldUseFirecrawl() ||
|
||||
isCodexResponsesWebSearchEnabled()
|
||||
) {
|
||||
return getWebSearchPrompt().replace(
|
||||
/\n\s*-\s*Web search is only available in the US/,
|
||||
'',
|
||||
@@ -581,47 +642,20 @@ export const WebSearchTool = buildTool({
|
||||
return { result: true }
|
||||
},
|
||||
async call(input, context, _canUseTool, _parentMessage, onProgress) {
|
||||
// --- Adapter-based providers (custom, firecrawl, ddg) ---
|
||||
// runSearch handles fallback semantics based on WEB_SEARCH_PROVIDER mode:
|
||||
// - "auto": tries each provider, falls through on failure
|
||||
// - specific mode: runs one provider, throws on failure
|
||||
if (shouldUseAdapterProvider()) {
|
||||
const mode = getProviderMode()
|
||||
const isExplicitAdapter = mode !== 'auto'
|
||||
try {
|
||||
const providerOutput = await runSearch(
|
||||
{
|
||||
query: input.query,
|
||||
allowed_domains: input.allowed_domains,
|
||||
blocked_domains: input.blocked_domains,
|
||||
},
|
||||
context.abortController.signal,
|
||||
)
|
||||
// Explicit adapter: return even 0 hits (no silent native fallback)
|
||||
if (isExplicitAdapter || providerOutput.hits.length > 0) {
|
||||
return { data: formatProviderOutput(providerOutput, input.query) }
|
||||
}
|
||||
// Auto mode with 0 hits: fall through to native
|
||||
} catch (err) {
|
||||
// Explicit adapter: throw the real error (no silent native fallback)
|
||||
if (isExplicitAdapter) throw err
|
||||
// Auto mode: only fall through on transient errors (network, timeout, 5xx).
|
||||
// Config / guardrail errors (SSRF, HTTPS, bad URL, etc.) must surface.
|
||||
if (!isTransientError(err)) throw err
|
||||
console.error(
|
||||
`[web-search] Adapter failed, falling through to native: ${err}`,
|
||||
)
|
||||
}
|
||||
if (shouldUseFirecrawl()) {
|
||||
return { data: await runFirecrawlSearch(input) }
|
||||
}
|
||||
|
||||
if (shouldUseDuckDuckGo()) {
|
||||
return { data: await runDuckDuckGoSearch(input) }
|
||||
}
|
||||
|
||||
// --- Codex / OpenAI Responses path ---
|
||||
if (isCodexResponsesWebSearchEnabled()) {
|
||||
return {
|
||||
data: await runCodexWebSearch(input, context.abortController.signal),
|
||||
}
|
||||
}
|
||||
|
||||
// --- Native Anthropic path (firstParty / vertex / foundry) ---
|
||||
const startTime = performance.now()
|
||||
const { query } = input
|
||||
const userMessage = createUserMessage({
|
||||
@@ -681,6 +715,8 @@ export const WebSearchTool = buildTool({
|
||||
if (contentBlock && contentBlock.type === 'server_tool_use') {
|
||||
currentToolUseId = contentBlock.id
|
||||
currentToolUseJson = ''
|
||||
// Note: The ServerToolUseBlock doesn't contain input.query
|
||||
// The actual query comes through input_json_delta events
|
||||
continue
|
||||
}
|
||||
}
|
||||
@@ -697,10 +733,12 @@ export const WebSearchTool = buildTool({
|
||||
|
||||
// Try to extract query from partial JSON for progress updates
|
||||
try {
|
||||
// Look for a complete query field
|
||||
const queryMatch = currentToolUseJson.match(
|
||||
/"query"\s*:\s*"((?:[^"\\]|\\.)*)"/,
|
||||
)
|
||||
if (queryMatch && queryMatch[1]) {
|
||||
// The regex properly handles escaped characters
|
||||
const query = jsonParse('"' + queryMatch[1] + '"')
|
||||
|
||||
if (
|
||||
@@ -733,6 +771,7 @@ export const WebSearchTool = buildTool({
|
||||
) {
|
||||
const contentBlock = event.event.content_block
|
||||
if (contentBlock && contentBlock.type === 'web_search_tool_result') {
|
||||
// Get the actual query that was used for this search
|
||||
const toolUseId = contentBlock.tool_use_id
|
||||
const actualQuery = toolUseQueries.get(toolUseId) || query
|
||||
const content = contentBlock.content
|
||||
|
||||
@@ -1,47 +0,0 @@
|
||||
/**
|
||||
* Bing Web Search API adapter.
|
||||
* GET https://api.bing.microsoft.com/v7.0/search?q=...
|
||||
* Auth: Ocp-Apim-Subscription-Key: <key>
|
||||
*/
|
||||
|
||||
import type { SearchInput, SearchProvider } from './types.js'
|
||||
import { applyDomainFilters, type ProviderOutput } from './types.js'
|
||||
|
||||
export const bingProvider: SearchProvider = {
|
||||
name: 'bing',
|
||||
|
||||
isConfigured() {
|
||||
return Boolean(process.env.BING_API_KEY)
|
||||
},
|
||||
|
||||
async search(input: SearchInput, signal?: AbortSignal): Promise<ProviderOutput> {
|
||||
const start = performance.now()
|
||||
|
||||
const url = new URL('https://api.bing.microsoft.com/v7.0/search')
|
||||
url.searchParams.set('q', input.query)
|
||||
url.searchParams.set('count', '10')
|
||||
|
||||
const res = await fetch(url.toString(), {
|
||||
headers: { 'Ocp-Apim-Subscription-Key': process.env.BING_API_KEY! },
|
||||
signal,
|
||||
})
|
||||
|
||||
if (!res.ok) {
|
||||
throw new Error(`Bing search error ${res.status}: ${await res.text().catch(() => '')}`)
|
||||
}
|
||||
|
||||
const data = await res.json()
|
||||
const hits = (data.webPages?.value ?? []).map((r: any) => ({
|
||||
title: r.name ?? '',
|
||||
url: r.url ?? '',
|
||||
description: r.snippet,
|
||||
source: r.displayUrl,
|
||||
}))
|
||||
|
||||
return {
|
||||
hits: applyDomainFilters(hits, input),
|
||||
providerName: 'bing',
|
||||
durationSeconds: (performance.now() - start) / 1000,
|
||||
}
|
||||
},
|
||||
}
|
||||
@@ -1,268 +0,0 @@
|
||||
import { describe, expect, test, beforeEach, afterEach } from 'bun:test'
|
||||
import { extractHits, customProvider, isPrivateHostname } from './custom.js'
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// extractHits — flexible response parsing
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
describe('extractHits', () => {
|
||||
test('extracts from results array', () => {
|
||||
const data = { results: [{ title: 'T', url: 'https://ex.com' }] }
|
||||
const hits = extractHits(data)
|
||||
expect(hits).toHaveLength(1)
|
||||
expect(hits[0].title).toBe('T')
|
||||
})
|
||||
|
||||
test('extracts from items array (Google-style)', () => {
|
||||
const data = { items: [{ title: 'T', link: 'https://ex.com' }] }
|
||||
const hits = extractHits(data)
|
||||
expect(hits).toHaveLength(1)
|
||||
expect(hits[0].url).toBe('https://ex.com')
|
||||
})
|
||||
|
||||
test('extracts from data array', () => {
|
||||
const data = { data: [{ title: 'T', url: 'https://ex.com' }] }
|
||||
const hits = extractHits(data)
|
||||
expect(hits).toHaveLength(1)
|
||||
})
|
||||
|
||||
test('extracts from bare array', () => {
|
||||
const data = [{ title: 'T', url: 'https://ex.com' }]
|
||||
const hits = extractHits(data)
|
||||
expect(hits).toHaveLength(1)
|
||||
})
|
||||
|
||||
test('extracts from nested map (e.g. web.results)', () => {
|
||||
const data = {
|
||||
web: {
|
||||
results: [{ title: 'T', url: 'https://ex.com' }],
|
||||
},
|
||||
}
|
||||
const hits = extractHits(data)
|
||||
expect(hits).toHaveLength(1)
|
||||
})
|
||||
|
||||
test('extracts with explicit jsonPath', () => {
|
||||
const data = {
|
||||
response: {
|
||||
payload: [{ title: 'T', url: 'https://ex.com' }],
|
||||
},
|
||||
}
|
||||
const hits = extractHits(data, 'response.payload')
|
||||
expect(hits).toHaveLength(1)
|
||||
})
|
||||
|
||||
test('returns empty for empty object', () => {
|
||||
expect(extractHits({})).toHaveLength(0)
|
||||
})
|
||||
|
||||
test('returns empty for null', () => {
|
||||
expect(extractHits(null)).toHaveLength(0)
|
||||
})
|
||||
|
||||
test('returns empty for no array keys', () => {
|
||||
expect(extractHits({ status: 'ok', count: 5 })).toHaveLength(0)
|
||||
})
|
||||
|
||||
test('filters out hits with no title and no url', () => {
|
||||
const data = {
|
||||
results: [
|
||||
{ title: 'Valid', url: 'https://ex.com' },
|
||||
{ description: 'no title or url' },
|
||||
],
|
||||
}
|
||||
const hits = extractHits(data)
|
||||
expect(hits).toHaveLength(1)
|
||||
})
|
||||
|
||||
test('extracts from organic_results (SerpAPI-style)', () => {
|
||||
const data = {
|
||||
organic_results: [{ title: 'T', link: 'https://ex.com' }],
|
||||
}
|
||||
const hits = extractHits(data)
|
||||
expect(hits).toHaveLength(1)
|
||||
})
|
||||
})
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// buildAuthHeadersForPreset — tested indirectly via env vars
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
describe('buildAuthHeadersForPreset auth header behavior', () => {
|
||||
const savedEnv: Record<string, string | undefined> = {}
|
||||
|
||||
beforeEach(() => {
|
||||
for (const k of ['WEB_KEY', 'WEB_AUTH_HEADER', 'WEB_AUTH_SCHEME']) {
|
||||
savedEnv[k] = process.env[k]
|
||||
}
|
||||
})
|
||||
|
||||
afterEach(() => {
|
||||
for (const [k, v] of Object.entries(savedEnv)) {
|
||||
if (v === undefined) delete process.env[k]
|
||||
else process.env[k] = v
|
||||
}
|
||||
})
|
||||
|
||||
// We test isConfigured() which depends on WEB_SEARCH_API/WEB_PROVIDER/WEB_URL_TEMPLATE
|
||||
// and the auth behavior through the public search() interface
|
||||
test('custom provider is configured when WEB_URL_TEMPLATE is set', () => {
|
||||
process.env.WEB_URL_TEMPLATE = 'https://example.com/search?q={query}'
|
||||
const { customProvider } = require('./custom.js')
|
||||
expect(customProvider.isConfigured()).toBe(true)
|
||||
delete process.env.WEB_URL_TEMPLATE
|
||||
})
|
||||
|
||||
test('custom provider is NOT configured when no env vars are set', () => {
|
||||
delete process.env.WEB_URL_TEMPLATE
|
||||
delete process.env.WEB_SEARCH_API
|
||||
delete process.env.WEB_PROVIDER
|
||||
expect(customProvider.isConfigured()).toBe(false)
|
||||
})
|
||||
})
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// buildAuthHeadersForPreset — direct tests for WEB_AUTH_HEADER / WEB_AUTH_SCHEME
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
describe('buildAuthHeadersForPreset direct assertions', () => {
|
||||
const savedEnv: Record<string, string | undefined> = {}
|
||||
|
||||
beforeEach(() => {
|
||||
for (const k of ['WEB_KEY', 'WEB_AUTH_HEADER', 'WEB_AUTH_SCHEME']) {
|
||||
savedEnv[k] = process.env[k]
|
||||
}
|
||||
})
|
||||
|
||||
afterEach(() => {
|
||||
for (const [k, v] of Object.entries(savedEnv)) {
|
||||
if (v === undefined) delete process.env[k]
|
||||
else process.env[k] = v
|
||||
}
|
||||
})
|
||||
|
||||
test('WEB_AUTH_HEADER="" is an explicit opt-out — returns empty headers even with WEB_KEY set', () => {
|
||||
process.env.WEB_KEY = 'sk-test-123'
|
||||
process.env.WEB_AUTH_HEADER = ''
|
||||
const { buildAuthHeadersForPreset } = require('./custom.js')
|
||||
expect(buildAuthHeadersForPreset({ urlTemplate: '', queryParam: 'q', authHeader: 'Authorization' })).toEqual({})
|
||||
})
|
||||
|
||||
test('WEB_AUTH_SCHEME="" strips the scheme prefix (bare key only)', () => {
|
||||
process.env.WEB_KEY = 'sk-test-123'
|
||||
process.env.WEB_AUTH_SCHEME = ''
|
||||
delete process.env.WEB_AUTH_HEADER
|
||||
const { buildAuthHeadersForPreset } = require('./custom.js')
|
||||
const result = buildAuthHeadersForPreset({ urlTemplate: '', queryParam: 'q', authHeader: 'X-Api-Key' })
|
||||
// scheme is '' so the header value should be just the key (trimmed)
|
||||
expect(result).toEqual({ 'X-Api-Key': 'sk-test-123' })
|
||||
})
|
||||
|
||||
test('uses preset authHeader and authScheme when no env overrides', () => {
|
||||
process.env.WEB_KEY = 'tok-abc'
|
||||
delete process.env.WEB_AUTH_HEADER
|
||||
delete process.env.WEB_AUTH_SCHEME
|
||||
const { buildAuthHeadersForPreset } = require('./custom.js')
|
||||
const result = buildAuthHeadersForPreset({ urlTemplate: '', queryParam: 'q', authHeader: 'Authorization', authScheme: 'Bearer' })
|
||||
expect(result).toEqual({ 'Authorization': 'Bearer tok-abc' })
|
||||
})
|
||||
|
||||
test('returns empty when WEB_KEY is not set', () => {
|
||||
delete process.env.WEB_KEY
|
||||
delete process.env.WEB_AUTH_HEADER
|
||||
delete process.env.WEB_AUTH_SCHEME
|
||||
const { buildAuthHeadersForPreset } = require('./custom.js')
|
||||
expect(buildAuthHeadersForPreset({ urlTemplate: '', queryParam: 'q', authHeader: 'Authorization' })).toEqual({})
|
||||
})
|
||||
})
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// isPrivateHostname — SSRF guard
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
// Helper: route through new URL() the way validateUrl() does, so we exercise
|
||||
// the same normalized hostname that production code sees.
|
||||
const hostOf = (url: string) => new URL(url).hostname
|
||||
|
||||
describe('isPrivateHostname — IPv4', () => {
|
||||
test('blocks localhost', () => {
|
||||
expect(isPrivateHostname('localhost')).toBe(true)
|
||||
expect(isPrivateHostname('LOCALHOST')).toBe(true)
|
||||
})
|
||||
|
||||
test('blocks 127.0.0.0/8 loopback including short/numeric/hex/octal forms (via URL normalization)', () => {
|
||||
expect(isPrivateHostname(hostOf('http://127.0.0.1/'))).toBe(true)
|
||||
expect(isPrivateHostname(hostOf('http://127.1/'))).toBe(true)
|
||||
expect(isPrivateHostname(hostOf('http://2130706433/'))).toBe(true)
|
||||
expect(isPrivateHostname(hostOf('http://0x7f000001/'))).toBe(true)
|
||||
expect(isPrivateHostname(hostOf('http://0177.0.0.1/'))).toBe(true)
|
||||
})
|
||||
|
||||
test('blocks RFC1918 ranges', () => {
|
||||
expect(isPrivateHostname('10.0.0.1')).toBe(true)
|
||||
expect(isPrivateHostname('172.16.0.1')).toBe(true)
|
||||
expect(isPrivateHostname('172.31.255.255')).toBe(true)
|
||||
expect(isPrivateHostname('192.168.1.1')).toBe(true)
|
||||
})
|
||||
|
||||
test('blocks 169.254.0.0/16 link-local (AWS/GCP metadata)', () => {
|
||||
expect(isPrivateHostname('169.254.169.254')).toBe(true)
|
||||
})
|
||||
|
||||
test('blocks 100.64.0.0/10 CGNAT', () => {
|
||||
expect(isPrivateHostname('100.64.0.1')).toBe(true)
|
||||
expect(isPrivateHostname('100.127.255.255')).toBe(true)
|
||||
})
|
||||
|
||||
test('blocks 0.0.0.0/8', () => {
|
||||
expect(isPrivateHostname('0.0.0.0')).toBe(true)
|
||||
expect(isPrivateHostname('0.1.2.3')).toBe(true)
|
||||
})
|
||||
|
||||
test('allows public IPv4', () => {
|
||||
expect(isPrivateHostname('8.8.8.8')).toBe(false)
|
||||
expect(isPrivateHostname('172.15.0.1')).toBe(false) // just outside 172.16/12
|
||||
expect(isPrivateHostname('172.32.0.1')).toBe(false)
|
||||
expect(isPrivateHostname('100.63.255.255')).toBe(false) // just outside CGNAT
|
||||
expect(isPrivateHostname('100.128.0.0')).toBe(false)
|
||||
})
|
||||
|
||||
test('allows regular hostnames', () => {
|
||||
expect(isPrivateHostname('example.com')).toBe(false)
|
||||
expect(isPrivateHostname('api.search.brave.com')).toBe(false)
|
||||
})
|
||||
})
|
||||
|
||||
describe('isPrivateHostname — IPv6', () => {
|
||||
test('blocks ::1 loopback and :: unspecified', () => {
|
||||
expect(isPrivateHostname(hostOf('http://[::1]/'))).toBe(true)
|
||||
expect(isPrivateHostname(hostOf('http://[::]/'))).toBe(true)
|
||||
})
|
||||
|
||||
test('blocks IPv4-mapped IPv6 pointing at private v4 (the previous bypass)', () => {
|
||||
// WHATWG URL normalizes [::ffff:127.0.0.1] → [::ffff:7f00:1]; must still block.
|
||||
expect(isPrivateHostname(hostOf('http://[::ffff:127.0.0.1]/'))).toBe(true)
|
||||
expect(isPrivateHostname(hostOf('http://[::ffff:7f00:1]/'))).toBe(true)
|
||||
expect(isPrivateHostname(hostOf('http://[::ffff:169.254.169.254]/'))).toBe(true)
|
||||
expect(isPrivateHostname(hostOf('http://[::ffff:10.0.0.1]/'))).toBe(true)
|
||||
})
|
||||
|
||||
test('blocks ULA fc00::/7', () => {
|
||||
expect(isPrivateHostname(hostOf('http://[fc00::1]/'))).toBe(true)
|
||||
expect(isPrivateHostname(hostOf('http://[fd12:3456:789a::1]/'))).toBe(true)
|
||||
})
|
||||
|
||||
test('blocks link-local fe80::/10', () => {
|
||||
expect(isPrivateHostname(hostOf('http://[fe80::1]/'))).toBe(true)
|
||||
expect(isPrivateHostname(hostOf('http://[febf::1]/'))).toBe(true)
|
||||
})
|
||||
|
||||
test('allows public IPv6', () => {
|
||||
expect(isPrivateHostname(hostOf('http://[2001:4860:4860::8888]/'))).toBe(false)
|
||||
expect(isPrivateHostname(hostOf('http://[2606:4700:4700::1111]/'))).toBe(false)
|
||||
})
|
||||
|
||||
test('malformed IPv6 is not classified as private (URL parser rejects it upstream)', () => {
|
||||
expect(isPrivateHostname('not:an:ipv6')).toBe(false)
|
||||
})
|
||||
})
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user