Compare commits


1 Commit

Author SHA1 Message Date
root
2f4a06dd40 fix(theme): remove stale React Compiler memo wrappers from theme hooks
Rebase on current main (includes #589 reconciler fix).

The React Compiler memo caches (_c) in useTheme() and usePreviewTheme()
use referential equality checks on destructured context values. These
caches can return stale references when the ThemeProvider's useMemo
recreates the context value object but the individual property
references (setThemeSetting, setPreviewTheme, etc.) compare equal —
the memo short-circuits and returns a cached tuple/object that still
holds the old closure captures.

This is a distinct bug from #589 (which fixed the ink reconciler's
commitUpdate path for host prop updates). #589 ensures that when
React _does_ re-render a component with new props, those props actually
reach the DOM node. But the memo wrappers here prevent React from
_even seeing_ the new context value in the first place — the hook
returns the stale cached result.

Removing the memo wrappers ensures useTheme() and usePreviewTheme()
always read the current context value, eliminating the stale-reference
path entirely.
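
To make the failure mode concrete, here is a minimal sketch (hypothetical hook and context shapes; `ThemeContextValue`, `defaultTheme`, and `reset` are illustrative, not the real API):

```ts
import { createContext, useContext, useRef } from 'react'

type ThemeContextValue = {
  previewTheme: string
  setPreviewTheme: (theme: string) => void
  defaultTheme: string // assumed property, for illustration only
}

const ThemeContext = createContext<ThemeContextValue>({
  previewTheme: 'dark',
  setPreviewTheme: () => {},
  defaultTheme: 'dark',
})

type PreviewThemeApi = {
  previewTheme: string
  setPreviewTheme: (theme: string) => void
  reset: () => void
}

export function usePreviewTheme(): PreviewThemeApi {
  const ctx = useContext(ThemeContext)
  const { previewTheme, setPreviewTheme } = ctx
  // Emulates the compiler's `_c` memo slot: keyed on the destructured values.
  const cache = useRef<{ deps: unknown[]; value: PreviewThemeApi } | null>(null)
  const deps = [previewTheme, setPreviewTheme]
  if (cache.current && cache.current.deps.every((d, i) => Object.is(d, deps[i]))) {
    // Short-circuit: returns an object built on an earlier render.
    return cache.current.value
  }
  const value: PreviewThemeApi = {
    previewTheme,
    setPreviewTheme,
    // This closure captures `ctx` from the render that built it. If the
    // provider's useMemo later recreates the context object while the two
    // deps above keep their identities, callers keep receiving the cached
    // `value`, and with it this stale `ctx` capture.
    reset: () => setPreviewTheme(ctx.defaultTheme),
  }
  cache.current = { deps, value }
  return value
}
```

With the wrappers removed, each call re-reads `useContext` directly, so there is no cached tuple to go stale.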
2026-04-12 07:39:39 +00:00
276 changed files with 2891 additions and 28793 deletions

View File

@@ -1,16 +0,0 @@
node_modules
dist
.git
.gitignore
.env
.env.*
!.env.example
coverage
reports
vscode-extension
python
docs
*.md
!README.md
.github
.tsbuildinfo

View File

@@ -149,23 +149,6 @@ ANTHROPIC_API_KEY=sk-ant-your-key-here
# Use a custom OpenAI-compatible endpoint (optional — defaults to api.openai.com)
# OPENAI_BASE_URL=https://api.openai.com/v1
# Fallback context window size (tokens) when the model is not found in the
# built-in table (default: 128000). Increase this for models with larger
# context windows (e.g. 200000 for Claude-sized contexts).
# CLAUDE_CODE_OPENAI_FALLBACK_CONTEXT_WINDOW=128000
# Per-model context window overrides as a JSON object.
# Takes precedence over the built-in table, so you can register new or
# custom models without patching source.
# Example: CLAUDE_CODE_OPENAI_CONTEXT_WINDOWS={"my-corp/llm-v3":262144,"gpt-4o-mini":128000}
# CLAUDE_CODE_OPENAI_CONTEXT_WINDOWS=
# Per-model maximum output token overrides as a JSON object.
# Use this alongside CLAUDE_CODE_OPENAI_CONTEXT_WINDOWS when your model
# supports a different output limit than what the built-in table specifies.
# Example: CLAUDE_CODE_OPENAI_MAX_OUTPUT_TOKENS={"my-corp/llm-v3":8192}
# CLAUDE_CODE_OPENAI_MAX_OUTPUT_TOKENS=
# -----------------------------------------------------------------------------
# Option 3: Google Gemini
@@ -242,30 +225,6 @@ ANTHROPIC_API_KEY=sk-ant-your-key-here
# GOOGLE_CLOUD_PROJECT=your-gcp-project-id
# -----------------------------------------------------------------------------
# Option 9: NVIDIA NIM
# -----------------------------------------------------------------------------
# NVIDIA NIM provides hosted inference endpoints for NVIDIA models.
# Get your API key from https://build.nvidia.com/
#
# CLAUDE_CODE_USE_OPENAI=1
# NVIDIA_API_KEY=nvapi-your-key-here
# OPENAI_BASE_URL=https://integrate.api.nvidia.com/v1
# OPENAI_MODEL=nvidia/llama-3.1-nemotron-70b-instruct
# -----------------------------------------------------------------------------
# Option 10: MiniMax
# -----------------------------------------------------------------------------
# MiniMax API provides text generation models.
# Get your API key from https://platform.minimax.io/
#
# CLAUDE_CODE_USE_OPENAI=1
# MINIMAX_API_KEY=your-minimax-key-here
# OPENAI_BASE_URL=https://api.minimax.io/v1
# OPENAI_MODEL=MiniMax-M2.5
# =============================================================================
# OPTIONAL TUNING
# =============================================================================
@@ -284,16 +243,6 @@ ANTHROPIC_API_KEY=sk-ant-your-key-here
# Disable "Co-authored-by" line in git commits made by OpenClaude
# OPENCLAUDE_DISABLE_CO_AUTHORED_BY=1
# Disable strict tool schema normalization for non-Gemini providers
# Useful when MCP tools with complex optional params (e.g. list[dict])
# trigger "Extra required key ... supplied" errors from OpenAI-compatible endpoints
# OPENCLAUDE_DISABLE_STRICT_TOOLS=1
# Disable hidden <system-reminder> messages injected into tool output
# Suppresses the file-read cyber-risk reminder and the todo/task tool nudges
# Useful for users who want full transparency over what the model sees
# OPENCLAUDE_DISABLE_TOOL_REMINDERS=1
# Custom timeout for API requests in milliseconds (default: varies)
# API_TIMEOUT_MS=60000

View File

@@ -1,144 +0,0 @@
name: Auto Release
on:
  push:
    branches:
      - main
concurrency:
  group: auto-release-${{ github.ref }}
  cancel-in-progress: false
jobs:
  release-please:
    if: ${{ github.repository == 'Gitlawb/openclaude' }}
    name: Release Please
    runs-on: ubuntu-latest
    permissions:
      contents: write
      pull-requests: write
    outputs:
      release_created: ${{ steps.release.outputs.release_created }}
      tag_name: ${{ steps.release.outputs.tag_name }}
      version: ${{ steps.release.outputs.version }}
    steps:
      - name: Run release-please
        id: release
        uses: googleapis/release-please-action@16a9c90856f42705d54a6fda1823352bdc62cf38
        with:
          token: ${{ secrets.GITHUB_TOKEN }}
          release-type: node
  publish-npm:
    name: Publish to npm
    needs: release-please
    if: ${{ needs.release-please.outputs.release_created == 'true' }}
    runs-on: ubuntu-latest
    environment: release
    permissions:
      contents: read
      id-token: write
    steps:
      - name: Checkout release tag
        uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5
        with:
          ref: ${{ needs.release-please.outputs.tag_name }}
          fetch-depth: 0
      - name: Set up Node.js
        uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020
        with:
          node-version: 24
          registry-url: https://registry.npmjs.org
      - name: Set up Bun
        uses: oven-sh/setup-bun@0c5077e51419868618aeaa5fe8019c62421857d6
        with:
          bun-version: 1.3.11
      - name: Install dependencies
        run: bun install --frozen-lockfile
      - name: Run unit tests
        run: bun test --max-concurrency=1
      - name: Smoke test
        run: bun run smoke
      - name: Build
        run: bun run build
      - name: Dry-run package
        run: npm pack --dry-run
      - name: Clear token auth for trusted publishing
        run: |
          unset NODE_AUTH_TOKEN
          echo "NODE_AUTH_TOKEN=" >> "$GITHUB_ENV"
      - name: Publish to npm
        run: npm publish --access public --provenance
      - name: Release summary
        run: |
          {
            echo "## Released ${{ needs.release-please.outputs.tag_name }}"
            echo
            echo "- npm: https://www.npmjs.com/package/@gitlawb/openclaude"
            echo "- GitHub: https://github.com/Gitlawb/openclaude/releases/tag/${{ needs.release-please.outputs.tag_name }}"
          } >> "$GITHUB_STEP_SUMMARY"
  docker:
    name: Build & Push Docker Image
    needs: release-please
    if: ${{ needs.release-please.outputs.release_created == 'true' }}
    runs-on: ubuntu-latest
    permissions:
      contents: read
      packages: write
    steps:
      - name: Checkout release tag
        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
        with:
          ref: ${{ needs.release-please.outputs.tag_name }}
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3.10.0
      - name: Log in to GitHub Container Registry
        uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
        with:
          registry: ghcr.io
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}
      - name: Extract metadata
        id: meta
        uses: docker/metadata-action@902fa8ec7d6ecbf8d84d538b9b233a880e428804 # v5.7.0
        with:
          images: ghcr.io/${{ github.repository }}
          tags: |
            type=semver,pattern={{version}},value=${{ needs.release-please.outputs.version }}
            type=semver,pattern={{major}}.{{minor}},value=${{ needs.release-please.outputs.version }}
            type=raw,value=latest
      - name: Build and load locally
        uses: docker/build-push-action@14487ce63c7a62a4a324b0bfb37086795e31c6c1 # v6.16.0
        with:
          context: .
          load: true
          tags: openclaude:smoke
          cache-from: type=gha
      - name: Smoke test
        run: docker run --rm openclaude:smoke --version
      - name: Build and push
        uses: docker/build-push-action@14487ce63c7a62a4a324b0bfb37086795e31c6c1 # v6.16.0
        with:
          context: .
          push: true
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
          cache-from: type=gha
          cache-to: type=gha,mode=max

.gitignore vendored
View File

@@ -7,8 +7,6 @@ dist/
.openclaude-profile.json
reports/
GEMINI.md
CLAUDE.md
package-lock.json
/.claude
coverage/
agent.log

View File

@@ -1,3 +0,0 @@
{
  ".": "0.6.0"
}

View File

@@ -1,176 +0,0 @@
# Changelog
## [0.6.0](https://github.com/Gitlawb/openclaude/compare/v0.5.2...v0.6.0) (2026-04-22)
### Features
* add model caching and benchmarking utilities ([#671](https://github.com/Gitlawb/openclaude/issues/671)) ([2b15e16](https://github.com/Gitlawb/openclaude/commit/2b15e16421f793f954a92c53933a07094544b29d))
* add thinking token extraction ([#798](https://github.com/Gitlawb/openclaude/issues/798)) ([268c039](https://github.com/Gitlawb/openclaude/commit/268c0398e4bf1ab898069c61500a2b3c226a0322))
* **api:** compress old tool_result content for small-context providers ([#801](https://github.com/Gitlawb/openclaude/issues/801)) ([a6a3de5](https://github.com/Gitlawb/openclaude/commit/a6a3de5ac155fe9d00befbfcab98d439314effd8))
* **api:** improve local provider reliability with readiness and self-healing ([#738](https://github.com/Gitlawb/openclaude/issues/738)) ([4cb963e](https://github.com/Gitlawb/openclaude/commit/4cb963e660dbd6ee438c04042700db05a9d32c59))
* **api:** smart model routing primitive (cheap-for-simple, strong-for-hard) ([#785](https://github.com/Gitlawb/openclaude/issues/785)) ([e908864](https://github.com/Gitlawb/openclaude/commit/e908864da7e7c987a98053ac5d18d702e192db2b))
* enable 15 additional feature flags in open build ([#667](https://github.com/Gitlawb/openclaude/issues/667)) ([6a62e3f](https://github.com/Gitlawb/openclaude/commit/6a62e3ff76ba9ba446b8e20cf2bb139ee76a9387))
* native Anthropic API mode for Claude models on GitHub Copilot ([#579](https://github.com/Gitlawb/openclaude/issues/579)) ([fdef4a1](https://github.com/Gitlawb/openclaude/commit/fdef4a1b4ce218ded4937ca83b30acce7c726472))
* **provider:** expose Atomic Chat in /provider picker with autodetect ([#810](https://github.com/Gitlawb/openclaude/issues/810)) ([ee19159](https://github.com/Gitlawb/openclaude/commit/ee19159c17b3de3b4a8b4a4541a6569f4261d54e))
* **provider:** zero-config autodetection primitive ([#784](https://github.com/Gitlawb/openclaude/issues/784)) ([a5bfcbb](https://github.com/Gitlawb/openclaude/commit/a5bfcbbadf8e9a1fd42f3e103d295524b8da64b0))
### Bug Fixes
* **api:** ensure strict role sequence and filter empty assistant messages after interruption ([#745](https://github.com/Gitlawb/openclaude/issues/745) regression) ([#794](https://github.com/Gitlawb/openclaude/issues/794)) ([06e7684](https://github.com/Gitlawb/openclaude/commit/06e7684eb56df8e694ac784575e163641931c44c))
* Collapse all-text arrays to string for DeepSeek compatibility ([#806](https://github.com/Gitlawb/openclaude/issues/806)) ([761924d](https://github.com/Gitlawb/openclaude/commit/761924daa7e225fe8acf41651408c7cae639a511))
* **model:** codex/nvidia-nim/minimax now read OPENAI_MODEL env ([#815](https://github.com/Gitlawb/openclaude/issues/815)) ([4581208](https://github.com/Gitlawb/openclaude/commit/458120889f6ce54cc9f0b287461d5e38eae48a20))
* **provider:** saved profile ignored when stale CLAUDE_CODE_USE_* in shell ([#807](https://github.com/Gitlawb/openclaude/issues/807)) ([13de4e8](https://github.com/Gitlawb/openclaude/commit/13de4e85df7f5fadc8cd15a76076374dc112360b))
* rename .claude.json to .openclaude.json with legacy fallback ([#582](https://github.com/Gitlawb/openclaude/issues/582)) ([4d4fb28](https://github.com/Gitlawb/openclaude/commit/4d4fb2880e4d0e3a62d8715e1ec13d932e736279))
* replace discontinued gemini-2.5-pro-preview-03-25 with stable gemini-2.5-pro ([#802](https://github.com/Gitlawb/openclaude/issues/802)) ([64582c1](https://github.com/Gitlawb/openclaude/commit/64582c119d5d0278195271379da4a68d59a89c1f)), closes [#398](https://github.com/Gitlawb/openclaude/issues/398)
* **security:** harden project settings trust boundary + MCP sanitization ([#789](https://github.com/Gitlawb/openclaude/issues/789)) ([ae3b723](https://github.com/Gitlawb/openclaude/commit/ae3b723f3b297b49925cada4728f3174aee8bf12))
* **test:** autoCompact floor assertion is flag-sensitive ([#816](https://github.com/Gitlawb/openclaude/issues/816)) ([c13842e](https://github.com/Gitlawb/openclaude/commit/c13842e91c7227246520955de6ae0636b30def9a))
* **ui:** prevent provider manager lag by deferring sync I/O ([#803](https://github.com/Gitlawb/openclaude/issues/803)) ([85eab27](https://github.com/Gitlawb/openclaude/commit/85eab2751e7d351bb0ed6a3fe0e15461d241c9cb))
## [0.5.2](https://github.com/Gitlawb/openclaude/compare/v0.5.1...v0.5.2) (2026-04-20)
### Bug Fixes
* **api:** replace phrase-based reasoning sanitizer with tag-based filter ([#779](https://github.com/Gitlawb/openclaude/issues/779)) ([336ddcc](https://github.com/Gitlawb/openclaude/commit/336ddcc50d59d79ebff50993f2673652aecb0d7d))
## [0.5.1](https://github.com/Gitlawb/openclaude/compare/v0.5.0...v0.5.1) (2026-04-20)
### Bug Fixes
* enforce Bash path constraints after sandbox allow ([#777](https://github.com/Gitlawb/openclaude/issues/777)) ([7002cb3](https://github.com/Gitlawb/openclaude/commit/7002cb302b78ea2a19da3f26226de24e2903fa1d))
* enforce MCP OAuth callback state before errors ([#775](https://github.com/Gitlawb/openclaude/issues/775)) ([739b8d1](https://github.com/Gitlawb/openclaude/commit/739b8d1f40fde0e401a5cbd2b9a55d88bd5124ad))
* require trusted approval for sandbox override ([#778](https://github.com/Gitlawb/openclaude/issues/778)) ([aab4890](https://github.com/Gitlawb/openclaude/commit/aab489055c53dd64369414116fe93226d2656273))
## [0.5.0](https://github.com/Gitlawb/openclaude/compare/v0.4.0...v0.5.0) (2026-04-20)
### Features
* add OPENCLAUDE_DISABLE_STRICT_TOOLS env var to opt out of strict MCP tool schema normalization ([#770](https://github.com/Gitlawb/openclaude/issues/770)) ([e6e8d9a](https://github.com/Gitlawb/openclaude/commit/e6e8d9a24897e4c9ef08b72df20fabbf8ef27f38))
* mask provider api key input ([#772](https://github.com/Gitlawb/openclaude/issues/772)) ([13e9f22](https://github.com/Gitlawb/openclaude/commit/13e9f22a83a2b0f85f557b1e12c9442ba61241e4))
### Bug Fixes
* allow provider recovery during startup ([#765](https://github.com/Gitlawb/openclaude/issues/765)) ([f828171](https://github.com/Gitlawb/openclaude/commit/f828171ef1ab94e2acf73a28a292799e4e26cc0d))
* **api:** drop orphan tool results to satisfy strict role sequence ([#745](https://github.com/Gitlawb/openclaude/issues/745)) ([b786b76](https://github.com/Gitlawb/openclaude/commit/b786b765f01f392652eaf28ed3579a96b7260a53))
* **help:** prevent /help tab crash from undefined descriptions ([#732](https://github.com/Gitlawb/openclaude/issues/732)) ([3d1979f](https://github.com/Gitlawb/openclaude/commit/3d1979ff066db32415e0c8321af916d81f5f2621))
* **mcp:** sync required array with properties in tool schemas ([#754](https://github.com/Gitlawb/openclaude/issues/754)) ([002a8f1](https://github.com/Gitlawb/openclaude/commit/002a8f1f6de2fcfc917165d828501d3047bad61f))
* remove cached mcpClient in diagnostic tracking to prevent stale references ([#727](https://github.com/Gitlawb/openclaude/issues/727)) ([2c98be7](https://github.com/Gitlawb/openclaude/commit/2c98be700274a4241963b5f43530bf3bd8f8963f))
* use raw context window for auto-compact percentage display ([#748](https://github.com/Gitlawb/openclaude/issues/748)) ([55c5f26](https://github.com/Gitlawb/openclaude/commit/55c5f262a9a5a8be0aa9ae8dc6c7dafc465eb2c6))
## [0.4.0](https://github.com/Gitlawb/openclaude/compare/v0.3.0...v0.4.0) (2026-04-17)
### Features
* add Alibaba Coding Plan (DashScope) provider support ([#509](https://github.com/Gitlawb/openclaude/issues/509)) ([43ac6db](https://github.com/Gitlawb/openclaude/commit/43ac6dba75537282da1e2ad8f855082bc4e25f1e))
* add NVIDIA NIM and MiniMax provider support ([#552](https://github.com/Gitlawb/openclaude/issues/552)) ([51191d6](https://github.com/Gitlawb/openclaude/commit/51191d61326e1f8319d70b3a3c0d9229e185a564))
* add ripgrep to Dockerfile for faster file searching ([#688](https://github.com/Gitlawb/openclaude/issues/688)) ([12dd375](https://github.com/Gitlawb/openclaude/commit/12dd3755c619cc27af3b151ae8fdb9d425a7b9a2))
* **api:** classify openai-compatible provider failures ([#708](https://github.com/Gitlawb/openclaude/issues/708)) ([80a00ac](https://github.com/Gitlawb/openclaude/commit/80a00acc2c6dc4657a78de7366f7a9ebc920bfbb))
* **vscode:** add full chat interface to OpenClaude extension ([#608](https://github.com/Gitlawb/openclaude/issues/608)) ([fbcd928](https://github.com/Gitlawb/openclaude/commit/fbcd928f7f8511da795aea3ad318bddf0ab9a1a7))
### Bug Fixes
* focus "Done" option after completing provider manager actions ([#718](https://github.com/Gitlawb/openclaude/issues/718)) ([d6f5130](https://github.com/Gitlawb/openclaude/commit/d6f5130c204d8ffe582212466768706cd7fd6774))
* **models:** prevent /models crash from non-string saved model values ([#691](https://github.com/Gitlawb/openclaude/issues/691)) ([6b2121d](https://github.com/Gitlawb/openclaude/commit/6b2121da12189fa7ce1f33394d18abd24cf8a01b))
* prevent crash in commands tab when description is undefined ([#730](https://github.com/Gitlawb/openclaude/issues/730)) ([eed77e6](https://github.com/Gitlawb/openclaude/commit/eed77e6579866a98384dcc948a0ad6406614ede3))
* strip comments before scanning for missing imports ([#676](https://github.com/Gitlawb/openclaude/issues/676)) ([a00b792](https://github.com/Gitlawb/openclaude/commit/a00b7928de9662ffb7ef6abd8cd040afe6f4f122))
* **ui:** show correct endpoint URL in intro screen for custom Anthropic endpoints ([#735](https://github.com/Gitlawb/openclaude/issues/735)) ([3424663](https://github.com/Gitlawb/openclaude/commit/34246635fb9a09499047a52e7f96ca9b36c8a85a))
## [0.3.0](https://github.com/Gitlawb/openclaude/compare/v0.2.3...v0.3.0) (2026-04-14)
### Features
* activate coordinator mode in open build ([#647](https://github.com/Gitlawb/openclaude/issues/647)) ([99a1714](https://github.com/Gitlawb/openclaude/commit/99a17144ee285b892a0801acb6abcc9af68879af))
* activate local-only team memory in open build ([#648](https://github.com/Gitlawb/openclaude/issues/648)) ([24d485f](https://github.com/Gitlawb/openclaude/commit/24d485f42f5b1405d2fab13f2f497d5edd3b5300))
* activate message actions in open build ([#632](https://github.com/Gitlawb/openclaude/issues/632)) ([252808b](https://github.com/Gitlawb/openclaude/commit/252808bbd0a12a6ccf97e2cb09752a0212ea3acd))
* add allowBypassPermissionsMode setting ([#658](https://github.com/Gitlawb/openclaude/issues/658)) ([31be66d](https://github.com/Gitlawb/openclaude/commit/31be66d7645ea3473334c9ce89ea1a5095b8df6e))
* add Docker image build and push to GHCR on release ([#656](https://github.com/Gitlawb/openclaude/issues/656)) ([658d076](https://github.com/Gitlawb/openclaude/commit/658d076909e14eb0459bcb98aee9aa0472118265))
* implement /loop command with fixed and dynamic scheduling ([#621](https://github.com/Gitlawb/openclaude/issues/621)) ([64298a6](https://github.com/Gitlawb/openclaude/commit/64298a663f1391b16aa1f5a49e8a877e1d3742f2))
* implement Monitor tool for streaming shell output ([#649](https://github.com/Gitlawb/openclaude/issues/649)) ([b818dd5](https://github.com/Gitlawb/openclaude/commit/b818dd5958f4e8428566ce25a1a6be5fd4fe66f8))
* local feature flag overrides via ~/.claude/feature-flags.json ([#639](https://github.com/Gitlawb/openclaude/issues/639)) ([0e48884](https://github.com/Gitlawb/openclaude/commit/0e48884f56c6c008f047a7926d3b2cb924170625))
* open useful USER_TYPE-gated features to all users ([#644](https://github.com/Gitlawb/openclaude/issues/644)) ([c1beea9](https://github.com/Gitlawb/openclaude/commit/c1beea98676a413c54152a45a6b9fbe7fb9ed028))
### Bug Fixes
* bump axios 1.14.0 → 1.15.0 (Dependabot [#4](https://github.com/Gitlawb/openclaude/issues/4), [#5](https://github.com/Gitlawb/openclaude/issues/5)) ([#670](https://github.com/Gitlawb/openclaude/issues/670)) ([a07e5ef](https://github.com/Gitlawb/openclaude/commit/a07e5ef990a5ed01a72e83fdbd1fcab36f515a08))
* extend provider guard to protect anthropic profiles from cross-terminal override ([#641](https://github.com/Gitlawb/openclaude/issues/641)) ([03e0b06](https://github.com/Gitlawb/openclaude/commit/03e0b06e0784e4ea46945b3950840b10b6e3ca49))
* improve fetch diagnostics for bootstrap and session requests ([#646](https://github.com/Gitlawb/openclaude/issues/646)) ([df2b9f2](https://github.com/Gitlawb/openclaude/commit/df2b9f2b7b4c661ee3d9ed5dc58b3064de0599d1))
* **openai-shim:** preserve tool result images and local token caps ([#659](https://github.com/Gitlawb/openclaude/issues/659)) ([30c866d](https://github.com/Gitlawb/openclaude/commit/30c866d31ad8538496460667d86ed5efbd4a8547))
* replace broken bun:bundle shim with source pre-processing ([#657](https://github.com/Gitlawb/openclaude/issues/657)) ([adbe391](https://github.com/Gitlawb/openclaude/commit/adbe391e63721918b5d147f4f845111c1a3143db))
* resolve 12 bugs across API, MCP, agent tools, web search, and context overflow ([#674](https://github.com/Gitlawb/openclaude/issues/674)) ([25ce2ca](https://github.com/Gitlawb/openclaude/commit/25ce2ca7bff8937b0b79ad7f85c6dc1c68432069))
* route OpenAI Codex shortcuts to correct endpoint ([#566](https://github.com/Gitlawb/openclaude/issues/566)) ([7c8bdcc](https://github.com/Gitlawb/openclaude/commit/7c8bdcc3e2ac1ecb98286c705c85671044be3d6b))
## [0.2.3](https://github.com/Gitlawb/openclaude/compare/v0.2.2...v0.2.3) (2026-04-12)
### Bug Fixes
* prevent infinite auto-compact loop for unknown 3P models ([#635](https://github.com/Gitlawb/openclaude/issues/635)) ([#636](https://github.com/Gitlawb/openclaude/issues/636)) ([aeaa658](https://github.com/Gitlawb/openclaude/commit/aeaa658f776fb8df95721e8b8962385f8b00f66a))
## [0.2.2](https://github.com/Gitlawb/openclaude/compare/v0.2.1...v0.2.2) (2026-04-12)
### Bug Fixes
* **read/edit:** make compact line prefix unambiguous for tab-indented files ([#613](https://github.com/Gitlawb/openclaude/issues/613)) ([08cc6f3](https://github.com/Gitlawb/openclaude/commit/08cc6f328711cd93ce9fa53351266c29a0b0a341))
## [0.2.1](https://github.com/Gitlawb/openclaude/compare/v0.2.0...v0.2.1) (2026-04-12)
### Bug Fixes
* **provider:** add recovery guidance for missing OpenAI API key ([#616](https://github.com/Gitlawb/openclaude/issues/616)) ([9419e8a](https://github.com/Gitlawb/openclaude/commit/9419e8a4a21b3771d9ddb10f7072e0a8c5b5b631))
## [0.2.0](https://github.com/Gitlawb/openclaude/compare/v0.1.8...v0.2.0) (2026-04-12)
### Features
* add /cache-probe diagnostic command ([#580](https://github.com/Gitlawb/openclaude/issues/580)) ([9ccaa7a](https://github.com/Gitlawb/openclaude/commit/9ccaa7a6759b6991f4a566b4118c06e68a2398fe)), closes [#515](https://github.com/Gitlawb/openclaude/issues/515)
* add auto-fix service — auto-lint and test after AI file edits ([#508](https://github.com/Gitlawb/openclaude/issues/508)) ([c385047](https://github.com/Gitlawb/openclaude/commit/c385047abba4366866f4c87bfb5e0b0bd4dcbb9d))
* Add Gemini support with thought_signature fix ([#404](https://github.com/Gitlawb/openclaude/issues/404)) ([5012c16](https://github.com/Gitlawb/openclaude/commit/5012c160c9a2dff9418e7ee19dc9a4d29ef2b024))
* add headless gRPC server for external agent integration ([#278](https://github.com/Gitlawb/openclaude/issues/278)) ([26eef92](https://github.com/Gitlawb/openclaude/commit/26eef92fe72e9c3958d61435b8d3571e12bf2b74))
* add wiki mvp commands ([#532](https://github.com/Gitlawb/openclaude/issues/532)) ([c328fdf](https://github.com/Gitlawb/openclaude/commit/c328fdf9e2fe59ad101b049301298ce9ff24caca))
* GitHub provider lifecycle and onboarding hardening ([#351](https://github.com/Gitlawb/openclaude/issues/351)) ([ff7d499](https://github.com/Gitlawb/openclaude/commit/ff7d49990de515825ddbe4099f3a39b944b61370))
### Bug Fixes
* add File polyfill for Node < 20 to prevent startup deadlock with proxy ([#442](https://github.com/Gitlawb/openclaude/issues/442)) ([85aa8b0](https://github.com/Gitlawb/openclaude/commit/85aa8b0985c8f3cb8801efa5141114a0ab0f6a83))
* add GitHub Copilot model context windows and output limits ([#576](https://github.com/Gitlawb/openclaude/issues/576)) ([a7f5982](https://github.com/Gitlawb/openclaude/commit/a7f5982f6438ab0ddc3f0daae31ea68ac7ac206c)), closes [#515](https://github.com/Gitlawb/openclaude/issues/515)
* add LiteLLM-style aliases for GitHub Copilot context windows ([#606](https://github.com/Gitlawb/openclaude/issues/606)) ([2e0e14d](https://github.com/Gitlawb/openclaude/commit/2e0e14d71313e0e501efaa9e55c6c56f2742fb10))
* add store:false to Chat Completions and /responses fallback ([#578](https://github.com/Gitlawb/openclaude/issues/578)) ([8aaa4f2](https://github.com/Gitlawb/openclaude/commit/8aaa4f22ac5b942d82aa9cad54af30d56034515a))
* address code scanning alerts ([#434](https://github.com/Gitlawb/openclaude/issues/434)) ([e365cb4](https://github.com/Gitlawb/openclaude/commit/e365cb4010becabacd7cbccb4c3e59ea23a41e90))
* avoid sync github credential reads in provider manager ([#428](https://github.com/Gitlawb/openclaude/issues/428)) ([aff2bd8](https://github.com/Gitlawb/openclaude/commit/aff2bd87e4f2821992f74fb95481c505d0ba5d5d))
* convert dragged file paths to @mentions for attachment ([#382](https://github.com/Gitlawb/openclaude/issues/382)) ([112df59](https://github.com/Gitlawb/openclaude/commit/112df5911791ea71ee9efbb98ea59c5ded1ea161))
* custom web search — WEB_URL_TEMPLATE not recognized, timeout too short, silent native fallback ([#537](https://github.com/Gitlawb/openclaude/issues/537)) ([32fbd0c](https://github.com/Gitlawb/openclaude/commit/32fbd0c7b4168b32dcb13a5b69342e2727269201))
* defer startup checks and suppress recommendation dialogs during startup window (issue [#363](https://github.com/Gitlawb/openclaude/issues/363)) ([#504](https://github.com/Gitlawb/openclaude/issues/504)) ([2caf2fd](https://github.com/Gitlawb/openclaude/commit/2caf2fd982af1ec845c50152ad9d28d1a597f82f))
* display selected model in startup screen instead of hardcoded sonnet 4.6 ([#587](https://github.com/Gitlawb/openclaude/issues/587)) ([b126e38](https://github.com/Gitlawb/openclaude/commit/b126e38b1affddd2de83fcc3ba26f2e44b42a509))
* handle missing skill parameter in SkillTool ([#485](https://github.com/Gitlawb/openclaude/issues/485)) ([f9ce81b](https://github.com/Gitlawb/openclaude/commit/f9ce81bfb384e909353813fb6f6760cadd508ae7))
* include MCP tool results in microcompact to reduce token waste ([#348](https://github.com/Gitlawb/openclaude/issues/348)) ([52d33a8](https://github.com/Gitlawb/openclaude/commit/52d33a87a047b943aedaaaf772cd48636c263509))
* **ink:** restore host prop updates in React 19 reconciler ([#589](https://github.com/Gitlawb/openclaude/issues/589)) ([6e94dd9](https://github.com/Gitlawb/openclaude/commit/6e94dd913688b2d6433a9abe62a245c5f031b776))
* let saved provider profiles win on restart ([#513](https://github.com/Gitlawb/openclaude/issues/513)) ([cb8f8b7](https://github.com/Gitlawb/openclaude/commit/cb8f8b7ac2e3e74516ee219a3a48156db7c6ed78))
* normalize malformed Bash tool arguments from OpenAI-compatible providers ([#385](https://github.com/Gitlawb/openclaude/issues/385)) ([b4bd95b](https://github.com/Gitlawb/openclaude/commit/b4bd95b47715c9896240d708c106777507fd26ec))
* preserve only originally-required properties in strict tool schemas ([#471](https://github.com/Gitlawb/openclaude/issues/471)) ([ccaa193](https://github.com/Gitlawb/openclaude/commit/ccaa193eec5761f0972ffb58eb3189a81a9244b0))
* preserve unicode in Windows clipboard fallback ([#388](https://github.com/Gitlawb/openclaude/issues/388)) ([c193497](https://github.com/Gitlawb/openclaude/commit/c1934974aaf64db460cc850a044bd13cc744cce7))
* rebrand prompt identity to openclaude ([#496](https://github.com/Gitlawb/openclaude/issues/496)) ([598651f](https://github.com/Gitlawb/openclaude/commit/598651f42389ce76311ec00e8a9c701c939ead27))
* replace isDeepStrictEqual with navigation-aware options comparison ([#507](https://github.com/Gitlawb/openclaude/issues/507)) ([537c469](https://github.com/Gitlawb/openclaude/commit/537c469c3a2f7cb0eed05fa2f54dca57b6bc273f)), closes [#472](https://github.com/Gitlawb/openclaude/issues/472)
* report cache reads in streaming and correct cost calculation ([#577](https://github.com/Gitlawb/openclaude/issues/577)) ([f4ac709](https://github.com/Gitlawb/openclaude/commit/f4ac709fa6eda732bf45204fcab625ba6c5674b9))
* restore default context window for unknown 3p models ([#494](https://github.com/Gitlawb/openclaude/issues/494)) ([69ea1f1](https://github.com/Gitlawb/openclaude/commit/69ea1f1e4a99e9436215d8cb391a116a64442b94))
* restore Grep and Glob reliability on OpenAI paths ([#461](https://github.com/Gitlawb/openclaude/issues/461)) ([600c01f](https://github.com/Gitlawb/openclaude/commit/600c01faf761a080a2c7dede872ddbe05a132f23))
* restore Ollama auto-detect in first-run setup ([#561](https://github.com/Gitlawb/openclaude/issues/561)) ([68c2968](https://github.com/Gitlawb/openclaude/commit/68c296833dcef54ce44cb18b24357230b5204dbc))
* scrub canonical Anthropic headers from 3P shim requests ([#499](https://github.com/Gitlawb/openclaude/issues/499)) ([07621a6](https://github.com/Gitlawb/openclaude/commit/07621a6f8d0918170281869a47b5dbff90e71594))
* strip Anthropic params from 3P resume paths ([#479](https://github.com/Gitlawb/openclaude/issues/479)) ([4975cfc](https://github.com/Gitlawb/openclaude/commit/4975cfc2e0ddbe34aa4e8e3f52ee5eba07fbe465))
* suppress startup dialogs when input is buffered ([#423](https://github.com/Gitlawb/openclaude/issues/423)) ([8ece290](https://github.com/Gitlawb/openclaude/commit/8ece2900872dadd157e798ef501ddf126dac66c4))
* **tui:** restore prompt rendering on startup ([#498](https://github.com/Gitlawb/openclaude/issues/498)) ([e30ad17](https://github.com/Gitlawb/openclaude/commit/e30ad17ae0056787273be2caafd6cf5340b6ab57))
* update theme preview on focus change ([#562](https://github.com/Gitlawb/openclaude/issues/562)) ([6924718](https://github.com/Gitlawb/openclaude/commit/692471850fc789ee0797190089272407f9a4d953))
* **web-search:** close SSRF bypasses in custom provider hostname guard ([#610](https://github.com/Gitlawb/openclaude/issues/610)) ([a02c441](https://github.com/Gitlawb/openclaude/commit/a02c44143b257fbee7f38f1b93873cc0ea68a1f9))
* WebSearch providers + MCPTool bugs ([#593](https://github.com/Gitlawb/openclaude/issues/593)) ([91e4cfb](https://github.com/Gitlawb/openclaude/commit/91e4cfb15b62c04615834fd3c417fe38b4feb914))

View File

@@ -1,46 +0,0 @@
# ---- build stage ----
FROM node:22-slim AS build
# Install Bun
RUN npm install -g bun@1.3.11
WORKDIR /app
# Copy dependency manifests first for better layer caching
COPY package.json bun.lock ./
# Install all dependencies (including devDependencies for build)
RUN bun install --frozen-lockfile
# Copy source code
COPY src/ src/
COPY scripts/ scripts/
COPY bin/ bin/
COPY tsconfig.json ./
# Build the CLI bundle
RUN bun run build
# Prune devDependencies
RUN rm -rf node_modules && bun install --frozen-lockfile --production
# ---- runtime stage ----
FROM node:22-slim
WORKDIR /app
# Copy only what's needed to run
COPY --from=build /app/dist/cli.mjs dist/cli.mjs
COPY --from=build /app/bin/ bin/
COPY --from=build /app/node_modules/ node_modules/
COPY --from=build /app/package.json package.json
COPY README.md ./
# Install git and ripgrep — many CLI tool operations depend on them
RUN apt-get update && apt-get install -y --no-install-recommends git ripgrep \
&& rm -rf /var/lib/apt/lists/*
# Run as non-root user
USER node
ENTRYPOINT ["node", "/app/dist/cli.mjs"]

View File

@@ -2,7 +2,7 @@
OpenClaude is an open-source coding-agent CLI for cloud and local model providers.
Use OpenAI-compatible APIs, Gemini, GitHub Models, Codex OAuth, Codex, Ollama, Atomic Chat, and other supported backends while keeping one terminal-first workflow: prompts, tools, agents, MCP, slash commands, and streaming output.
Use OpenAI-compatible APIs, Gemini, GitHub Models, Codex, Ollama, Atomic Chat, and other supported backends while keeping one terminal-first workflow: prompts, tools, agents, MCP, slash commands, and streaming output.
[![PR Checks](https://github.com/Gitlawb/openclaude/actions/workflows/pr-checks.yml/badge.svg?branch=main)](https://github.com/Gitlawb/openclaude/actions/workflows/pr-checks.yml)
[![Release](https://img.shields.io/github/v/tag/Gitlawb/openclaude?label=release&color=0ea5e9)](https://github.com/Gitlawb/openclaude/tags)
@@ -10,20 +10,13 @@ Use OpenAI-compatible APIs, Gemini, GitHub Models, Codex OAuth, Codex, Ollama, A
[![Security Policy](https://img.shields.io/badge/security-policy-0f766e)](SECURITY.md)
[![License](https://img.shields.io/badge/license-MIT-2563eb)](LICENSE)
OpenClaude is also mirrored to GitLawb:
[gitlawb.com/node/repos/z6MkqDnb/openclaude](https://gitlawb.com/node/repos/z6MkqDnb/openclaude)
[Quick Start](#quick-start) | [Setup Guides](#setup-guides) | [Providers](#supported-providers) | [Source Build](#source-build-and-local-development) | [VS Code Extension](#vs-code-extension) | [Community](#community)
## Star History
[![Star History Chart](https://api.star-history.com/chart?repos=gitlawb/openclaude&type=date&legend=top-left)](https://www.star-history.com/?repos=gitlawb%2Fopenclaude&type=date&legend=top-left)
## Why OpenClaude
- Use one CLI across cloud APIs and local model backends
- Save provider profiles inside the app with `/provider`
- Run with OpenAI-compatible services, Gemini, GitHub Models, Codex OAuth, Codex, Ollama, Atomic Chat, and other supported providers
- Run with OpenAI-compatible services, Gemini, GitHub Models, Codex, Ollama, Atomic Chat, and other supported providers
- Keep coding-agent workflows in one place: bash, file tools, grep, glob, agents, tasks, MCP, and web tools
- Use the bundled VS Code extension for launch integration and theme support
@@ -92,16 +85,6 @@ $env:OPENAI_MODEL="qwen2.5-coder:7b"
openclaude
```
### Using Ollama's launch command
If you have [Ollama](https://ollama.com) installed, you can skip the env var setup entirely:
```bash
ollama launch openclaude --model qwen2.5-coder:7b
```
This automatically sets `ANTHROPIC_BASE_URL`, model routing, and auth so all API traffic goes through your local Ollama instance. Works with any model you have pulled — local or cloud.
## Setup Guides
Beginner-friendly guides:
@@ -122,10 +105,9 @@ Advanced and source-build guides:
| OpenAI-compatible | `/provider` or env vars | Works with OpenAI, OpenRouter, DeepSeek, Groq, Mistral, LM Studio, and other compatible `/v1` servers |
| Gemini | `/provider` or env vars | Supports API key, access token, or local ADC workflow on current `main` |
| GitHub Models | `/onboard-github` | Interactive onboarding with saved credentials |
| Codex OAuth | `/provider` | Opens ChatGPT sign-in in your browser and stores Codex credentials securely |
| Codex | `/provider` | Uses existing Codex CLI auth, OpenClaude secure storage, or env credentials |
| Ollama | `/provider`, env vars, or `ollama launch` | Local inference with no API key |
| Atomic Chat | `/provider`, env vars, or `bun run dev:atomic-chat` | Local Model Provider; auto-detects loaded models |
| Codex | `/provider` | Uses existing Codex credentials when available |
| Ollama | `/provider` or env vars | Local inference with no API key |
| Atomic Chat | advanced setup | Local Apple Silicon backend |
| Bedrock / Vertex / Foundry | env vars | Additional provider integrations for supported environments |
## What Works
@@ -331,8 +313,7 @@ For larger changes, open an issue first so the scope is clear before implementat
- `bun run build`
- `bun run test:coverage`
- `bun run smoke`
- focused `bun test ...` runs for files and flows you changed
- focused `bun test ...` runs for touched areas
## Disclaimer

View File

@@ -30,7 +30,7 @@
"@opentelemetry/semantic-conventions": "1.40.0", "@opentelemetry/semantic-conventions": "1.40.0",
"ajv": "8.18.0", "ajv": "8.18.0",
"auto-bind": "5.0.1", "auto-bind": "5.0.1",
"axios": "1.15.0", "axios": "1.14.0",
"bidi-js": "1.0.3", "bidi-js": "1.0.3",
"chalk": "5.6.2", "chalk": "5.6.2",
"chokidar": "4.0.3", "chokidar": "4.0.3",
@@ -479,7 +479,7 @@
"auto-bind": ["auto-bind@5.0.1", "", {}, "sha512-ooviqdwwgfIfNmDwo94wlshcdzfO64XV0Cg6oDsDYBJfITDz1EngD2z7DkbvCWn+XIMsIqW27sEVF6qcpJrRcg=="], "auto-bind": ["auto-bind@5.0.1", "", {}, "sha512-ooviqdwwgfIfNmDwo94wlshcdzfO64XV0Cg6oDsDYBJfITDz1EngD2z7DkbvCWn+XIMsIqW27sEVF6qcpJrRcg=="],
"axios": ["axios@1.15.0", "", { "dependencies": { "follow-redirects": "^1.15.11", "form-data": "^4.0.5", "proxy-from-env": "^2.1.0" } }, "sha512-wWyJDlAatxk30ZJer+GeCWS209sA42X+N5jU2jy6oHTp7ufw8uzUTVFBX9+wTfAlhiJXGS0Bq7X6efruWjuK9Q=="], "axios": ["axios@1.14.0", "", { "dependencies": { "follow-redirects": "^1.15.11", "form-data": "^4.0.5", "proxy-from-env": "^2.1.0" } }, "sha512-3Y8yrqLSwjuzpXuZ0oIYZ/XGgLwUIBU3uLvbcpb0pidD9ctpShJd43KSlEEkVQg6DS0G9NKyzOvBfUtDKEyHvQ=="],
"base64-js": ["base64-js@1.5.1", "", {}, "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA=="], "base64-js": ["base64-js@1.5.1", "", {}, "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA=="],
@@ -1151,8 +1151,6 @@
"@emnapi/runtime/tslib": ["tslib@2.8.1", "", {}, "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w=="], "@emnapi/runtime/tslib": ["tslib@2.8.1", "", {}, "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w=="],
"@mendable/firecrawl-js/axios": ["axios@1.14.0", "", { "dependencies": { "follow-redirects": "^1.15.11", "form-data": "^4.0.5", "proxy-from-env": "^2.1.0" } }, "sha512-3Y8yrqLSwjuzpXuZ0oIYZ/XGgLwUIBU3uLvbcpb0pidD9ctpShJd43KSlEEkVQg6DS0G9NKyzOvBfUtDKEyHvQ=="],
"@opentelemetry/exporter-trace-otlp-grpc/@opentelemetry/core": ["@opentelemetry/core@1.30.1", "", { "dependencies": { "@opentelemetry/semantic-conventions": "1.28.0" }, "peerDependencies": { "@opentelemetry/api": ">=1.0.0 <1.10.0" } }, "sha512-OOCM2C/QIURhJMuKaekP3TRBxBKxG/TWWA0TL2J6nXUtDnuCtccy49LUJF8xPFXMX+0LMcxFpCo8M9cGY1W6rQ=="], "@opentelemetry/exporter-trace-otlp-grpc/@opentelemetry/core": ["@opentelemetry/core@1.30.1", "", { "dependencies": { "@opentelemetry/semantic-conventions": "1.28.0" }, "peerDependencies": { "@opentelemetry/api": ">=1.0.0 <1.10.0" } }, "sha512-OOCM2C/QIURhJMuKaekP3TRBxBKxG/TWWA0TL2J6nXUtDnuCtccy49LUJF8xPFXMX+0LMcxFpCo8M9cGY1W6rQ=="],
"@opentelemetry/exporter-trace-otlp-grpc/@opentelemetry/otlp-exporter-base": ["@opentelemetry/otlp-exporter-base@0.57.2", "", { "dependencies": { "@opentelemetry/core": "1.30.1", "@opentelemetry/otlp-transformer": "0.57.2" }, "peerDependencies": { "@opentelemetry/api": "^1.3.0" } }, "sha512-XdxEzL23Urhidyebg5E6jZoaiW5ygP/mRjxLHixogbqwDy2Faduzb5N0o/Oi+XTIJu+iyxXdVORjXax+Qgfxag=="], "@opentelemetry/exporter-trace-otlp-grpc/@opentelemetry/otlp-exporter-base": ["@opentelemetry/otlp-exporter-base@0.57.2", "", { "dependencies": { "@opentelemetry/core": "1.30.1", "@opentelemetry/otlp-transformer": "0.57.2" }, "peerDependencies": { "@opentelemetry/api": "^1.3.0" } }, "sha512-XdxEzL23Urhidyebg5E6jZoaiW5ygP/mRjxLHixogbqwDy2Faduzb5N0o/Oi+XTIJu+iyxXdVORjXax+Qgfxag=="],
@@ -1379,8 +1377,6 @@
"cliui/wrap-ansi": ["wrap-ansi@7.0.0", "", { "dependencies": { "ansi-styles": "^4.0.0", "string-width": "^4.1.0", "strip-ansi": "^6.0.0" } }, "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q=="], "cliui/wrap-ansi": ["wrap-ansi@7.0.0", "", { "dependencies": { "ansi-styles": "^4.0.0", "string-width": "^4.1.0", "strip-ansi": "^6.0.0" } }, "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q=="],
"firecrawl/axios": ["axios@1.14.0", "", { "dependencies": { "follow-redirects": "^1.15.11", "form-data": "^4.0.5", "proxy-from-env": "^2.1.0" } }, "sha512-3Y8yrqLSwjuzpXuZ0oIYZ/XGgLwUIBU3uLvbcpb0pidD9ctpShJd43KSlEEkVQg6DS0G9NKyzOvBfUtDKEyHvQ=="],
"form-data/mime-types": ["mime-types@2.1.35", "", { "dependencies": { "mime-db": "1.52.0" } }, "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw=="], "form-data/mime-types": ["mime-types@2.1.35", "", { "dependencies": { "mime-db": "1.52.0" } }, "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw=="],
"gaxios/is-stream": ["is-stream@2.0.1", "", {}, "sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg=="], "gaxios/is-stream": ["is-stream@2.0.1", "", {}, "sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg=="],

View File

@@ -48,8 +48,6 @@ export OPENAI_MODEL=gpt-4o
`codexplan` maps to GPT-5.4 on the Codex backend with high reasoning.
`codexspark` maps to GPT-5.3 Codex Spark for faster loops.
If you use the in-app provider wizard, choose `Codex OAuth` to open ChatGPT sign-in in your browser and let OpenClaude store Codex credentials securely.
If you already use the Codex CLI, OpenClaude reads `~/.codex/auth.json` automatically. You can also point it elsewhere with `CODEX_AUTH_JSON_PATH` or override the token directly with `CODEX_API_KEY`.
```bash
@@ -84,16 +82,6 @@ OpenRouter model availability changes over time. If a model stops working, try a
### Ollama
Using `ollama launch` (recommended if you have Ollama installed):
```bash
ollama launch openclaude --model llama3.3:70b
```
This handles all environment setup automatically — no env vars needed. Works with any local or cloud model available in your Ollama instance.
Using environment variables manually:
```bash
ollama pull llama3.3:70b

View File

@@ -1,333 +0,0 @@
# Hook Chains (Self-Healing Agent Mesh MVP)
Hook Chains provide an event-driven recovery layer for important workflow failures.
When a matching hook event occurs, OpenClaude evaluates declarative rules and can dispatch remediation actions such as:
- `spawn_fallback_agent`
- `notify_team`
- `warm_remote_capacity`
## Disabled-By-Default Rollout
> **Rollout recommendation:** keep Hook Chains disabled until you validate rules in your environment.
>
> - Set top-level config to `"enabled": false` initially.
> - Enable per environment when ready.
> - Dispatch is gated by `feature('HOOK_CHAINS')`.
> - Env gate defaults to off unless `CLAUDE_CODE_ENABLE_HOOK_CHAINS=1` is set.
This keeps existing workflows unchanged while you tune guard windows and action behavior.
## Feature Overview
Hook Chains are loaded from a deterministic config file and evaluated on dispatched hook events.
MVP runtime trigger wiring (a short sketch follows this list):
- `PostToolUseFailure` hooks dispatch Hook Chains with outcome `failed`.
- `TaskCompleted` hooks dispatch Hook Chains with outcome:
- `success` when completion hooks did not block.
- `failed` when completion hooks returned blocking errors or prevented continuation.
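
A rough sketch of that outcome mapping (hypothetical types, not the actual runtime):

```ts
// Hypothetical sketch of the MVP trigger wiring described above.
type Outcome = 'success' | 'failed' | 'timeout' | 'unknown'

// TaskCompleted: failed when any completion hook blocked continuation.
function taskCompletedOutcome(hookResults: Array<{ blocking: boolean }>): Outcome {
  return hookResults.some(r => r.blocking) ? 'failed' : 'success'
}

// PostToolUseFailure always dispatches with outcome 'failed'.
const postToolUseFailureOutcome: Outcome = 'failed'
```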
Default config path:
- `.openclaude/hook-chains.json`
Override path:
- `CLAUDE_CODE_HOOK_CHAINS_CONFIG_PATH=/abs/or/relative/path/to/hook-chains.json`
Global gate:
- `feature('HOOK_CHAINS')` must be enabled in the build
- `CLAUDE_CODE_ENABLE_HOOK_CHAINS=0|1` (defaults to disabled when unset)
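
A minimal sketch of the combined gate (illustrative only; `feature` is the build-time flag helper from `bun:bundle`):

```ts
// Both gates must allow dispatch: the build flag and the env var.
// The env gate defaults to disabled when unset.
import { feature } from 'bun:bundle'

const hookChainsEnabled =
  feature('HOOK_CHAINS') && process.env.CLAUDE_CODE_ENABLE_HOOK_CHAINS === '1'
```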
## Safety Guarantees
The runtime is intentionally conservative; a sketch of how these guards compose follows the list:
- **Depth guard:** chain dispatch is blocked when `chainDepth >= maxChainDepth`.
- **Rule cooldown:** each rule can only re-fire after cooldown expires.
- **Dedup window:** identical event/action combinations are suppressed for a window.
- **Abort-safe behavior:** if the current signal is aborted, actions skip safely.
- **Policy-aware remote warm:** `warm_remote_capacity` skips when remote sessions are policy denied.
- **Bridge inactive no-op:** `warm_remote_capacity` safely skips when no active bridge handle exists.
- **Missing team context safety:** `notify_team` skips with structured reason if no team context/team file is available.
- **Fallback launcher safety:** `spawn_fallback_agent` fails with a structured reason when launch permissions/context are unavailable.
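
A sketch of how the depth, cooldown, and dedup guards might compose (hypothetical names and shapes, not the actual runtime):

```ts
interface GuardState {
  chainDepth: number
  lastFiredAt: Map<string, number>      // ruleId -> last fire timestamp
  recentDispatches: Map<string, number> // "event:actionId" -> timestamp
}

interface GuardOptions {
  maxChainDepth: number
  cooldownMs: number
  dedupWindowMs: number
}

function canDispatch(
  state: GuardState,
  ruleId: string,
  event: string,
  actionId: string,
  opts: GuardOptions,
  now: number = Date.now(),
): { ok: boolean; reason?: string } {
  // Depth guard: refuse to chain deeper than the configured cap.
  if (state.chainDepth >= opts.maxChainDepth) {
    return { ok: false, reason: 'max chain depth reached' }
  }
  // Rule cooldown: a rule may only re-fire after its cooldown expires.
  const last = state.lastFiredAt.get(ruleId)
  if (last !== undefined && now - last < opts.cooldownMs) {
    return { ok: false, reason: 'rule cooldown active' }
  }
  // Dedup window: identical event/action combinations are suppressed.
  const key = `${event}:${actionId}`
  const seen = state.recentDispatches.get(key)
  if (seen !== undefined && now - seen < opts.dedupWindowMs) {
    return { ok: false, reason: 'dedup window active' }
  }
  return { ok: true }
}
```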
## Configuration Schema Reference
Top-level object:
```json
{
  "version": 1,
  "enabled": true,
  "maxChainDepth": 2,
  "defaultCooldownMs": 30000,
  "defaultDedupWindowMs": 30000,
  "rules": []
}
```
### Top-Level Fields
| Field | Type | Required | Notes |
|---|---|---:|---|
| `version` | `1` | No | Defaults to `1`. |
| `enabled` | `boolean` | No | Global feature switch for this config file. |
| `maxChainDepth` | `integer` | No | Global depth guard (default `2`, max `10`). |
| `defaultCooldownMs` | `integer` | No | Default rule cooldown in ms (default `30000`). |
| `defaultDedupWindowMs` | `integer` | No | Default action dedup window in ms (default `30000`). |
| `rules` | `HookChainRule[]` | No | Defaults to `[]`. May be omitted or empty; when no rules are present, dispatch is a no-op and returns `enabled: false`. |
> **Note:** An empty ruleset is valid and can be used to keep Hook Chains configured but effectively disabled until rules are added.
### Rule Object (`HookChainRule`)
```json
{
  "id": "task-failure-recovery",
  "enabled": true,
  "trigger": {
    "event": "TaskCompleted",
    "outcome": "failed"
  },
  "condition": {
    "toolNames": ["Edit"],
    "taskStatuses": ["failed"],
    "errorIncludes": ["timeout", "permission denied"],
    "eventFieldEquals": {
      "meta.source": "scheduler"
    }
  },
  "cooldownMs": 60000,
  "dedupWindowMs": 30000,
  "maxDepth": 2,
  "actions": []
}
```
| Field | Type | Required | Notes |
|---|---|---:|---|
| `id` | `string` | Yes | Stable identifier used in telemetry/guards. |
| `enabled` | `boolean` | No | Per-rule switch. |
| `trigger.event` | `HookEvent` | Yes | Event name to match. |
| `trigger.outcome` | `"success"\|"failed"\|"timeout"\|"unknown"` | No | Single outcome matcher. |
| `trigger.outcomes` | `Outcome[]` | No | Multi-outcome matcher. Use either `outcome` or `outcomes`. |
| `condition` | `object` | No | Optional extra matching constraints. |
| `cooldownMs` | `integer` | No | Overrides global cooldown for this rule. |
| `dedupWindowMs` | `integer` | No | Overrides global dedup for this rule. |
| `maxDepth` | `integer` | No | Per-rule depth cap. |
| `actions` | `HookChainAction[]` | Yes | One or more actions to execute in order. |
### Condition Fields
| Field | Type | Notes |
|---|---|---|
| `toolNames` | `string[]` | Matches `tool_name` / `toolName` in event payload. |
| `taskStatuses` | `string[]` | Matches `task_status` / `taskStatus` / `status`. |
| `errorIncludes` | `string[]` | Case-insensitive substring match against `error` / `reason` / `message`. |
| `eventFieldEquals` | `Record<string, string\|number\|boolean>` | Dot-path equality against payload (example: `"meta.source": "scheduler"`). |
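A minimal sketch of condition matching under these semantics (hypothetical types; the payload key fallbacks follow the table above):

```ts
type Condition = {
  toolNames?: string[]
  taskStatuses?: string[]
  errorIncludes?: string[]
  eventFieldEquals?: Record<string, string | number | boolean>
}

// Dot-path lookup, e.g. "meta.source" -> payload.meta.source
function getPath(obj: unknown, path: string): unknown {
  return path.split('.').reduce<unknown>(
    (cur, key) =>
      cur != null && typeof cur === 'object' ? (cur as Record<string, unknown>)[key] : undefined,
    obj,
  )
}

function matches(cond: Condition, payload: Record<string, unknown>): boolean {
  const tool = (payload.tool_name ?? payload.toolName) as string | undefined
  if (cond.toolNames && (!tool || !cond.toolNames.includes(tool))) return false
  const status = (payload.task_status ?? payload.taskStatus ?? payload.status) as string | undefined
  if (cond.taskStatuses && (!status || !cond.taskStatuses.includes(status))) return false
  // Case-insensitive substring match against error/reason/message.
  const err = String(payload.error ?? payload.reason ?? payload.message ?? '').toLowerCase()
  if (cond.errorIncludes && !cond.errorIncludes.some(s => err.includes(s.toLowerCase()))) return false
  for (const [path, want] of Object.entries(cond.eventFieldEquals ?? {})) {
    if (getPath(payload, path) !== want) return false
  }
  return true
}
```
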
### Actions
#### `spawn_fallback_agent`
```json
{
  "type": "spawn_fallback_agent",
  "id": "fallback-1",
  "enabled": true,
  "dedupWindowMs": 30000,
  "description": "Fallback recovery for failed task",
  "promptTemplate": "Recover task ${TASK_SUBJECT}. Event=${EVENT_NAME}, outcome=${OUTCOME}, error=${ERROR}. Payload=${PAYLOAD_JSON}",
  "agentType": "general-purpose",
  "model": "sonnet"
}
```
#### `notify_team`
```json
{
  "type": "notify_team",
  "id": "notify-ops",
  "enabled": true,
  "dedupWindowMs": 30000,
  "teamName": "mesh-team",
  "recipients": ["*"],
  "summary": "Hook chain ${RULE_ID} fired",
  "messageTemplate": "Event=${EVENT_NAME} outcome=${OUTCOME}\nTask=${TASK_ID}\nError=${ERROR}\nPayload=${PAYLOAD_JSON}"
}
```
#### `warm_remote_capacity`
```json
{
  "type": "warm_remote_capacity",
  "id": "warm-bridge",
  "enabled": true,
  "dedupWindowMs": 60000,
  "createDefaultEnvironmentIfMissing": false
}
```
## Complete Example Configs
### 1) Retry via Fallback Agent
```json
{
  "version": 1,
  "enabled": true,
  "maxChainDepth": 2,
  "defaultCooldownMs": 30000,
  "defaultDedupWindowMs": 30000,
  "rules": [
    {
      "id": "retry-task-via-fallback",
      "trigger": {
        "event": "TaskCompleted",
        "outcome": "failed"
      },
      "cooldownMs": 60000,
      "actions": [
        {
          "type": "spawn_fallback_agent",
          "id": "spawn-retry-agent",
          "description": "Retry failed task with fallback agent",
          "promptTemplate": "A task failed. Recover it safely.\nTask=${TASK_SUBJECT}\nDescription=${TASK_DESCRIPTION}\nError=${ERROR}\nPayload=${PAYLOAD_JSON}",
          "agentType": "general-purpose",
          "model": "sonnet"
        }
      ]
    }
  ]
}
```
### 2) Notify Only
```json
{
  "version": 1,
  "enabled": true,
  "maxChainDepth": 2,
  "defaultCooldownMs": 30000,
  "defaultDedupWindowMs": 30000,
  "rules": [
    {
      "id": "notify-on-tool-failure",
      "trigger": {
        "event": "PostToolUseFailure",
        "outcome": "failed"
      },
      "condition": {
        "toolNames": ["Edit", "Write", "Bash"]
      },
      "actions": [
        {
          "type": "notify_team",
          "id": "notify-team-failure",
          "recipients": ["*"],
          "summary": "Tool failure detected",
          "messageTemplate": "Tool failure detected.\nEvent=${EVENT_NAME} outcome=${OUTCOME}\nError=${ERROR}\nPayload=${PAYLOAD_JSON}"
        }
      ]
    }
  ]
}
```
### 3) Combined Fallback + Notify + Bridge Warm
```json
{
  "version": 1,
  "enabled": true,
  "maxChainDepth": 2,
  "defaultCooldownMs": 45000,
  "defaultDedupWindowMs": 30000,
  "rules": [
    {
      "id": "full-recovery-chain",
      "trigger": {
        "event": "TaskCompleted",
        "outcomes": ["failed", "timeout"]
      },
      "condition": {
        "errorIncludes": ["timeout", "capacity", "connection"]
      },
      "cooldownMs": 90000,
      "actions": [
        {
          "type": "spawn_fallback_agent",
          "id": "fallback-agent",
          "description": "Recover failed task execution",
          "promptTemplate": "Recover failed task and produce a concise fix summary.\nTask=${TASK_SUBJECT}\nError=${ERROR}\nPayload=${PAYLOAD_JSON}"
        },
        {
          "type": "notify_team",
          "id": "notify-team",
          "recipients": ["*"],
          "summary": "Recovery chain triggered",
          "messageTemplate": "Recovery chain ${RULE_ID} fired.\nOutcome=${OUTCOME}\nTask=${TASK_SUBJECT}\nError=${ERROR}"
        },
        {
          "type": "warm_remote_capacity",
          "id": "warm-capacity",
          "createDefaultEnvironmentIfMissing": false
        }
      ]
    }
  ]
}
```
## Template Variables
The following placeholders are supported by `promptTemplate`, `summary`, and `messageTemplate`; a minimal renderer sketch follows the list:
- `${EVENT_NAME}`
- `${OUTCOME}`
- `${RULE_ID}`
- `${TASK_SUBJECT}`
- `${TASK_DESCRIPTION}`
- `${TASK_ID}`
- `${ERROR}`
- `${PAYLOAD_JSON}`
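
A minimal renderer sketch (assumed behavior: unrecognized placeholders are left intact, which may differ from the real implementation):

```ts
function renderTemplate(template: string, vars: Record<string, string>): string {
  // Replace each ${NAME} with its value; keep the placeholder if unknown.
  return template.replace(/\$\{(\w+)\}/g, (match, name: string) => vars[name] ?? match)
}

// Example:
const rendered = renderTemplate('Event=${EVENT_NAME} outcome=${OUTCOME}', {
  EVENT_NAME: 'TaskCompleted',
  OUTCOME: 'failed',
}) // => 'Event=TaskCompleted outcome=failed'
```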
## Troubleshooting
### Rule never triggers
- Verify `trigger.event` and `trigger.outcome`/`trigger.outcomes` exactly match dispatched event data.
- Check `condition` filters (especially `toolNames` and `eventFieldEquals` dot-path keys).
- Confirm the config file is valid JSON and schema-valid.
### Actions show as skipped
Common skip reasons:
- `action disabled`
- `rule cooldown active ...`
- `dedup window active ...`
- `max chain depth reached ...`
- `No team context is available ...`
- `Team file not found ...`
- `Remote sessions are blocked by policy`
- `Bridge is not active; warm_remote_capacity is a safe no-op`
- `No fallback agent launcher is registered in runtime context`
### Config changes not reflected
- The loader memoizes by file mtime/size (see the sketch below).
- Ensure your editor writes the file fully and updates mtime.
- If needed, force reload from the caller side with `forceReloadConfig: true`.
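
An illustrative mtime/size memoization sketch (hypothetical; the actual loader may differ):

```ts
import { readFileSync, statSync } from 'fs'

let cached: { key: string; config: unknown } | null = null

function loadHookChainsConfig(path: string, forceReloadConfig = false): unknown {
  // Cache key derived from file metadata; unchanged mtime/size reuses the cache.
  const st = statSync(path)
  const key = `${st.mtimeMs}:${st.size}`
  if (!forceReloadConfig && cached && cached.key === key) return cached.config
  const config = JSON.parse(readFileSync(path, 'utf-8'))
  cached = { key, config }
  return config
}
```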
### Existing workflows changed unexpectedly
- Set `"enabled": false` at top-level.
- Or globally disable with `CLAUDE_CODE_ENABLE_HOOK_CHAINS=0`.
- Re-enable gradually after validating one rule at a time.

View File

@@ -1,6 +1,6 @@
{
"name": "@gitlawb/openclaude",
"version": "0.6.0",
"version": "0.1.8",
"description": "Claude Code opened to any LLM — OpenAI, Gemini, DeepSeek, Ollama, and 200+ models",
"type": "module",
"bin": {
@@ -76,7 +76,7 @@
"@opentelemetry/semantic-conventions": "1.40.0", "@opentelemetry/semantic-conventions": "1.40.0",
"ajv": "8.18.0", "ajv": "8.18.0",
"auto-bind": "5.0.1", "auto-bind": "5.0.1",
"axios": "1.15.0", "axios": "1.14.0",
"bidi-js": "1.0.3", "bidi-js": "1.0.3",
"chalk": "5.6.2", "chalk": "5.6.2",
"chokidar": "4.0.3", "chokidar": "4.0.3",
@@ -140,7 +140,7 @@
},
"repository": {
"type": "git",
"url": "https://github.com/Gitlawb/openclaude.git"
"url": "https://gitlawb.com/z6MkqDnb7Siv3Cwj7pGJq4T5EsUisECqR8KpnDLwcaZq5TPr/openclaude"
},
"keywords": [
"claude-code",

View File

@@ -1,11 +0,0 @@
{
  "$schema": "https://raw.githubusercontent.com/googleapis/release-please/main/schemas/config.json",
  "packages": {
    ".": {
      "release-type": "node",
      "package-name": "@gitlawb/openclaude",
      "bump-minor-pre-major": true,
      "include-v-in-tag": true
    }
  }
}

View File

@@ -8,8 +8,7 @@
* - src/ path aliases
*/
import { readFileSync, readdirSync, writeFileSync } from 'fs'
import { join } from 'path'
import { readFileSync } from 'fs'
import { noTelemetryPlugin } from './no-telemetry-plugin'
const pkg = JSON.parse(readFileSync('./package.json', 'utf-8'))
@@ -19,106 +18,31 @@ const version = pkg.version
// Most Anthropic-internal features stay off; open-build features can be
// selectively enabled here when their full source exists in the mirror.
const featureFlags: Record<string, boolean> = {
  // ── Disabled: require Anthropic infrastructure or missing source ─────
  VOICE_MODE: false, // Push-to-talk STT via claude.ai OAuth endpoint
  PROACTIVE: false, // Autonomous agent mode (missing proactive/ module)
  KAIROS: false, // Persistent assistant/session mode (cloud backend)
  BRIDGE_MODE: false, // Remote desktop bridge via CCR infrastructure
  DAEMON: false, // Background daemon process (stubbed in open build)
  AGENT_TRIGGERS: false, // Scheduled remote agent triggers
  ABLATION_BASELINE: false, // A/B testing harness for eval experiments
  CONTEXT_COLLAPSE: false, // Context collapsing optimization (stubbed)
  COMMIT_ATTRIBUTION: false, // Co-Authored-By metadata in git commits
  UDS_INBOX: false, // Unix Domain Socket inter-session messaging
  BG_SESSIONS: false, // Background sessions via tmux (stubbed)
  WEB_BROWSER_TOOL: false, // Built-in browser automation (source not mirrored)
  CHICAGO_MCP: false, // Computer-use MCP (native Swift modules stubbed)
  COWORKER_TYPE_TELEMETRY: false, // Telemetry for agent/coworker type classification
  // ── Enabled: upstream defaults ──────────────────────────────────────
  COORDINATOR_MODE: true, // Multi-agent coordinator with worker delegation
  BUILTIN_EXPLORE_PLAN_AGENTS: true, // Built-in Explore/Plan specialized subagents
  BUDDY: true, // Buddy mode for paired programming
  MONITOR_TOOL: true, // MCP server monitoring/streaming tool
  TEAMMEM: true, // Team memory management
  MESSAGE_ACTIONS: true, // Message action buttons in the UI
  // ── Enabled: new activations ────────────────────────────────────────
  DUMP_SYSTEM_PROMPT: true, // --dump-system-prompt CLI flag for debugging
  CACHED_MICROCOMPACT: true, // Cache-aware tool result truncation optimization
  AWAY_SUMMARY: true, // "While you were away" recap after 5min blur
  TRANSCRIPT_CLASSIFIER: true, // Auto-approval classifier for safe tool uses
  ULTRATHINK: true, // Deep thinking mode — type "ultrathink" to boost reasoning
  TOKEN_BUDGET: true, // Token budget tracking with usage warnings
  HISTORY_PICKER: true, // Enhanced interactive prompt history picker
  QUICK_SEARCH: true, // Ctrl+G quick search across prompts
  SHOT_STATS: true, // Shot distribution stats in session summary
  EXTRACT_MEMORIES: true, // Auto-extract durable memories from conversations
  FORK_SUBAGENT: true, // Implicit context-forking when omitting subagent_type
  VERIFICATION_AGENT: true, // Built-in read-only agent for test/verification
  MCP_SKILLS: true, // Discover skills dynamically from MCP server resources
  PROMPT_CACHE_BREAK_DETECTION: true, // Detect & log unexpected prompt cache invalidations
  HOOK_PROMPTS: true, // Allow tools to request interactive user prompts
  VOICE_MODE: false,
  PROACTIVE: false,
  KAIROS: false,
  BRIDGE_MODE: false,
  DAEMON: false,
  AGENT_TRIGGERS: false,
  MONITOR_TOOL: false,
  ABLATION_BASELINE: false,
  DUMP_SYSTEM_PROMPT: false,
  CACHED_MICROCOMPACT: false,
  COORDINATOR_MODE: false,
  CONTEXT_COLLAPSE: false,
  COMMIT_ATTRIBUTION: false,
  TEAMMEM: false,
  UDS_INBOX: false,
  BG_SESSIONS: false,
  AWAY_SUMMARY: false,
  TRANSCRIPT_CLASSIFIER: false,
  WEB_BROWSER_TOOL: false,
  MESSAGE_ACTIONS: false,
  BUDDY: true,
  CHICAGO_MCP: false,
  COWORKER_TYPE_TELEMETRY: false,
} }
// ── Pre-process: replace feature() calls with boolean literals ──────
// Bun v1.3.9+ resolves `import { feature } from 'bun:bundle'` natively
// before plugins can intercept it via onResolve. The bun: namespace is
// handled by Bun's C++ resolver which runs before the JS plugin phase,
// so the previous onResolve/onLoad shim was silently ineffective — ALL
// feature() calls evaluated to false regardless of the featureFlags map.
//
// Fix: pre-process source files to strip the bun:bundle import and
// replace feature('FLAG') calls with their boolean literal. Files are
// modified in-place before Bun.build() and restored in a finally block.
// Match feature('FLAG') calls, including multi-line: feature(\n 'FLAG',\n)
const featureCallRe = /\bfeature\(\s*['"](\w+)['"][,\s]*\)/gs
const featureImportRe = /import\s*\{[^}]*\bfeature\b[^}]*\}\s*from\s*['"]bun:bundle['"];?\s*\n?/g
const modifiedFiles = new Map<string, string>() // path → original content
function preProcessFeatureFlags(dir: string) {
for (const ent of readdirSync(dir, { withFileTypes: true })) {
const full = join(dir, ent.name)
if (ent.isDirectory()) { preProcessFeatureFlags(full); continue }
if (!/\.(ts|tsx)$/.test(ent.name)) continue
const raw = readFileSync(full, 'utf-8')
if (!raw.includes('feature(')) continue
let contents = raw
contents = contents.replace(featureImportRe, '')
contents = contents.replace(featureCallRe, (_match, name) =>
String((featureFlags as Record<string, boolean>)[name] ?? false),
)
if (contents !== raw) {
modifiedFiles.set(full, raw)
writeFileSync(full, contents)
}
}
}
function restoreModifiedFiles() {
for (const [path, original] of modifiedFiles) {
writeFileSync(path, original)
}
modifiedFiles.clear()
}
preProcessFeatureFlags(join(import.meta.dir, '..', 'src'))
const numModified = modifiedFiles.size
// Restore source files on abrupt termination (Ctrl+C, kill, etc.)
for (const signal of ['SIGINT', 'SIGTERM'] as const) {
process.on(signal, () => {
restoreModifiedFiles()
process.exit(signal === 'SIGINT' ? 130 : 143)
})
}
try {
const result = await Bun.build({ const result = await Bun.build({
entrypoints: ['./src/entrypoints/cli.tsx'], entrypoints: ['./src/entrypoints/cli.tsx'],
outdir: './dist', outdir: './dist',
@@ -179,11 +103,18 @@ export async function handleBgFlag() { throw new Error("Background sessions are
], ],
] as const) ] as const)
// bun:bundle feature() replacement is handled by the source // Resolve `import { feature } from 'bun:bundle'` to a shim
// pre-processing step above (see preProcessFeatureFlags). build.onResolve({ filter: /^bun:bundle$/ }, () => ({
// The previous onResolve/onLoad shim was ineffective in Bun path: 'bun:bundle',
// v1.3.9+ because the bun: namespace is resolved natively namespace: 'bun-bundle-shim',
// before the JS plugin phase runs. }))
build.onLoad(
{ filter: /.*/, namespace: 'bun-bundle-shim' },
() => ({
contents: `const featureFlags = ${JSON.stringify(featureFlags)};\nexport function feature(name) { return featureFlags[name] ?? false; }`,
loader: 'js',
}),
)
build.onResolve( build.onResolve(
{ filter: /^\.\.\/(daemon\/workerRegistry|daemon\/main|cli\/bg|cli\/handlers\/templateJobs|environment-runner\/main|self-hosted-runner\/main)\.js$/ }, { filter: /^\.\.\/(daemon\/workerRegistry|daemon\/main|cli\/bg|cli\/handlers\/templateJobs|environment-runner\/main|self-hosted-runner\/main)\.js$/ },
@@ -343,7 +274,16 @@ export const SeverityNumber = {};
// Scan source to find imports that can't resolve // Scan source to find imports that can't resolve
function scanForMissingImports() { function scanForMissingImports() {
function checkAndRegister(specifier: string, fileDir: string, namedPart: string) { function walk(dir: string) {
for (const ent of fs.readdirSync(dir, { withFileTypes: true })) {
const full = pathMod.join(dir, ent.name)
if (ent.isDirectory()) { walk(full); continue }
if (!/\.(ts|tsx)$/.test(ent.name)) continue
const code: string = fs.readFileSync(full, 'utf-8')
// Collect all imports
for (const m of code.matchAll(/import\s+(?:\{([^}]*)\}|(\w+))?\s*(?:,\s*\{([^}]*)\})?\s*from\s+['"](.*?)['"]/g)) {
const specifier = m[4]
const namedPart = m[1] || m[3] || ''
const names = namedPart.split(',') const names = namedPart.split(',')
.map((s: string) => s.trim().replace(/^type\s+/, '')) .map((s: string) => s.trim().replace(/^type\s+/, ''))
.filter((s: string) => s && !s.startsWith('type ')) .filter((s: string) => s && !s.startsWith('type '))
@@ -363,7 +303,8 @@ export const SeverityNumber = {};
} }
// Check relative .js imports // Check relative .js imports
else if (specifier.endsWith('.js') && (specifier.startsWith('./') || specifier.startsWith('../'))) { else if (specifier.endsWith('.js') && (specifier.startsWith('./') || specifier.startsWith('../'))) {
const resolved = pathMod.resolve(fileDir, specifier) const dir2 = pathMod.dirname(full)
const resolved = pathMod.resolve(dir2, specifier)
const tsVariant = resolved.replace(/\.js$/, '.ts') const tsVariant = resolved.replace(/\.js$/, '.ts')
const tsxVariant = resolved.replace(/\.js$/, '.tsx') const tsxVariant = resolved.replace(/\.js$/, '.tsx')
if (!fs.existsSync(resolved) && !fs.existsSync(tsVariant) && !fs.existsSync(tsxVariant)) { if (!fs.existsSync(resolved) && !fs.existsSync(tsVariant) && !fs.existsSync(tsxVariant)) {
@@ -377,38 +318,6 @@ export const SeverityNumber = {};
for (const n of names) missingModuleExports.get(specifier)!.add(n) for (const n of names) missingModuleExports.get(specifier)!.add(n)
} }
} }
function walk(dir: string) {
for (const ent of fs.readdirSync(dir, { withFileTypes: true })) {
const full = pathMod.join(dir, ent.name)
if (ent.isDirectory()) { walk(full); continue }
if (!/\.(ts|tsx)$/.test(ent.name)) continue
const rawCode: string = fs.readFileSync(full, 'utf-8')
const fileDir = pathMod.dirname(full)
// Strip comments before scanning for imports/requires.
// The regex scanner matches require()/import() patterns
// inside JSDoc comments, causing false-positive missing
// module detection that breaks the build with noop stubs.
const code = rawCode
.replace(/\/\*[\s\S]*?\*\//g, '') // block comments
.replace(/\/\/.*$/gm, '') // line comments
// Collect static imports: import { X } from '...'
for (const m of code.matchAll(/import\s+(?:\{([^}]*)\}|(\w+))?\s*(?:,\s*\{([^}]*)\})?\s*from\s+['"](.*?)['"]/g)) {
checkAndRegister(m[4], fileDir, m[1] || m[3] || '')
}
// Collect dynamic requires: require('...') — these are used
// behind feature() gates and become live when flags are enabled.
for (const m of code.matchAll(/require\(\s*['"](\.\.?\/[^'"]+)['"]\s*\)/g)) {
checkAndRegister(m[1], fileDir, '')
}
// Collect dynamic imports: import('...')
for (const m of code.matchAll(/import\(\s*['"](\.\.?\/[^'"]+)['"]\s*\)/g)) {
checkAndRegister(m[1], fileDir, '')
}
} }
} }
walk(srcDir) walk(srcDir)
@@ -480,13 +389,7 @@ if (!result.success) {
for (const log of result.logs) { for (const log of result.logs) {
console.error(log) console.error(log)
} }
process.exitCode = 1 process.exit(1)
} else {
console.log(`✓ Built openclaude v${version} → dist/cli.mjs`)
} }
} finally { console.log(`✓ Built openclaude v${version} → dist/cli.mjs`)
// Always restore source files, even if Bun.build() throws
restoreModifiedFiles()
console.log(` 🔄 feature-flags: pre-processed ${numModified} files (restored)`)
}
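
For reference, the pre-processing pass described in the removed build-script comments is a plain textual transform. A standalone sketch of the same idea (both regexes are copied verbatim from the diff; the sample flag map and driver code are illustrative):

```ts
// Standalone sketch of the feature() literal replacement described in the
// removed build-script comments above. Both regexes are copied from the
// diff; the sample flags and the console.log driver are not.
const featureFlags: Record<string, boolean> = { ULTRATHINK: true, DAEMON: false }
const featureCallRe = /\bfeature\(\s*['"](\w+)['"][,\s]*\)/gs
const featureImportRe =
  /import\s*\{[^}]*\bfeature\b[^}]*\}\s*from\s*['"]bun:bundle['"];?\s*\n?/g

function replaceFeatureCalls(source: string): string {
  return source
    .replace(featureImportRe, '') // drop the bun:bundle import entirely
    .replace(featureCallRe, (_m, name) => String(featureFlags[name] ?? false))
}

// "if (feature('ULTRATHINK'))" becomes "if (true)"; unknown flags → false.
console.log(replaceFeatureCalls(
  "import { feature } from 'bun:bundle';\nif (feature('ULTRATHINK')) boost()",
))
```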

View File

@@ -1,163 +0,0 @@
import { afterAll, beforeEach, describe, expect, test } from 'bun:test'
import { mkdirSync, readFileSync, rmSync, unlinkSync, writeFileSync } from 'node:fs'
import { join } from 'node:path'
import { tmpdir } from 'node:os'
// ---------------------------------------------------------------------------
// Setup: extract the growthbook stub from no-telemetry-plugin.ts, write it to
// a temp .mjs file, and dynamically import it so we can test the real code
// that gets bundled.
// ---------------------------------------------------------------------------
const pluginSource = readFileSync(join(__dirname, 'no-telemetry-plugin.ts'), 'utf-8')
const stubMatch = pluginSource.match(/'services\/analytics\/growthbook': `([\s\S]*?)`/)
if (!stubMatch) throw new Error('Could not extract growthbook stub from no-telemetry-plugin.ts')
const testDir = join(tmpdir(), `growthbook-stub-test-${process.pid}`)
const stubFile = join(testDir, 'growthbook-stub.mjs')
const flagsFile = join(testDir, 'test-flags.json')
mkdirSync(testDir, { recursive: true })
writeFileSync(stubFile, stubMatch[1])
// Point the stub at our test flags file (checked by _loadFlags on first access)
process.env.CLAUDE_FEATURE_FLAGS_FILE = flagsFile
const stub = await import(stubFile)
// ---------------------------------------------------------------------------
// Tests
// ---------------------------------------------------------------------------
describe('growthbook stub — local feature flag overrides', () => {
beforeEach(() => {
stub.resetGrowthBook()
try { unlinkSync(flagsFile) } catch { /* may not exist */ }
})
afterAll(() => {
rmSync(testDir, { recursive: true, force: true })
delete process.env.CLAUDE_FEATURE_FLAGS_FILE
})
// ── File absent ──────────────────────────────────────────────────
test('returns defaultValue when flags file is absent', () => {
expect(stub.getFeatureValue_CACHED_MAY_BE_STALE('tengu_foo', 42)).toBe(42)
})
test('getAllGrowthBookFeatures returns {} when file is absent', () => {
expect(stub.getAllGrowthBookFeatures()).toEqual({})
})
// ── Open-build defaults (_openBuildDefaults) ────────────────────
test('returns open-build default when flags file is absent', () => {
// tengu_passport_quail is in _openBuildDefaults as true; without a
// flags file the stub should return the open-build override, not
// the call-site defaultValue.
expect(stub.getFeatureValue_CACHED_MAY_BE_STALE('tengu_passport_quail', false)).toBe(true)
expect(stub.getFeatureValue_CACHED_MAY_BE_STALE('tengu_coral_fern', false)).toBe(true)
})
test('flags file overrides open-build defaults', () => {
// User-provided feature-flags.json takes priority over _openBuildDefaults.
writeFileSync(flagsFile, JSON.stringify({ tengu_passport_quail: false }))
expect(stub.getFeatureValue_CACHED_MAY_BE_STALE('tengu_passport_quail', true)).toBe(false)
})
// ── Valid JSON object ────────────────────────────────────────────
test('loads and returns values from a valid JSON file', () => {
writeFileSync(flagsFile, JSON.stringify({ tengu_foo: true, tengu_bar: 'hello' }))
expect(stub.getFeatureValue_CACHED_MAY_BE_STALE('tengu_foo', false)).toBe(true)
expect(stub.getFeatureValue_CACHED_MAY_BE_STALE('tengu_bar', 'default')).toBe('hello')
})
test('returns defaultValue for keys not present in the file', () => {
writeFileSync(flagsFile, JSON.stringify({ tengu_foo: true }))
expect(stub.getFeatureValue_CACHED_MAY_BE_STALE('tengu_missing', 99)).toBe(99)
})
test('getAllGrowthBookFeatures returns the full flags object', () => {
const flags = { tengu_a: true, tengu_b: false, tengu_c: 42 }
writeFileSync(flagsFile, JSON.stringify(flags))
expect(stub.getAllGrowthBookFeatures()).toEqual(flags)
})
// ── Malformed / non-object JSON ──────────────────────────────────
test('falls back to defaults on malformed JSON', () => {
writeFileSync(flagsFile, '{not valid json!!!')
expect(stub.getFeatureValue_CACHED_MAY_BE_STALE('tengu_foo', 'fallback')).toBe('fallback')
})
test('falls back to defaults when JSON is a primitive (true)', () => {
writeFileSync(flagsFile, 'true')
expect(stub.getFeatureValue_CACHED_MAY_BE_STALE('tengu_foo', 'fallback')).toBe('fallback')
})
test('falls back to defaults when JSON is an array', () => {
writeFileSync(flagsFile, '["a", "b"]')
expect(stub.getFeatureValue_CACHED_MAY_BE_STALE('tengu_foo', 'fallback')).toBe('fallback')
})
// ── Cache invalidation ───────────────────────────────────────────
test('resetGrowthBook clears cache so the file is re-read', () => {
writeFileSync(flagsFile, JSON.stringify({ tengu_foo: 'first' }))
expect(stub.getFeatureValue_CACHED_MAY_BE_STALE('tengu_foo', 'x')).toBe('first')
// Update the file — cached value is still 'first'
writeFileSync(flagsFile, JSON.stringify({ tengu_foo: 'second' }))
expect(stub.getFeatureValue_CACHED_MAY_BE_STALE('tengu_foo', 'x')).toBe('first')
// After reset, the new value is picked up
stub.resetGrowthBook()
expect(stub.getFeatureValue_CACHED_MAY_BE_STALE('tengu_foo', 'x')).toBe('second')
})
test('refreshGrowthBookFeatures clears cache', async () => {
writeFileSync(flagsFile, JSON.stringify({ tengu_foo: 'v1' }))
expect(stub.getFeatureValue_CACHED_MAY_BE_STALE('tengu_foo', 'x')).toBe('v1')
writeFileSync(flagsFile, JSON.stringify({ tengu_foo: 'v2' }))
await stub.refreshGrowthBookFeatures()
expect(stub.getFeatureValue_CACHED_MAY_BE_STALE('tengu_foo', 'x')).toBe('v2')
})
// ── Multiple getter variants ─────────────────────────────────────
test('all getter functions read from local flags', async () => {
writeFileSync(flagsFile, JSON.stringify({ tengu_gate: true, tengu_config: { a: 1 } }))
expect(await stub.getFeatureValue_DEPRECATED('tengu_gate', false)).toBe(true)
stub.resetGrowthBook()
expect(stub.getFeatureValue_CACHED_WITH_REFRESH('tengu_gate', false)).toBe(true)
stub.resetGrowthBook()
expect(stub.checkStatsigFeatureGate_CACHED_MAY_BE_STALE('tengu_gate')).toBe(true)
stub.resetGrowthBook()
expect(await stub.checkGate_CACHED_OR_BLOCKING('tengu_gate')).toBe(true)
stub.resetGrowthBook()
expect(await stub.getDynamicConfig_BLOCKS_ON_INIT('tengu_config', {})).toEqual({ a: 1 })
stub.resetGrowthBook()
expect(stub.getDynamicConfig_CACHED_MAY_BE_STALE('tengu_config', {})).toEqual({ a: 1 })
})
// ── Security gate ────────────────────────────────────────────────
test('checkSecurityRestrictionGate always returns false regardless of flags', async () => {
writeFileSync(flagsFile, JSON.stringify({
tengu_disable_bypass_permissions_mode: true,
}))
expect(await stub.checkSecurityRestrictionGate()).toBe(false)
})
})

View File

@@ -34,201 +34,28 @@ export function _resetForTesting() {}
`, `,
'services/analytics/growthbook': ` 'services/analytics/growthbook': `
import _fs from 'node:fs';
import _path from 'node:path';
import _os from 'node:os';
let _flags = undefined;
// ── Open-build GrowthBook overrides ───────────────────────────────────
// Override upstream defaultValue for runtime gates tied to build-time
// features. Only keys that DIFFER from upstream belong here — the
// catalog below is pure documentation and does NOT affect resolution.
//
// Priority: ~/.claude/feature-flags.json > _openBuildDefaults > defaultValue
//
// To override at runtime, create ~/.claude/feature-flags.json:
// { "tengu_some_flag": true }
const _openBuildDefaults = {
'tengu_sedge_lantern': true, // AWAY_SUMMARY — "while you were away" recap (upstream: false)
'tengu_hive_evidence': true, // VERIFICATION_AGENT — read-only test/verification agent (upstream: false)
'tengu_passport_quail': true, // EXTRACT_MEMORIES — enable memory extraction (upstream: false)
'tengu_coral_fern': true, // EXTRACT_MEMORIES — enable memory search in past context (upstream: false)
};
/* ── Known runtime feature keys (reference only) ───────────────────────
* This catalog does NOT participate in flag resolution. It documents
* the known GrowthBook keys and their upstream default values, scraped
* from src/ call sites. It is NOT exhaustive — new keys may be added
* upstream between catalog updates.
*
* Some keys have different defaults at different call sites — this is
* intentional upstream (the server unifies the value at runtime).
*
* To activate any of these, add them to ~/.claude/feature-flags.json
* or to _openBuildDefaults above.
*
* ── Reasoning & thinking ──────────────────────────────────────────────
* tengu_turtle_carbon = true ULTRATHINK deep thinking runtime gate
* tengu_thinkback = gate /thinkback replay command
*
* ── Agents & orchestration ────────────────────────────────────────────
* tengu_amber_flint = true Agent swarms coordination
* tengu_amber_stoat = true Built-in agent availability (Explore, Plan, etc.)
* tengu_agent_list_attach = true Attach file context to agent list
* tengu_auto_background_agents = false Auto-spawn background agents
* tengu_slim_subagent_claudemd = true Lighter ClaudeMD for subagents
* tengu_hive_evidence = false Verification agent / evidence tracking (4 call sites)
* tengu_ultraplan_model = model cfg ULTRAPLAN model selection (dynamic config)
*
* ── Memory & context ──────────────────────────────────────────────────
* tengu_passport_quail = false EXTRACT_MEMORIES main gate (isExtractModeActive)
* tengu_coral_fern = false EXTRACT_MEMORIES search in past context
* tengu_slate_thimble = false Memory dir paths (non-interactive sessions)
* tengu_herring_clock = true/false Team memory paths (varies by call site)
* tengu_bramble_lintel = null Extract memories throttle (null → every turn)
* tengu_sedge_lantern = false AWAY_SUMMARY "while you were away" recap
* tengu_session_memory = false Session memory service
* tengu_sm_config = {} Session memory config (dynamic)
* tengu_sm_compact_config = {} Session memory compaction config (dynamic)
* tengu_cobalt_raccoon = false Reactive compaction (suppress auto-compact)
* tengu_pebble_leaf_prune = false Session storage pruning
*
* ── Kairos & cron ─────────────────────────────────────────────────────
* tengu_kairos_brief = false Brief layout mode (KAIROS)
* tengu_kairos_brief_config = {} Brief config (dynamic)
* tengu_kairos_cron = true Cron scheduler enable
* tengu_kairos_cron_durable = true Durable (disk-persistent) cron tasks
* tengu_kairos_cron_config = {} Cron jitter config (dynamic)
*
* ── Bridge & remote (require Anthropic infra) ─────────────────────────
* tengu_ccr_bridge = false CCR bridge connection
* tengu_ccr_bridge_multi_session = gate Multi-session spawn mode
* tengu_ccr_mirror = false CCR session mirroring
* tengu_ccr_bundle_seed_enabled = gate Git bundle seeding for CCR
* tengu_ccr_bundle_max_bytes = null Bundle size limit (null → default)
* tengu_bridge_repl_v2 = false Environment-less REPL bridge v2
* tengu_bridge_repl_v2_cse_shim_enabled = true CSE→Session tag retag shim
* tengu_bridge_min_version = {min:'0'} Min CLI version for bridge (dynamic)
* tengu_bridge_initial_history_cap = 200 Initial history cap for bridge
* tengu_bridge_system_init = false Bridge system initialization
* tengu_cobalt_harbor = false Auto-connect CCR at startup
* tengu_cobalt_lantern = false Remote setup preconditions
* tengu_remote_backend = false Remote TUI backend
* tengu_surreal_dali = false Remote agent tasks / triggers
*
* ── Prompt & API ──────────────────────────────────────────────────────
* tengu_attribution_header = true Attribution header in API requests
* tengu_basalt_3kr = true MCP instructions delta
* tengu_slate_prism = true/false Message formatting (varies by call site)
* tengu_amber_prism = false Message content formatting
* tengu_amber_json_tools = false JSON format for tool schemas
* tengu_fgts = false API feature gates
* tengu_otk_slot_v1 = false One-time key slots for API auth
* tengu_cicada_nap_ms = 0 Background GrowthBook refresh throttle (ms)
* tengu_miraculo_the_bard = false Service initialization gate
* tengu_immediate_model_command = false Immediate /model command execution
* tengu_chomp_inflection = false Prompt suggestions after responses
* tengu_tool_pear = gate API betas for tool use
* tengu-off-switch = {act:false} Service kill switch (dynamic; uses dash)
*
* ── Permissions & security ────────────────────────────────────────────
* tengu_birch_trellis = true Bash auto-mode permissions config
* tengu_auto_mode_config = {} Auto-mode configuration (dynamic, many call sites)
* tengu_iron_gate_closed = true Permission iron gate (with refresh)
* tengu_destructive_command_warning = false Warning for destructive bash commands
* tengu_disable_bypass_permissions_mode = security Security killswitch (always false in open build)
*
* ── UI & UX ───────────────────────────────────────────────────────────
* tengu_willow_mode = 'off' REPL rendering mode
* tengu_terminal_panel = false Terminal panel keybinding
* tengu_terminal_sidebar = false Terminal sidebar in REPL/config
* tengu_marble_sandcastle = false Fast mode gate
* tengu_jade_anvil_4 = false Rate limit options UI ordering
* tengu_collage_kaleidoscope = true Native clipboard image paste (macOS)
* tengu_lapis_finch = false Plugin/hint recommendation
* tengu_lodestone_enabled = false Deep links claude-cli:// protocol
* tengu_copper_panda = false Skill improvement suggestions
* tengu_desktop_upsell = {} Desktop app upsell config (dynamic)
* tengu-top-of-feed-tip = {} Emergency tip of feed (dynamic; uses dash)
*
* ── File operations ───────────────────────────────────────────────────
* tengu_quartz_lantern = false File read/write dedup optimization
* tengu_moth_copse = false Attachments handling (variant A)
* tengu_marble_fox = false Attachments handling (variant B)
* tengu_scratch = gate Scratchpad filesystem access / coordinator
*
* ── MCP & plugins ─────────────────────────────────────────────────────
* tengu_harbor = false MCP channel allowlist verification
* tengu_harbor_permissions = false MCP channel permissions enforcement
* tengu_copper_bridge = false Chrome MCP bridge
* tengu_chrome_auto_enable = false Auto-enable Chrome MCP on startup
* tengu_glacier_2xr = false Enhanced tool search / ToolSearchTool
* tengu_malort_pedway = {} Computer-use (Chicago) config (dynamic)
*
* ── VSCode / IDE ──────────────────────────────────────────────────────
* tengu_quiet_fern = false VSCode browser support
* tengu_vscode_cc_auth = false VSCode in-band OAuth via claude_authenticate
* tengu_vscode_review_upsell = gate VSCode review upsell
* tengu_vscode_onboarding = gate VSCode onboarding experience
*
* ── Voice ─────────────────────────────────────────────────────────────
* tengu_amber_quartz_disabled = false VOICE_MODE kill-switch (false = voice allowed)
*
* ── Auto-updater (stubbed in open build) ──────────────────────────────
* tengu_version_config = {min:'0'} Min version enforcement (dynamic)
* tengu_max_version_config = {} Max version / deprecation config (dynamic)
*
* ── Telemetry & tracing ───────────────────────────────────────────────
* tengu_trace_lantern = false Beta session tracing
* tengu_chair_sermon = gate Analytics / message formatting gate
* tengu_strap_foyer = false Settings sync to cloud
*/
function _loadFlags() {
if (_flags !== undefined) return;
try {
const flagsPath = process.env.CLAUDE_FEATURE_FLAGS_FILE
|| _path.join(_os.homedir(), '.claude', 'feature-flags.json');
const parsed = JSON.parse(_fs.readFileSync(flagsPath, 'utf-8'));
_flags = (parsed && typeof parsed === 'object' && !Array.isArray(parsed)) ? parsed : null;
} catch {
_flags = null;
}
}
function _getFlagValue(key, defaultValue) {
_loadFlags();
if (_flags != null && Object.hasOwn(_flags, key)) return _flags[key];
if (Object.hasOwn(_openBuildDefaults, key)) return _openBuildDefaults[key];
return defaultValue;
}
const noop = () => {}; const noop = () => {};
export function onGrowthBookRefresh() { return noop; } export function onGrowthBookRefresh() { return noop; }
export function hasGrowthBookEnvOverride() { return false; } export function hasGrowthBookEnvOverride() { return false; }
export function getAllGrowthBookFeatures() { _loadFlags(); return _flags || {}; } export function getAllGrowthBookFeatures() { return {}; }
export function getGrowthBookConfigOverrides() { return {}; } export function getGrowthBookConfigOverrides() { return {}; }
export function setGrowthBookConfigOverride() {} export function setGrowthBookConfigOverride() {}
export function clearGrowthBookConfigOverrides() {} export function clearGrowthBookConfigOverrides() {}
export function getApiBaseUrlHost() { return undefined; } export function getApiBaseUrlHost() { return undefined; }
export const initializeGrowthBook = async () => null; export const initializeGrowthBook = async () => null;
export async function getFeatureValue_DEPRECATED(feature, defaultValue) { return _getFlagValue(feature, defaultValue); } export async function getFeatureValue_DEPRECATED(feature, defaultValue) { return defaultValue; }
export function getFeatureValue_CACHED_MAY_BE_STALE(feature, defaultValue) { return _getFlagValue(feature, defaultValue); } export function getFeatureValue_CACHED_MAY_BE_STALE(feature, defaultValue) { return defaultValue; }
export function getFeatureValue_CACHED_WITH_REFRESH(feature, defaultValue) { return _getFlagValue(feature, defaultValue); } export function getFeatureValue_CACHED_WITH_REFRESH(feature, defaultValue) { return defaultValue; }
export function checkStatsigFeatureGate_CACHED_MAY_BE_STALE(gate) { return Boolean(_getFlagValue(gate, false)); } export function checkStatsigFeatureGate_CACHED_MAY_BE_STALE() { return false; }
// Security killswitch — always false in the open build. Anthropic uses this export async function checkSecurityRestrictionGate() { return false; }
// gate to remotely disable bypassPermissions mode; exposing it via local flags export async function checkGate_CACHED_OR_BLOCKING() { return false; }
// would let users accidentally lock themselves out of --dangerously-skip-permissions.
export async function checkSecurityRestrictionGate(gate) { return false; }
export async function checkGate_CACHED_OR_BLOCKING(gate) { return Boolean(_getFlagValue(gate, false)); }
export function refreshGrowthBookAfterAuthChange() {} export function refreshGrowthBookAfterAuthChange() {}
export function resetGrowthBook() { _flags = undefined; } export function resetGrowthBook() {}
export async function refreshGrowthBookFeatures() { _flags = undefined; } export async function refreshGrowthBookFeatures() {}
export function setupPeriodicGrowthBookRefresh() {} export function setupPeriodicGrowthBookRefresh() {}
export function stopPeriodicGrowthBookRefresh() {} export function stopPeriodicGrowthBookRefresh() {}
export async function getDynamicConfig_BLOCKS_ON_INIT(configName, defaultValue) { return _getFlagValue(configName, defaultValue); } export async function getDynamicConfig_BLOCKS_ON_INIT(configName, defaultValue) { return defaultValue; }
export function getDynamicConfig_CACHED_MAY_BE_STALE(configName, defaultValue) { return _getFlagValue(configName, defaultValue); } export function getDynamicConfig_CACHED_MAY_BE_STALE(configName, defaultValue) { return defaultValue; }
`, `,
'services/analytics/sink': ` 'services/analytics/sink': `
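
The stub above resolves flags in three tiers: `~/.claude/feature-flags.json`, then `_openBuildDefaults`, then the call-site default. A consolidated sketch of that lookup, with the `_flags` caching elided (logic mirrors `_loadFlags`/`_getFlagValue` from the diff; the sample default entry is illustrative):

```ts
// Consolidated sketch of the stub's three-tier flag lookup:
// flags file > _openBuildDefaults > call-site defaultValue.
// The real stub caches the parsed file; caching is elided here.
import { readFileSync } from 'node:fs'
import { join } from 'node:path'
import { homedir } from 'node:os'

const openBuildDefaults: Record<string, unknown> = { tengu_passport_quail: true }

function getFlagValue<T>(key: string, defaultValue: T): T {
  let flags: Record<string, unknown> | null = null
  try {
    const flagsPath = process.env.CLAUDE_FEATURE_FLAGS_FILE
      ?? join(homedir(), '.claude', 'feature-flags.json')
    const parsed = JSON.parse(readFileSync(flagsPath, 'utf-8'))
    if (parsed && typeof parsed === 'object' && !Array.isArray(parsed)) flags = parsed
  } catch {
    // absent or malformed file: fall through to defaults
  }
  if (flags && Object.hasOwn(flags, key)) return flags[key] as T
  if (Object.hasOwn(openBuildDefaults, key)) return openBuildDefaults[key] as T
  return defaultValue
}
```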

View File

@@ -20,23 +20,6 @@ describe('formatReachabilityFailureDetail', () => {
) )
}) })
test('redacts credentials and sensitive query parameters in endpoint details', () => {
const detail = formatReachabilityFailureDetail(
'http://user:pass@localhost:11434/v1/models?token=abc123&mode=test',
502,
'bad gateway',
{
transport: 'chat_completions',
requestedModel: 'llama3.1:8b',
resolvedModel: 'llama3.1:8b',
},
)
expect(detail).toBe(
'Unexpected status 502 from http://redacted:redacted@localhost:11434/v1/models?token=redacted&mode=test. Body: bad gateway',
)
})
test('adds alias/entitlement hint for codex model support 400s', () => { test('adds alias/entitlement hint for codex model support 400s', () => {
const detail = formatReachabilityFailureDetail( const detail = formatReachabilityFailureDetail(
'https://chatgpt.com/backend-api/codex/responses', 'https://chatgpt.com/backend-api/codex/responses',
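
The removed test above pins the expected behavior of `redactUrlForDisplay`: credentials become `redacted` and sensitive query parameters are masked while the rest of the URL survives. A hedged re-creation that satisfies that expectation (the sensitive-key pattern is an assumption; the real helper may differ):

```ts
// Hedged re-creation of redactUrlForDisplay as pinned by the removed
// test: user:pass become "redacted" and query params whose keys look
// credential-like are masked. The key pattern is an assumption.
export function redactUrlForDisplay(raw: string): string {
  try {
    const url = new URL(raw)
    if (url.username) url.username = 'redacted'
    if (url.password) url.password = 'redacted'
    const sensitive = /token|key|secret|password|auth/i
    for (const key of [...url.searchParams.keys()]) {
      if (sensitive.test(key)) url.searchParams.set(key, 'redacted')
    }
    return url.toString()
  } catch {
    return raw // not parseable as a URL; show unchanged
  }
}
```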

View File

@@ -7,11 +7,6 @@ import {
resolveProviderRequest, resolveProviderRequest,
isLocalProviderUrl as isProviderLocalUrl, isLocalProviderUrl as isProviderLocalUrl,
} from '../src/services/api/providerConfig.js' } from '../src/services/api/providerConfig.js'
import {
getLocalOpenAICompatibleProviderLabel,
probeOllamaGenerationReadiness,
} from '../src/utils/providerDiscovery.js'
import { redactUrlForDisplay } from '../src/utils/urlRedaction.js'
type CheckResult = { type CheckResult = {
ok: boolean ok: boolean
@@ -74,7 +69,7 @@ export function formatReachabilityFailureDetail(
}, },
): string { ): string {
const compactBody = responseBody.trim().replace(/\s+/g, ' ').slice(0, 240) const compactBody = responseBody.trim().replace(/\s+/g, ' ').slice(0, 240)
const base = `Unexpected status ${status} from ${redactUrlForDisplay(endpoint)}.` const base = `Unexpected status ${status} from ${endpoint}.`
const bodySuffix = compactBody ? ` Body: ${compactBody}` : '' const bodySuffix = compactBody ? ` Body: ${compactBody}` : ''
if (request.transport !== 'codex_responses' || status !== 400) { if (request.transport !== 'codex_responses' || status !== 400) {
@@ -260,7 +255,7 @@ function checkOpenAIEnv(): CheckResult[] {
results.push(pass('OPENAI_MODEL', process.env.OPENAI_MODEL)) results.push(pass('OPENAI_MODEL', process.env.OPENAI_MODEL))
} }
results.push(pass('OPENAI_BASE_URL', redactUrlForDisplay(request.baseUrl))) results.push(pass('OPENAI_BASE_URL', request.baseUrl))
if (request.transport === 'codex_responses') { if (request.transport === 'codex_responses') {
const credentials = resolveCodexApiCredentials(process.env) const credentials = resolveCodexApiCredentials(process.env)
@@ -313,7 +308,7 @@ async function checkBaseUrlReachability(): Promise<CheckResult> {
return pass('Provider reachability', 'Skipped (OpenAI-compatible mode disabled).') return pass('Provider reachability', 'Skipped (OpenAI-compatible mode disabled).')
} }
if (useGithub && !useOpenAI) { if (useGithub) {
return pass( return pass(
'Provider reachability', 'Provider reachability',
'Skipped for GitHub Models (inference endpoint differs from OpenAI /models probe).', 'Skipped for GitHub Models (inference endpoint differs from OpenAI /models probe).',
@@ -331,7 +326,6 @@ async function checkBaseUrlReachability(): Promise<CheckResult> {
const endpoint = request.transport === 'codex_responses' const endpoint = request.transport === 'codex_responses'
? `${request.baseUrl}/responses` ? `${request.baseUrl}/responses`
: `${request.baseUrl}/models` : `${request.baseUrl}/models`
const redactedEndpoint = redactUrlForDisplay(endpoint)
const controller = new AbortController() const controller = new AbortController()
const timeout = setTimeout(() => controller.abort(), 4000) const timeout = setTimeout(() => controller.abort(), 4000)
@@ -381,10 +375,7 @@ async function checkBaseUrlReachability(): Promise<CheckResult> {
}) })
if (response.status === 200 || response.status === 401 || response.status === 403) { if (response.status === 200 || response.status === 401 || response.status === 403) {
return pass( return pass('Provider reachability', `Reached ${endpoint} (status ${response.status}).`)
'Provider reachability',
`Reached ${redactedEndpoint} (status ${response.status}).`,
)
} }
const responseBody = await response.text().catch(() => '') const responseBody = await response.text().catch(() => '')
@@ -400,100 +391,12 @@ async function checkBaseUrlReachability(): Promise<CheckResult> {
) )
} catch (error) { } catch (error) {
const message = error instanceof Error ? error.message : String(error) const message = error instanceof Error ? error.message : String(error)
return fail( return fail('Provider reachability', `Failed to reach ${endpoint}: ${message}`)
'Provider reachability',
`Failed to reach ${redactedEndpoint}: ${message}`,
)
} finally { } finally {
clearTimeout(timeout) clearTimeout(timeout)
} }
} }
async function checkProviderGenerationReadiness(): Promise<CheckResult> {
const useGemini = isTruthy(process.env.CLAUDE_CODE_USE_GEMINI)
const useOpenAI = isTruthy(process.env.CLAUDE_CODE_USE_OPENAI)
const useGithub = isTruthy(process.env.CLAUDE_CODE_USE_GITHUB)
const useMistral = isTruthy(process.env.CLAUDE_CODE_USE_MISTRAL)
if (!useGemini && !useOpenAI && !useGithub && !useMistral) {
return pass('Provider generation readiness', 'Skipped (OpenAI-compatible mode disabled).')
}
if (useGithub && !useOpenAI) {
return pass(
'Provider generation readiness',
'Skipped for GitHub Models (runtime generation uses a different endpoint flow).',
)
}
if (useGemini || useMistral) {
return pass(
'Provider generation readiness',
'Skipped for managed provider mode.',
)
}
if (!useOpenAI) {
return pass('Provider generation readiness', 'Skipped (OpenAI-compatible mode disabled).')
}
const request = resolveProviderRequest({
model: process.env.OPENAI_MODEL,
baseUrl: process.env.OPENAI_BASE_URL,
})
if (request.transport === 'codex_responses') {
return pass(
'Provider generation readiness',
'Skipped for Codex responses (reachability probe already performs a lightweight generation request).',
)
}
if (!isLocalBaseUrl(request.baseUrl)) {
return pass('Provider generation readiness', 'Skipped for non-local provider URL.')
}
const localProviderLabel = getLocalOpenAICompatibleProviderLabel(request.baseUrl)
if (localProviderLabel !== 'Ollama') {
return pass(
'Provider generation readiness',
`Skipped for ${localProviderLabel} (no provider-specific generation probe).`,
)
}
const readiness = await probeOllamaGenerationReadiness({
baseUrl: request.baseUrl,
model: request.requestedModel,
})
if (readiness.state === 'ready') {
return pass(
'Provider generation readiness',
`Generated a test response with ${readiness.probeModel ?? request.requestedModel}.`,
)
}
if (readiness.state === 'unreachable') {
return fail(
'Provider generation readiness',
`Could not reach Ollama at ${redactUrlForDisplay(request.baseUrl)}.`,
)
}
if (readiness.state === 'no_models') {
return fail(
'Provider generation readiness',
'Ollama is reachable, but no installed models were found. Pull a model first (for example: ollama pull qwen2.5-coder:7b).',
)
}
const detailSuffix = readiness.detail ? ` Detail: ${readiness.detail}.` : ''
return fail(
'Provider generation readiness',
`Ollama is reachable, but generation failed for ${readiness.probeModel ?? request.requestedModel}.${detailSuffix}`,
)
}
function isAtomicChatUrl(baseUrl: string): boolean { function isAtomicChatUrl(baseUrl: string): boolean {
try { try {
const parsed = new URL(baseUrl) const parsed = new URL(baseUrl)
@@ -664,7 +567,6 @@ async function main(): Promise<void> {
results.push(checkBuildArtifacts()) results.push(checkBuildArtifacts())
results.push(...checkOpenAIEnv()) results.push(...checkOpenAIEnv())
results.push(await checkBaseUrlReachability()) results.push(await checkBaseUrlReachability())
results.push(await checkProviderGenerationReadiness())
results.push(checkOllamaProcessorMode()) results.push(checkOllamaProcessorMode())
if (!options.json) { if (!options.json) {
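
The reachability probe above boils down to a fetch with a 4-second abort and a tolerant status check. A self-contained sketch of the same pattern (the endpoint selection and the 200/401/403 rule come from the diff; the helper itself is illustrative):

```ts
// Illustrative 4-second reachability probe mirroring the check above:
// the endpoint depends on transport, and 200/401/403 all count as
// reachable (an auth failure still proves the host answered).
async function probeProvider(baseUrl: string, transport: string): Promise<boolean> {
  const endpoint = transport === 'codex_responses'
    ? `${baseUrl}/responses`
    : `${baseUrl}/models`
  const controller = new AbortController()
  const timeout = setTimeout(() => controller.abort(), 4000)
  try {
    const response = await fetch(endpoint, { signal: controller.signal })
    return [200, 401, 403].includes(response.status)
  } catch {
    return false // network error, DNS failure, or the 4s abort
  } finally {
    clearTimeout(timeout)
  }
}
```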

View File

@@ -249,11 +249,6 @@ export type ToolUseContext = {
/** When true, canUseTool must always be called even when hooks auto-approve. /** When true, canUseTool must always be called even when hooks auto-approve.
* Used by speculation for overlay file path rewriting. */ * Used by speculation for overlay file path rewriting. */
requireCanUseTool?: boolean requireCanUseTool?: boolean
/**
* Optional callback used by hook-chain fallback actions that launch
* AgentTool from hook runtime paths.
*/
hookChainsCanUseTool?: CanUseToolFn
messages: Message[] messages: Message[]
fileReadingLimits?: { fileReadingLimits?: {
maxTokens?: number maxTokens?: number

View File

@@ -1,290 +0,0 @@
/**
* Tests for Bug Fixes applied to openclaude.
*
* Covers:
* 1. Gemini `store: false` rejection fix
* 2. Session timeout / 500 error fix (stream idle timeout)
* 3. Agent loop continuation nudge
* 4. Web search result count improvements
*/
import { describe, test, expect } from 'bun:test'
import { resolve } from 'path'
const SRC = resolve(import.meta.dir, '..')
const file = (relative: string) => Bun.file(resolve(SRC, relative))
// ---------------------------------------------------------------------------
// Fix 1: Gemini `store: false` rejection
// ---------------------------------------------------------------------------
describe('Gemini store field fix', () => {
test('isGeminiMode is imported and used in openaiShim', async () => {
const content = await file('services/api/openaiShim.ts').text()
// Verify the fix: store deletion should check for Gemini mode
expect(content).toContain('isGeminiMode()')
expect(content).toContain("mistral and gemini don't recognize body.store")
// Ensure the delete body.store is guarded for both Mistral and Gemini
expect(content).toMatch(/isMistral\s*\|\|\s*isGeminiMode\(\)/)
})
test('store: false is still set by default (OpenAI needs it)', async () => {
const content = await file('services/api/openaiShim.ts').text()
// The body should still have store: false by default
expect(content).toMatch(/store:\s*false/)
// But it should be deleted for non-OpenAI providers
expect(content).toMatch(/delete body\.store/)
})
})
// ---------------------------------------------------------------------------
// Fix 2: Session timeout — stream idle timeout
// ---------------------------------------------------------------------------
describe('Session timeout fix', () => {
test('openaiShim has idle timeout for SSE streams', async () => {
const content = await file('services/api/openaiShim.ts').text()
expect(content).toContain('STREAM_IDLE_TIMEOUT_MS')
expect(content).toContain('readWithTimeout')
expect(content).toMatch(/readWithTimeout\(\)/)
})
test('codexShim has idle timeout for SSE streams', async () => {
const content = await file('services/api/codexShim.ts').text()
expect(content).toContain('STREAM_IDLE_TIMEOUT_MS')
expect(content).toContain('readWithTimeout')
expect(content).toMatch(/readWithTimeout\(\)/)
})
test('idle timeout is set to a reasonable value (>= 60s)', async () => {
const content = await file('services/api/openaiShim.ts').text()
// Extract the timeout value (supports numeric separators like 120_000)
const match = content.match(/STREAM_IDLE_TIMEOUT_MS\s*=\s*([\d_]+)/)
expect(match).not.toBeNull()
const timeoutMs = parseInt(match![1].replace(/_/g, ''), 10)
expect(timeoutMs).toBeGreaterThanOrEqual(60_000)
})
})
// ---------------------------------------------------------------------------
// Fix 3: Agent loop continuation nudge
// ---------------------------------------------------------------------------
describe('Agent loop continuation nudge', () => {
test('query.ts has continuation signal detection', async () => {
const content = await file('query.ts').text()
expect(content).toContain('continuationSignals')
expect(content).toContain('Continuation nudge triggered')
expect(content).toContain('continuation_nudge')
})
test('continuation signals include tightened patterns', async () => {
const content = await file('query.ts').text()
// Should detect tightened patterns requiring explicit action verbs
expect(content).toMatch(/so now \(i\|let me\|we\)/)
expect(content).toContain('completionMarkers')
expect(content).toContain('MAX_CONTINUATION_NUDGES')
// Verify the nudge counter guard exists
expect(content).toMatch(/continuationNudgeCount\s*<\s*MAX_CONTINUATION_NUDGES/)
})
test('nudge creates a meta user message to continue', async () => {
const content = await file('query.ts').text()
expect(content).toContain(
'Continue with the task. Use the appropriate tools to proceed.',
)
})
})
// ---------------------------------------------------------------------------
// Fix 4: Web search result count improvements
// ---------------------------------------------------------------------------
describe('Web search result count improvements', () => {
test('Bing provider requests at least 15 results', async () => {
const content = await file(
'tools/WebSearchTool/providers/bing.ts',
).text()
expect(content).toMatch(/count.*['"]15['"]/)
})
test('Tavily provider requests at least 15 results', async () => {
const content = await file(
'tools/WebSearchTool/providers/tavily.ts',
).text()
expect(content).toMatch(/max_results:\s*15/)
})
test('Exa provider requests at least 15 results', async () => {
const content = await file(
'tools/WebSearchTool/providers/exa.ts',
).text()
expect(content).toMatch(/numResults:\s*15/)
})
test('Firecrawl provider requests at least 15 results', async () => {
const content = await file(
'tools/WebSearchTool/providers/firecrawl.ts',
).text()
expect(content).toMatch(/limit:\s*15/)
})
test('Mojeek provider requests at least 10 results', async () => {
const content = await file(
'tools/WebSearchTool/providers/mojeek.ts',
).text()
// Mojeek uses 't' param for result count — verify it's set to 10
expect(content).toMatch(/searchParams\.set\('t',\s*'10'\)/)
})
test('You.com provider requests at least 10 results', async () => {
const content = await file(
'tools/WebSearchTool/providers/you.ts',
).text()
expect(content).toMatch(/num_web_results.*['"]10['"]/)
})
test('Jina provider requests at least 10 results', async () => {
const content = await file(
'tools/WebSearchTool/providers/jina.ts',
).text()
expect(content).toMatch(/count.*['"]10['"]/)
})
test('Native Anthropic web search max_uses increased to 15', async () => {
const content = await file(
'tools/WebSearchTool/WebSearchTool.ts',
).text()
expect(content).toMatch(/max_uses:\s*15/)
})
test('codex web search path guarantees a non-empty result body', async () => {
const content = await file(
'tools/WebSearchTool/WebSearchTool.ts',
).text()
expect(content).toContain("results.push('No results found.')")
})
})
// ---------------------------------------------------------------------------
// Fix 5: MCP tool timeout fix
// ---------------------------------------------------------------------------
describe('MCP tool timeout fix', () => {
test('default MCP tool timeout is reasonable (not 27 hours)', async () => {
const content = await file('services/mcp/client.ts').text()
// Should NOT have the old ~27.8 hour default
expect(content).not.toContain('100_000_000')
// Should have a reasonable timeout (5 minutes = 300_000ms)
expect(content).toMatch(/DEFAULT_MCP_TOOL_TIMEOUT_MS\s*=\s*300_000/)
})
test('MCP tools/list has retry logic', async () => {
const content = await file('services/mcp/client.ts').text()
expect(content).toContain('tools/list failed (attempt')
expect(content).toContain('Retrying...')
})
test('MCP URL elicitation checks abort signal', async () => {
const content = await file('services/mcp/client.ts').text()
expect(content).toContain('signal.aborted')
expect(content).toContain('Tool call aborted during URL elicitation')
})
test('MCP tool error messages include server and tool name in telemetry', async () => {
const content = await file('services/mcp/client.ts').text()
// Telemetry message should include context like "MCP tool [serverName] toolName: error"
// The human-readable message stays unchanged to avoid breaking error consumers
expect(content).toContain('MCP tool [${name}] ${tool}:')
})
})
// ---------------------------------------------------------------------------
// Cross-cutting: verify no regressions
// ---------------------------------------------------------------------------
describe('Regression checks', () => {
test('store field is still set for OpenAI (not deleted unconditionally)', async () => {
const content = await file('services/api/openaiShim.ts').text()
// store: false should exist in body construction
expect(content).toMatch(/store:\s*false/)
// But delete body.store should be conditional (guarded by if)
const deleteLines = content.split('\n').filter(l => l.includes('delete body.store'))
expect(deleteLines.length).toBeGreaterThan(0)
// Verify the delete is inside a conditional block by checking surrounding context
for (const line of deleteLines) {
const trimmed = line.trim()
// Should be either inside an if block (indented delete) or a comment
expect(
trimmed.startsWith('delete') && !trimmed.includes('// unconditional'),
).toBe(true)
}
})
})
// ---------------------------------------------------------------------------
// Fix 6: SendMessageTool race condition guard
// ---------------------------------------------------------------------------
describe('SendMessageTool race condition fix', () => {
test('SendMessageTool has double-check for concurrent resume', async () => {
const content = await file('tools/SendMessageTool/SendMessageTool.ts').text()
// Should have a second status check before resuming to prevent race
expect(content).toContain('was concurrently resumed')
// The freshTask check should re-read from getAppState
expect(content).toMatch(/const freshTask = context\.getAppState\(\)\.tasks\[agentId\]/)
})
})
// ---------------------------------------------------------------------------
// Fix 7: AgentTool dump state cleanup
// ---------------------------------------------------------------------------
describe('AgentTool cleanup fix', () => {
test('backgrounded agent always cleans up dump state', async () => {
const content = await file('tools/AgentTool/AgentTool.tsx').text()
// The backgrounded agent's finally block should clean up regardless
// of whether the agent crashed or completed normally
expect(content).toContain('Defensive cleanup: wrap each call so one failure')
// Verify cleanup is wrapped in try/catch for defensive execution
expect(content).toMatch(/try\s*\{\s*clearInvokedSkillsForAgent/)
expect(content).toMatch(/try\s*\{\s*clearDumpState/)
})
})
// ---------------------------------------------------------------------------
// Fix 8: Context overflow 500 error handling
// ---------------------------------------------------------------------------
describe('Context overflow 500 fix', () => {
test('errors.ts has handler for context overflow 500 errors', async () => {
const content = await file('services/api/errors.ts').text()
expect(content).toContain('500 errors caused by context overflow')
expect(content).toContain('too many tokens')
expect(content).toContain('The conversation has grown too large')
})
test('query.ts has circuit breaker safety net for oversized context', async () => {
const content = await file('query.ts').text()
expect(content).toContain('Safety net: when auto-compact')
expect(content).toContain('circuit breaker has tripped')
expect(content).toContain('automatic compaction has failed')
})
})
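
The stream idle-timeout tests above describe a `readWithTimeout` guard for SSE streams. A minimal sketch of that pattern (the constant name comes from the tests; the 120s value and the signature are assumptions):

```ts
// Minimal sketch of the idle-timeout guard the tests above look for:
// each read() races a timer, so a stream that goes silent rejects
// instead of hanging the session forever. The 120s value is assumed;
// the tests only require >= 60s.
const STREAM_IDLE_TIMEOUT_MS = 120_000

async function readWithTimeout<T>(
  reader: ReadableStreamDefaultReader<T>,
): Promise<ReadableStreamReadResult<T>> {
  let timer: ReturnType<typeof setTimeout> | undefined
  const idle = new Promise<never>((_, reject) => {
    timer = setTimeout(
      () => reject(new Error(`stream idle for ${STREAM_IDLE_TIMEOUT_MS}ms`)),
      STREAM_IDLE_TIMEOUT_MS,
    )
  })
  try {
    // Note: on timeout the underlying read() stays pending; callers
    // should cancel the reader when this rejects.
    return await Promise.race([reader.read(), idle])
  } finally {
    clearTimeout(timer)
  }
}
```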

View File

@@ -1,55 +0,0 @@
/**
* Tests for Web Search Provider result count configurations.
*/
import { describe, test, expect } from 'bun:test'
import { resolve } from 'path'
const SRC = resolve(import.meta.dir, '..', 'tools', 'WebSearchTool', 'providers')
const file = (name: string) => Bun.file(resolve(SRC, name))
describe('Provider result counts', () => {
const providers = [
'bing.ts',
'tavily.ts',
'exa.ts',
'firecrawl.ts',
'mojeek.ts',
'you.ts',
'jina.ts',
'duckduckgo.ts',
// linkup.ts excluded — uses depth param, not a result count field
]
for (const name of providers) {
test(`${name} exists and is readable`, async () => {
const f = file(name)
expect(await f.exists()).toBe(true)
const content = await f.text()
expect(content.length).toBeGreaterThan(100)
})
}
test('No provider hardcodes a limit below 10', async () => {
const suspiciousPatterns = [
/count['":\s]*['"]([1-9])['"]/i,
/limit['":\s]*([1-9])\b/,
/max_results['":\s]*([1-9])\b/,
/numResults['":\s]*([1-9])\b/,
]
for (const name of providers) {
const content = await file(name).text()
for (const pattern of suspiciousPatterns) {
const match = content.match(pattern)
if (match) {
const num = parseInt(match[1], 10)
expect(num).toBeGreaterThanOrEqual(
10,
`${name} has suspiciously low result count: ${match[0]}`,
)
}
}
}
})
})

View File

@@ -1,191 +0,0 @@
/**
* Security hardening regression tests.
*
* Covers:
* 1. MCP tool result Unicode sanitization
* 2. Sandbox settings source filtering (exclude projectSettings)
* 3. Plugin git clone/pull hooks disabled
* 4. ANTHROPIC_FOUNDRY_API_KEY removed from SAFE_ENV_VARS
* 5. WebFetch SSRF protection via ssrfGuardedLookup
*/
import { describe, test, expect } from 'bun:test'
import { resolve } from 'path'
const SRC = resolve(import.meta.dir, '..')
const file = (relative: string) => Bun.file(resolve(SRC, relative))
// ---------------------------------------------------------------------------
// Fix 1: MCP tool result Unicode sanitization
// ---------------------------------------------------------------------------
describe('MCP tool result sanitization', () => {
test('transformResultContent sanitizes text content', async () => {
const content = await file('services/mcp/client.ts').text()
// Tool definitions are already sanitized (line ~1798)
expect(content).toContain('recursivelySanitizeUnicode(result.tools)')
// Tool results must also be sanitized
expect(content).toMatch(
/case 'text':[\s\S]*?recursivelySanitizeUnicode\(resultContent\.text\)/,
)
})
test('resource text content is also sanitized', async () => {
const content = await file('services/mcp/client.ts').text()
expect(content).toMatch(
/recursivelySanitizeUnicode\(\s*`\$\{prefix\}\$\{resource\.text\}`/,
)
})
})
// ---------------------------------------------------------------------------
// Fix 2: Sandbox settings source filtering
// ---------------------------------------------------------------------------
describe('Sandbox settings trust boundary', () => {
test('getSandboxEnabledSetting does not use getSettings_DEPRECATED', async () => {
const content = await file('utils/sandbox/sandbox-adapter.ts').text()
// Extract the getSandboxEnabledSetting function body
const fnMatch = content.match(
/function getSandboxEnabledSetting\(\)[^{]*\{([\s\S]*?)\n\}/,
)
expect(fnMatch).not.toBeNull()
const fnBody = fnMatch![1]
// Must NOT use getSettings_DEPRECATED (reads all sources including project)
expect(fnBody).not.toContain('getSettings_DEPRECATED')
// Must use getSettingsForSource for individual trusted sources
expect(fnBody).toContain("getSettingsForSource('userSettings')")
expect(fnBody).toContain("getSettingsForSource('policySettings')")
// Must NOT read from projectSettings
expect(fnBody).not.toContain("'projectSettings'")
})
})
// ---------------------------------------------------------------------------
// Fix 3: Plugin git hooks disabled
// ---------------------------------------------------------------------------
describe('Plugin git operations disable hooks', () => {
test('gitClone includes core.hooksPath=/dev/null', async () => {
const content = await file('utils/plugins/marketplaceManager.ts').text()
// The clone args must disable hooks
const cloneSection = content.slice(
content.indexOf('export async function gitClone('),
content.indexOf('export async function gitClone(') + 2000,
)
expect(cloneSection).toContain("'core.hooksPath=/dev/null'")
})
test('gitPull includes core.hooksPath=/dev/null', async () => {
const content = await file('utils/plugins/marketplaceManager.ts').text()
const pullSection = content.slice(
content.indexOf('export async function gitPull('),
content.indexOf('export async function gitPull(') + 2000,
)
expect(pullSection).toContain("'core.hooksPath=/dev/null'")
})
test('gitSubmoduleUpdate includes core.hooksPath=/dev/null', async () => {
const content = await file('utils/plugins/marketplaceManager.ts').text()
const subSection = content.slice(
content.indexOf('async function gitSubmoduleUpdate('),
content.indexOf('async function gitSubmoduleUpdate(') + 1000,
)
expect(subSection).toContain("'core.hooksPath=/dev/null'")
})
})
// ---------------------------------------------------------------------------
// Fix 4: ANTHROPIC_FOUNDRY_API_KEY not in SAFE_ENV_VARS
// ---------------------------------------------------------------------------
describe('SAFE_ENV_VARS excludes credentials', () => {
test('ANTHROPIC_FOUNDRY_API_KEY is not in SAFE_ENV_VARS', async () => {
const content = await file('utils/managedEnvConstants.ts').text()
// Extract the SAFE_ENV_VARS set definition
const safeStart = content.indexOf('export const SAFE_ENV_VARS')
const safeEnd = content.indexOf('])', safeStart)
const safeSection = content.slice(safeStart, safeEnd)
expect(safeSection).not.toContain('ANTHROPIC_FOUNDRY_API_KEY')
})
})
// ---------------------------------------------------------------------------
// Fix 5: WebFetch SSRF protection
// ---------------------------------------------------------------------------
describe('WebFetch SSRF guard', () => {
test('getWithPermittedRedirects uses ssrfGuardedLookup', async () => {
const content = await file('tools/WebFetchTool/utils.ts').text()
expect(content).toContain(
"import { ssrfGuardedLookup } from '../../utils/hooks/ssrfGuard.js'",
)
// The axios.get call in getWithPermittedRedirects must include lookup
const fnSection = content.slice(
content.indexOf('export async function getWithPermittedRedirects('),
content.indexOf('export async function getWithPermittedRedirects(') +
1000,
)
expect(fnSection).toContain('lookup: ssrfGuardedLookup')
})
})
// ---------------------------------------------------------------------------
// Fix 6: Swarm permission file polling removed (security hardening)
// ---------------------------------------------------------------------------
describe('Swarm permission file polling removed', () => {
test('useSwarmPermissionPoller hook no longer exists', async () => {
const content = await file(
'hooks/useSwarmPermissionPoller.ts',
).text()
// The file-based polling hook must not exist — it read from an
// unauthenticated resolved/ directory where any local process could
// forge approval files.
expect(content).not.toContain('function useSwarmPermissionPoller(')
// The file-based processResponse must not exist
expect(content).not.toContain('function processResponse(')
})
test('poller does not import from permissionSync', async () => {
const content = await file(
'hooks/useSwarmPermissionPoller.ts',
).text()
// Must not import anything from permissionSync — all file-based
// functions have been removed from this module's dependencies
expect(content).not.toContain('permissionSync')
})
test('file-based permission functions are marked deprecated', async () => {
const content = await file(
'utils/swarm/permissionSync.ts',
).text()
// All file-based functions must have @deprecated JSDoc
const deprecatedFns = [
'writePermissionRequest',
'readPendingPermissions',
'readResolvedPermission',
'resolvePermission',
'pollForResponse',
'removeWorkerResponse',
]
for (const fn of deprecatedFns) {
// Find the function and check that @deprecated appears before it
const fnIndex = content.indexOf(`export async function ${fn}(`)
if (fnIndex === -1) continue // submitPermissionRequest is a const, not async function
const preceding = content.slice(Math.max(0, fnIndex - 500), fnIndex)
expect(preceding).toContain('@deprecated')
}
})
test('mailbox-based functions are NOT deprecated', async () => {
const content = await file(
'utils/swarm/permissionSync.ts',
).text()
// These are the active path — must not be deprecated
const activeFns = [
'sendPermissionRequestViaMailbox',
'sendPermissionResponseViaMailbox',
]
for (const fn of activeFns) {
const fnIndex = content.indexOf(`export async function ${fn}(`)
expect(fnIndex).not.toBe(-1)
const preceding = content.slice(Math.max(0, fnIndex - 300), fnIndex)
expect(preceding).not.toContain('@deprecated')
}
})
})
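
The deprecation tests above only assert JSDoc placement; illustratively, the shape they enforce in permissionSync.ts looks like the following (signatures and bodies are assumptions, not the real module):

```ts
/**
 * @deprecated File-based resolution read approval files from an
 * unauthenticated resolved/ directory, so any local process could forge an
 * approval. Use sendPermissionResponseViaMailbox instead.
 */
export async function readResolvedPermission(
  requestId: string,
): Promise<string | null> {
  throw new Error('file-based permission sync is deprecated')
}

// Active path: no @deprecated tag, per the last test above.
export async function sendPermissionResponseViaMailbox(
  requestId: string,
  approved: boolean,
): Promise<void> {
  // ... authenticated mailbox transport (assumed)
}
```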

View File

@@ -1562,8 +1562,29 @@ export function clearInvokedSkillsForAgent(agentId: string): void {
  }
}
-// Slow operations tracking removed (was internal-only).
-// Functions kept as no-ops to avoid breaking callers.
+// Slow operations tracking for dev bar
+const MAX_SLOW_OPERATIONS = 10
+const SLOW_OPERATION_TTL_MS = 10000
+export function addSlowOperation(operation: string, durationMs: number): void {
+  if (process.env.USER_TYPE !== 'ant') return
+  // Skip tracking for editor sessions (user editing a prompt file in $EDITOR)
+  // These are intentionally slow since the user is drafting text
+  if (operation.includes('exec') && operation.includes('claude-prompt-')) {
+    return
+  }
+  const now = Date.now()
+  // Remove stale operations
+  STATE.slowOperations = STATE.slowOperations.filter(
+    op => now - op.timestamp < SLOW_OPERATION_TTL_MS,
+  )
+  // Add new operation
+  STATE.slowOperations.push({ operation, durationMs, timestamp: now })
+  // Keep only the most recent operations
+  if (STATE.slowOperations.length > MAX_SLOW_OPERATIONS) {
+    STATE.slowOperations = STATE.slowOperations.slice(-MAX_SLOW_OPERATIONS)
+  }
+}
const EMPTY_SLOW_OPERATIONS: ReadonlyArray<{
  operation: string
@@ -1571,17 +1592,32 @@ const EMPTY_SLOW_OPERATIONS: ReadonlyArray<{
  timestamp: number
}> = []
-export function addSlowOperation(
-  _operation: string,
-  _durationMs: number,
-): void {}
export function getSlowOperations(): ReadonlyArray<{
  operation: string
  durationMs: number
  timestamp: number
}> {
-  return EMPTY_SLOW_OPERATIONS
+  // Most common case: nothing tracked. Return a stable reference so the
+  // caller's setState() can bail via Object.is instead of re-rendering at 2fps.
+  if (STATE.slowOperations.length === 0) {
+    return EMPTY_SLOW_OPERATIONS
+  }
+  const now = Date.now()
+  // Only allocate a new array when something actually expired; otherwise keep
+  // the reference stable across polls while ops are still fresh.
+  if (
+    STATE.slowOperations.some(op => now - op.timestamp >= SLOW_OPERATION_TTL_MS)
+  ) {
+    STATE.slowOperations = STATE.slowOperations.filter(
+      op => now - op.timestamp < SLOW_OPERATION_TTL_MS,
+    )
+    if (STATE.slowOperations.length === 0) {
+      return EMPTY_SLOW_OPERATIONS
+    }
+  }
+  // Safe to return directly: addSlowOperation() reassigns STATE.slowOperations
+  // before pushing, so the array held in React state is never mutated.
+  return STATE.slowOperations
}
export function getMainThreadAgentType(): string | undefined {
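
The stable-reference comments in this hunk pay off in the consumer. A hypothetical polling hook (the import path, hook name, and 500ms interval are assumptions) shows the Object.is bailout in action:

```ts
import { useEffect, useState } from 'react'
import { getSlowOperations } from './state.js' // path is an assumption

// While nothing is tracked (or nothing has expired), getSlowOperations()
// keeps returning the same array reference, so setOps() is a no-op re-render:
// React compares with Object.is and bails out.
function useSlowOperations(pollMs = 500) {
  const [ops, setOps] = useState(getSlowOperations())
  useEffect(() => {
    const id = setInterval(() => {
      setOps(getSlowOperations())
    }, pollMs)
    return () => clearInterval(id)
  }, [pollMs])
  return ops
}
```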

View File

@@ -14,14 +14,21 @@
import { getOauthConfig } from '../constants/oauth.js'
import { getClaudeAIOAuthTokens } from '../utils/auth.js'
-/** Dev override: CLAUDE_BRIDGE_OAUTH_TOKEN, else undefined. */
+/** Ant-only dev override: CLAUDE_BRIDGE_OAUTH_TOKEN, else undefined. */
export function getBridgeTokenOverride(): string | undefined {
-  return process.env.CLAUDE_BRIDGE_OAUTH_TOKEN || undefined
+  return (
+    (process.env.USER_TYPE === 'ant' &&
+      process.env.CLAUDE_BRIDGE_OAUTH_TOKEN) ||
+    undefined
+  )
}
-/** Dev override: CLAUDE_BRIDGE_BASE_URL, else undefined. */
+/** Ant-only dev override: CLAUDE_BRIDGE_BASE_URL, else undefined. */
export function getBridgeBaseUrlOverride(): string | undefined {
-  return process.env.CLAUDE_BRIDGE_BASE_URL || undefined
+  return (
+    (process.env.USER_TYPE === 'ant' && process.env.CLAUDE_BRIDGE_BASE_URL) ||
+    undefined
+  )
}
/**
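
A small behavior sketch of the gate (values are illustrative):

```ts
process.env.CLAUDE_BRIDGE_OAUTH_TOKEN = 'tok_dev_123'

process.env.USER_TYPE = 'external'
getBridgeTokenOverride() // => undefined: the override is ignored outside ant builds

process.env.USER_TYPE = 'ant'
getBridgeTokenOverride() // => 'tok_dev_123'
```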

View File

@@ -2194,10 +2194,14 @@ export async function bridgeMain(args: string[]): Promise<void> {
  // Session ingress URL for WebSocket connections. In production this is the
  // same as baseUrl (Envoy routes /v1/session_ingress/* to session-ingress).
-  // Locally, session-ingress may run on a different port, so
-  // CLAUDE_BRIDGE_SESSION_INGRESS_URL can override the default.
+  // Locally, session-ingress runs on a different port (9413) than the
+  // contain-provide-api (8211), so CLAUDE_BRIDGE_SESSION_INGRESS_URL must be
+  // set explicitly. Ant-only, matching CLAUDE_BRIDGE_BASE_URL.
  const sessionIngressUrl =
-    process.env.CLAUDE_BRIDGE_SESSION_INGRESS_URL || baseUrl
+    process.env.USER_TYPE === 'ant' &&
+    process.env.CLAUDE_BRIDGE_SESSION_INGRESS_URL
+      ? process.env.CLAUDE_BRIDGE_SESSION_INGRESS_URL
+      : baseUrl
  const { getBranch, getRemoteUrl, findGitRoot } = await import(
    '../utils/git.js'
@@ -2847,7 +2851,10 @@ export async function runBridgeHeadless(
  )
}
  const sessionIngressUrl =
-    process.env.CLAUDE_BRIDGE_SESSION_INGRESS_URL || baseUrl
+    process.env.USER_TYPE === 'ant' &&
+    process.env.CLAUDE_BRIDGE_SESSION_INGRESS_URL
+      ? process.env.CLAUDE_BRIDGE_SESSION_INGRESS_URL
+      : baseUrl
  const { getBranch, getRemoteUrl, findGitRoot } = await import(
    '../utils/git.js'

View File

@@ -217,39 +217,25 @@ export async function getBridgeSession(
  }
  const url = `${opts?.baseUrl ?? getOauthConfig().BASE_API_URL}/v1/sessions/${sessionId}`
-  const timeoutMs = 10_000
  logForDebugging(`[bridge] Fetching session ${sessionId}`)
  let response
  try {
    response = await axios.get<{ environment_id?: string; title?: string }>(
      url,
-      { headers, timeout: timeoutMs, validateStatus: s => s < 500 },
+      { headers, timeout: 10_000, validateStatus: s => s < 500 },
    )
  } catch (err: unknown) {
-    if (axios.isAxiosError(err)) {
-      const status = err.response?.status ?? 'no-response'
-      const code = err.code ?? 'unknown-code'
-      const requestUrl = err.config?.url ?? url
-      const method = err.config?.method?.toUpperCase() ?? 'GET'
-      const message = err.message ?? errorMessage(err)
-      const timeout = err.config?.timeout ?? timeoutMs
-      logForDebugging(
-        `[bridge] Session fetch request failed: status=${status} code=${code} method=${method} url=${requestUrl} timeout=${timeout} message=${message}`,
-      )
-    } else {
-      logForDebugging(
-        `[bridge] Session fetch request failed: url=${url} timeout=${timeoutMs} message=${errorMessage(err)}`,
-      )
-    }
+    logForDebugging(
+      `[bridge] Session fetch request failed: ${errorMessage(err)}`,
+    )
    return null
  }
  if (response.status !== 200) {
    const detail = extractErrorDetail(response.data)
    logForDebugging(
-      `[bridge] Session fetch failed with status ${response.status} url=${url}${detail ? `: ${detail}` : ''}`,
+      `[bridge] Session fetch failed with status ${response.status}${detail ? `: ${detail}` : ''}`,
    )
    return null
  }

View File

@@ -465,7 +465,10 @@ export async function initReplBridge(
  const branch = await getBranch()
  const gitRepoUrl = await getRemoteUrl()
  const sessionIngressUrl =
-    process.env.CLAUDE_BRIDGE_SESSION_INGRESS_URL || baseUrl
+    process.env.USER_TYPE === 'ant' &&
+    process.env.CLAUDE_BRIDGE_SESSION_INGRESS_URL
+      ? process.env.CLAUDE_BRIDGE_SESSION_INGRESS_URL
+      : baseUrl
  // Assistant-mode sessions advertise a distinct worker_type so the web UI
  // can filter them into a dedicated picker. KAIROS guard keeps the

View File

@@ -11,12 +11,7 @@ import { MCPServerDesktopImportDialog } from '../../components/MCPServerDesktopI
import { render } from '../../ink.js';
import { KeybindingSetup } from '../../keybindings/KeybindingProviderSetup.js';
import { type AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS, logEvent } from '../../services/analytics/index.js';
-import {
-  clearMcpClientConfig,
-  clearServerTokensFromSecureStorage,
-  readClientSecret,
-  saveMcpClientSecret,
-} from '../../services/mcp/auth.js'
+import { clearMcpClientConfig, clearServerTokensFromLocalStorage, readClientSecret, saveMcpClientSecret } from '../../services/mcp/auth.js';
import { doctorAllServers, doctorServer, type McpDoctorReport, type McpDoctorScopeFilter } from '../../services/mcp/doctor.js';
import { connectToServer, getMcpServerConnectionBatchSize } from '../../services/mcp/client.js';
import { addMcpConfig, getAllMcpConfigs, getMcpConfigByName, getMcpConfigsByScope, removeMcpConfig } from '../../services/mcp/config.js';

View File

@@ -362,9 +362,15 @@ const proactiveModule =
  feature('PROACTIVE') || feature('KAIROS')
    ? (require('../proactive/index.js') as typeof import('../proactive/index.js'))
    : null
-const cronSchedulerModule = require('../utils/cronScheduler.js') as typeof import('../utils/cronScheduler.js')
-const cronJitterConfigModule = require('../utils/cronJitterConfig.js') as typeof import('../utils/cronJitterConfig.js')
-const cronGate = require('../tools/ScheduleCronTool/prompt.js') as typeof import('../tools/ScheduleCronTool/prompt.js')
+const cronSchedulerModule = feature('AGENT_TRIGGERS')
+  ? (require('../utils/cronScheduler.js') as typeof import('../utils/cronScheduler.js'))
+  : null
+const cronJitterConfigModule = feature('AGENT_TRIGGERS')
+  ? (require('../utils/cronJitterConfig.js') as typeof import('../utils/cronJitterConfig.js'))
+  : null
+const cronGate = feature('AGENT_TRIGGERS')
+  ? (require('../tools/ScheduleCronTool/prompt.js') as typeof import('../tools/ScheduleCronTool/prompt.js'))
+  : null
const extractMemoriesModule = feature('EXTRACT_MEMORIES')
  ? (require('../services/extractMemories/extractMemories.js') as typeof import('../services/extractMemories/extractMemories.js'))
  : null
@@ -2695,7 +2701,11 @@ function runHeadlessStreaming(
  // the end of run() picks up the queued command.
  let cronScheduler: import('../utils/cronScheduler.js').CronScheduler | null =
    null
-  if (cronGate.isKairosCronEnabled()) {
+  if (
+    feature('AGENT_TRIGGERS') &&
+    cronSchedulerModule &&
+    cronGate?.isKairosCronEnabled()
+  ) {
    cronScheduler = cronSchedulerModule.createCronScheduler({
      onFire: prompt => {
        if (inputClosed) return
@@ -2717,8 +2727,8 @@ function runHeadlessStreaming(
      void run()
    },
    isLoading: () => running || inputClosed,
-    getJitterConfig: cronJitterConfigModule.getCronJitterConfig,
-    isKilled: () => !cronGate.isKairosCronEnabled(),
+    getJitterConfig: cronJitterConfigModule?.getCronJitterConfig,
+    isKilled: () => !cronGate?.isKairosCronEnabled(),
  })
  cronScheduler.start()
}
@@ -4582,7 +4592,7 @@ function handleSetPermissionMode(
      subtype: 'error',
      request_id: requestId,
      error:
-        'Cannot set permission mode to bypassPermissions. Enable it with --allow-dangerously-skip-permissions or set permissions.allowBypassPermissionsMode in settings.json',
+        'Cannot set permission mode to bypassPermissions because the session was not launched with --dangerously-skip-permissions',
    },
  })
  return toolPermissionContext
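
The same gating pattern recurs across this hunk. Generically (module name hypothetical, and assuming `feature()` from `bun:bundle` is resolved at bundle time, as this codebase uses it):

```ts
import { feature } from 'bun:bundle'

// When the flag is off, the ternary folds to null, so the require() and the
// module behind it can be dropped from the bundle.
const maybeModule = feature('MY_FLAG')
  ? (require('../utils/myModule.js') as typeof import('../utils/myModule.js'))
  : null

// Every call site must then guard both the flag and the module, exactly as
// the updated runHeadlessStreaming does above:
if (feature('MY_FLAG') && maybeModule) {
  // safe to use maybeModule members here
}
```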

View File

@@ -35,20 +35,15 @@ export async function update() {
  // binary (without it).
  if (getAPIProvider() !== 'firstParty') {
    writeToStdout(
-      chalk.yellow(
-        `Auto-update is not available for third-party provider builds.\n`,
-      ) +
-        `Current version: ${MACRO.DISPLAY_VERSION}\n\n` +
-        `To update, reinstall from npm:\n` +
-        chalk.bold(` npm install -g ${MACRO.PACKAGE_URL}@latest`) + '\n\n' +
-        `Or, if you built from source, pull and rebuild:\n` +
-        chalk.bold(' git pull && bun install && bun run build') + '\n',
+      chalk.yellow('Auto-update is not available for third-party provider builds.\n') +
+        'To update, pull the latest source from the repository and rebuild:\n' +
+        ' git pull && bun install && bun run build\n',
    )
-    await gracefulShutdown(0)
+    return
  }
  logEvent('tengu_update_check', {})
-  writeToStdout(`Current version: ${MACRO.DISPLAY_VERSION}\n`)
+  writeToStdout(`Current version: ${MACRO.VERSION}\n`)
  const channel = getInitialSettings()?.autoUpdatesChannel ?? 'latest'
  writeToStdout(`Checking for updates to ${channel} version...\n`)
@@ -128,14 +123,9 @@
  if (diagnostic.installationType === 'development') {
    writeToStdout('\n')
    writeToStdout(
-      chalk.yellow('You are running a development build — auto-update is unavailable.') + '\n',
+      chalk.yellow('Warning: Cannot update development build') + '\n',
    )
-    writeToStdout('To update, pull the latest source and rebuild:\n')
-    writeToStdout(chalk.bold(' git pull && bun install && bun run build') + '\n')
-    writeToStdout('\n')
-    writeToStdout('Or reinstall from npm:\n')
-    writeToStdout(chalk.bold(` npm install -g ${MACRO.PACKAGE_URL}@latest`) + '\n')
-    await gracefulShutdown(0)
+    await gracefulShutdown(1)
  }
  // Check if running from a package manager
@@ -146,8 +136,8 @@
  if (packageManager === 'homebrew') {
    writeToStdout('Claude is managed by Homebrew.\n')
    const latest = await getLatestVersion(channel)
-    if (latest && !gte(MACRO.DISPLAY_VERSION, latest)) {
-      writeToStdout(`Update available: ${MACRO.DISPLAY_VERSION} → ${latest}\n`)
+    if (latest && !gte(MACRO.VERSION, latest)) {
+      writeToStdout(`Update available: ${MACRO.VERSION} → ${latest}\n`)
      writeToStdout('\n')
      writeToStdout('To update, run:\n')
      writeToStdout(chalk.bold(' brew upgrade claude-code') + '\n')
@@ -157,8 +147,8 @@
  } else if (packageManager === 'winget') {
    writeToStdout('Claude is managed by winget.\n')
    const latest = await getLatestVersion(channel)
-    if (latest && !gte(MACRO.DISPLAY_VERSION, latest)) {
-      writeToStdout(`Update available: ${MACRO.DISPLAY_VERSION} → ${latest}\n`)
+    if (latest && !gte(MACRO.VERSION, latest)) {
+      writeToStdout(`Update available: ${MACRO.VERSION} → ${latest}\n`)
      writeToStdout('\n')
      writeToStdout('To update, run:\n')
      writeToStdout(
@@ -170,8 +160,8 @@
  } else if (packageManager === 'apk') {
    writeToStdout('Claude is managed by apk.\n')
    const latest = await getLatestVersion(channel)
-    if (latest && !gte(MACRO.DISPLAY_VERSION, latest)) {
-      writeToStdout(`Update available: ${MACRO.DISPLAY_VERSION} → ${latest}\n`)
+    if (latest && !gte(MACRO.VERSION, latest)) {
+      writeToStdout(`Update available: ${MACRO.VERSION} → ${latest}\n`)
      writeToStdout('\n')
      writeToStdout('To update, run:\n')
      writeToStdout(chalk.bold(' apk upgrade claude-code') + '\n')
@@ -260,14 +250,14 @@
    await gracefulShutdown(1)
  }
-  if (result.latestVersion === MACRO.DISPLAY_VERSION) {
+  if (result.latestVersion === MACRO.VERSION) {
    writeToStdout(
-      chalk.green(`OpenClaude is up to date (${MACRO.DISPLAY_VERSION})`) + '\n',
+      chalk.green(`Claude Code is up to date (${MACRO.VERSION})`) + '\n',
    )
  } else {
    writeToStdout(
      chalk.green(
-        `Successfully updated from ${MACRO.DISPLAY_VERSION} to version ${result.latestVersion}`,
+        `Successfully updated from ${MACRO.VERSION} to version ${result.latestVersion}`,
      ) + '\n',
    )
    await regenerateCompletionCache()
@@ -330,15 +320,15 @@
  }
  // Check if versions match exactly, including any build metadata (like SHA)
-  if (latestVersion === MACRO.DISPLAY_VERSION) {
+  if (latestVersion === MACRO.VERSION) {
    writeToStdout(
-      chalk.green(`OpenClaude is up to date (${MACRO.DISPLAY_VERSION})`) + '\n',
+      chalk.green(`Claude Code is up to date (${MACRO.VERSION})`) + '\n',
    )
    await gracefulShutdown(0)
  }
  writeToStdout(
-    `New version available: ${latestVersion} (current: ${MACRO.DISPLAY_VERSION})\n`,
+    `New version available: ${latestVersion} (current: ${MACRO.VERSION})\n`,
  )
  writeToStdout('Installing update...\n')
@@ -398,7 +388,7 @@
  case 'success':
    writeToStdout(
      chalk.green(
-        `Successfully updated from ${MACRO.DISPLAY_VERSION} to version ${latestVersion}`,
+        `Successfully updated from ${MACRO.VERSION} to version ${latestVersion}`,
      ) + '\n',
    )
    await regenerateCompletionCache()
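
Each package-manager branch above repeats the same version gate. In isolation (versions illustrative, `gte` from semver):

```ts
import { gte } from 'semver'

const current = '2.0.1' // stand-in for MACRO.VERSION
const latest = '2.1.0' // stand-in for getLatestVersion(channel)

// !gte(current, latest) is true only when current < latest, so builds that
// are equal to or newer than the published version stay quiet.
if (latest && !gte(current, latest)) {
  console.log(`Update available: ${current} → ${latest}`)
}
```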

View File

@@ -1,30 +0,0 @@
import { formatDescriptionWithSource } from './commands.js'
describe('formatDescriptionWithSource', () => {
test('returns empty text for prompt commands missing a description', () => {
const command = {
name: 'example',
type: 'prompt',
source: 'builtin',
description: undefined,
} as any
expect(formatDescriptionWithSource(command)).toBe('')
})
test('formats plugin commands with missing description safely', () => {
const command = {
name: 'example',
type: 'prompt',
source: 'plugin',
description: undefined,
pluginInfo: {
pluginManifest: {
name: 'MyPlugin',
},
},
} as any
expect(formatDescriptionWithSource(command)).toBe('(MyPlugin) ')
})
})

View File

@@ -740,23 +740,23 @@ export function getCommand(commandName: string, commands: Command[]): Command {
 */
export function formatDescriptionWithSource(cmd: Command): string {
  if (cmd.type !== 'prompt') {
-    return cmd.description ?? ''
+    return cmd.description
  }
  if (cmd.kind === 'workflow') {
-    return `${cmd.description ?? ''} (workflow)`
+    return `${cmd.description} (workflow)`
  }
  if (cmd.source === 'plugin') {
    const pluginName = cmd.pluginInfo?.pluginManifest.name
    if (pluginName) {
-      return `(${pluginName}) ${cmd.description ?? ''}`
+      return `(${pluginName}) ${cmd.description}`
    }
-    return `${cmd.description ?? ''} (plugin)`
+    return `${cmd.description} (plugin)`
  }
  if (cmd.source === 'builtin' || cmd.source === 'mcp') {
-    return cmd.description ?? ''
+    return cmd.description
  }
  if (cmd.source === 'bundled') {
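
The `?? ''` guards being dropped here were doing real work when `description` is undefined, which the deleted test above pinned down:

```ts
const description = undefined

`(MyPlugin) ${description}` // => '(MyPlugin) undefined'
`(MyPlugin) ${description ?? ''}` // => '(MyPlugin) ' (what the deleted test asserted)
```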

View File

@@ -1,56 +0,0 @@
import type { ToolUseContext } from '../Tool.js'
import type { Command } from '../types/command.js'
import {
benchmarkModel,
benchmarkMultipleModels,
formatBenchmarkResults,
isBenchmarkSupported,
} from '../utils/model/benchmark.js'
import { getOllamaModelOptions } from '../utils/model/ollamaModels.js'
async function runBenchmark(
model?: string,
context?: ToolUseContext,
): Promise<void> {
if (!isBenchmarkSupported()) {
context?.stdout?.write(
'Benchmark not supported for this provider.\n' +
'Supported: OpenAI-compatible endpoints (Ollama, NVIDIA NIM, MiniMax)\n',
)
return
}
let modelsToBenchmark: string[]
if (model) {
modelsToBenchmark = [model]
} else {
const ollamaModels = getOllamaModelOptions()
modelsToBenchmark = ollamaModels.slice(0, 3).map((m) => m.value)
}
context?.stdout?.write(`Benchmarking ${modelsToBenchmark.length} model(s)...\n`)
const results = await benchmarkMultipleModels(
modelsToBenchmark,
(completed, total, result) => {
context?.stdout?.write(
`[${completed}/${total}] ${result.model}: ` +
`${result.success ? result.tokensPerSecond.toFixed(1) + ' tps' : 'FAILED'}\n`,
)
},
)
context?.stdout?.write('\n' + formatBenchmarkResults(results) + '\n')
}
export const benchmark: Command = {
name: 'benchmark',
async onExecute(context: ToolUseContext): Promise<void> {
const args = context.args ?? {}
const model = args.model as string | undefined
await runBenchmark(model, context)
},
}

View File

@@ -45,7 +45,7 @@ function getPromptContent(
<!-- CHANGELOG:END -->`
  let slackStep = `
-5. After creating/updating the PR, check if the user's AGENTS.md or CLAUDE.md mentions posting to Slack channels. If it does, use ToolSearch to search for "slack send message" tools. If ToolSearch finds a Slack tool, ask the user if they'd like you to post the PR URL to the relevant Slack channel. Only post if the user confirms. If ToolSearch returns no results or errors, skip this step silently—do not mention the failure, do not attempt workarounds, and do not try alternative approaches.`
+5. After creating/updating the PR, check if the user's CLAUDE.md mentions posting to Slack channels. If it does, use ToolSearch to search for "slack send message" tools. If ToolSearch finds a Slack tool, ask the user if they'd like you to post the PR URL to the relevant Slack channel. Only post if the user confirms. If ToolSearch returns no results or errors, skip this step silently—do not mention the failure, do not attempt workarounds, and do not try alternative approaches.`
  if (process.env.USER_TYPE === 'ant' && isUndercover()) {
    prefix = getUndercoverInstructions() + '\n'
    reviewerArg = ''

View File

@@ -1,43 +0,0 @@
import { afterEach, expect, mock, test } from 'bun:test'
const originalClaudeCodeNewInit = process.env.CLAUDE_CODE_NEW_INIT
async function importInitCommand() {
return (await import(`./init.ts?ts=${Date.now()}-${Math.random()}`)).default
}
afterEach(() => {
mock.restore()
if (originalClaudeCodeNewInit === undefined) {
delete process.env.CLAUDE_CODE_NEW_INIT
} else {
process.env.CLAUDE_CODE_NEW_INIT = originalClaudeCodeNewInit
}
})
test('NEW_INIT prompt preserves existing root CLAUDE.md by default', async () => {
process.env.CLAUDE_CODE_NEW_INIT = '1'
mock.module('../projectOnboardingState.js', () => ({
maybeMarkProjectOnboardingComplete: () => {},
}))
mock.module('./initMode.js', () => ({
isNewInitEnabled: () => true,
}))
const command = await importInitCommand()
const blocks = await command.getPromptForCommand()
expect(blocks).toHaveLength(1)
expect(blocks[0]?.type).toBe('text')
expect(String(blocks[0]?.text)).toContain(
'checked-in root `CLAUDE.md` and does NOT already have a root `AGENTS.md`',
)
expect(String(blocks[0]?.text)).toContain(
'do NOT silently create a second root instruction file',
)
expect(String(blocks[0]?.text)).toContain(
'update the existing root `CLAUDE.md` in place by default',
)
})

View File

@@ -1,6 +1,7 @@
+import { feature } from 'bun:bundle'
import type { Command } from '../commands.js'
import { maybeMarkProjectOnboardingComplete } from '../projectOnboardingState.js'
-import { isNewInitEnabled } from './initMode.js'
+import { isEnvTruthy } from '../utils/envUtils.js'
const OLD_INIT_PROMPT = `Please analyze this codebase and create a CLAUDE.md file, which will be given to future instances of Claude Code to operate in this repository.
@@ -24,19 +25,19 @@ Usage notes:
This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.
\`\`\``
-const NEW_INIT_PROMPT = `Set up a minimal AGENTS.md (and optionally CLAUDE.local.md, skills, and hooks) for this repo. The root project instruction file is loaded into every Claude Code session, so it must be concise — only include what Claude would get wrong without it.
+const NEW_INIT_PROMPT = `Set up a minimal CLAUDE.md (and optionally skills and hooks) for this repo. CLAUDE.md is loaded into every Claude Code session, so it must be concise — only include what Claude would get wrong without it.
## Phase 1: Ask what to set up
Use AskUserQuestion to find out what the user wants:
-- "Which instruction files should /init set up?"
-  Options: "Project AGENTS.md" | "Personal CLAUDE.local.md" | "Both project + personal"
+- "Which CLAUDE.md files should /init set up?"
+  Options: "Project CLAUDE.md" | "Personal CLAUDE.local.md" | "Both project + personal"
  Description for project: "Team-shared instructions checked into source control — architecture, coding standards, common workflows."
  Description for personal: "Your private preferences for this project (gitignored, not shared) — your role, sandbox URLs, preferred test data, workflow quirks."
- "Also set up skills and hooks?"
-  Options: "Skills + hooks" | "Skills only" | "Hooks only" | "Neither, just the instruction file(s)"
+  Options: "Skills + hooks" | "Skills only" | "Hooks only" | "Neither, just CLAUDE.md"
  Description for skills: "On-demand capabilities you or Claude invoke with \`/skill-name\` — good for repeatable workflows and reference knowledge."
  Description for hooks: "Deterministic shell commands that run on tool events (e.g., format after every edit). Claude can't skip them."
@@ -58,24 +59,24 @@ Note what you could NOT figure out from code alone — these become interview qu
## Phase 3: Fill in the gaps
-Use AskUserQuestion to gather what you still need to write good instruction files and skills. Ask only things the code can't answer.
+Use AskUserQuestion to gather what you still need to write good CLAUDE.md files and skills. Ask only things the code can't answer.
-If the user chose project AGENTS.md or both: ask about codebase practices — non-obvious commands, gotchas, branch/PR conventions, required env setup, testing quirks. Skip things already in README or obvious from manifest files. Do not mark any options as "recommended" — this is about how their team works, not best practices.
+If the user chose project CLAUDE.md or both: ask about codebase practices — non-obvious commands, gotchas, branch/PR conventions, required env setup, testing quirks. Skip things already in README or obvious from manifest files. Do not mark any options as "recommended" — this is about how their team works, not best practices.
If the user chose personal CLAUDE.local.md or both: ask about them, not the codebase. Do not mark any options as "recommended" — this is about their personal preferences, not best practices. Examples of questions:
- What's their role on the team? (e.g., "backend engineer", "data scientist", "new hire onboarding")
- How familiar are they with this codebase and its languages/frameworks? (so Claude can calibrate explanation depth)
- Do they have personal sandbox URLs, test accounts, API key paths, or local setup details Claude should know?
-- Only if Phase 2 found multiple git worktrees: ask whether their worktrees are nested inside the main repo (e.g., \`.claude/worktrees/<name>/\`) or siblings/external (e.g., \`../myrepo-feature/\`). If nested, the upward file walk finds the main repo's CLAUDE.local.md automatically — no special handling needed. If sibling/external, the personal content should live in a home-directory file (e.g., \`~/.claude/<project-name>-instructions.md\`) and each worktree gets a one-line CLAUDE.local.md stub that imports it: \`@~/.claude/<project-name>-instructions.md\`. Never put this import in the project AGENTS.md — that would check a personal reference into the team-shared file.
+- Only if Phase 2 found multiple git worktrees: ask whether their worktrees are nested inside the main repo (e.g., \`.claude/worktrees/<name>/\`) or siblings/external (e.g., \`../myrepo-feature/\`). If nested, the upward file walk finds the main repo's CLAUDE.local.md automatically — no special handling needed. If sibling/external, the personal content should live in a home-directory file (e.g., \`~/.claude/<project-name>-instructions.md\`) and each worktree gets a one-line CLAUDE.local.md stub that imports it: \`@~/.claude/<project-name>-instructions.md\`. Never put this import in the project CLAUDE.md — that would check a personal reference into the team-shared file.
- Any communication preferences? (e.g., "be terse", "always explain tradeoffs", "don't summarize at the end")
-**Synthesize a proposal from Phase 2 findings** — e.g., format-on-edit if a formatter exists, a project verification workflow if tests exist, an AGENTS.md note for anything from the gap-fill answers that's a guideline rather than a workflow. For each, pick the artifact type that fits, **constrained by the Phase 1 skills+hooks choice**:
+**Synthesize a proposal from Phase 2 findings** — e.g., format-on-edit if a formatter exists, a project verification workflow if tests exist, a CLAUDE.md note for anything from the gap-fill answers that's a guideline rather than a workflow. For each, pick the artifact type that fits, **constrained by the Phase 1 skills+hooks choice**:
- **Hook** (stricter) — deterministic shell command on a tool event; Claude can't skip it. Fits mechanical, fast, per-edit steps: formatting, linting, running a quick test on the changed file.
- **Skill** (on-demand) — you or Claude invoke \`/skill-name\` when you want it. Fits workflows that don't belong on every edit: deep verification, session reports, deploys.
-- **AGENTS.md note** (looser) — influences Claude's behavior but not enforced. Fits communication/thinking preferences: "plan before coding", "be terse", "explain tradeoffs".
+- **CLAUDE.md note** (looser) — influences Claude's behavior but not enforced. Fits communication/thinking preferences: "plan before coding", "be terse", "explain tradeoffs".
-**Respect Phase 1's skills+hooks choice as a hard filter**: if the user picked "Skills only", downgrade any hook you'd suggest to a skill or an AGENTS.md note. If "Hooks only", downgrade skills to hooks (where mechanically possible) or notes. If "Neither", everything becomes an AGENTS.md note. Never propose an artifact type the user didn't opt into.
+**Respect Phase 1's skills+hooks choice as a hard filter**: if the user picked "Skills only", downgrade any hook you'd suggest to a skill or a CLAUDE.md note. If "Hooks only", downgrade skills to hooks (where mechanically possible) or notes. If "Neither", everything becomes a CLAUDE.md note. Never propose an artifact type the user didn't opt into.
**Show the proposal via AskUserQuestion's \`preview\` field, not as a separate text message** — the dialog overlays your output, so preceding text is hidden. The \`preview\` field renders markdown in a side-panel (like plan mode); the \`question\` field is plain-text-only. Structure it as:
@@ -85,19 +86,17 @@ If the user chose personal CLAUDE.local.md or both: ask about them, not the code
• **Format-on-edit hook** (automatic) — \`ruff format <file>\` via PostToolUse
• **Verification workflow** (on-demand) — \`make lint && make typecheck && make test\`
-• **AGENTS.md note** (guideline) — "run lint/typecheck/test before marking done"
+• **CLAUDE.md note** (guideline) — "run lint/typecheck/test before marking done"
- Option labels stay short ("Looks good", "Drop the hook", "Drop the skill") — the tool auto-adds an "Other" free-text option, so don't add your own catch-all.
**Build the preference queue** from the accepted proposal. Each entry: {type: hook|skill|note, description, target file, any Phase-2-sourced details like the actual test/format command}. Phases 4-7 consume this queue.
-## Phase 4: Write AGENTS.md (if user chose project or both)
+## Phase 4: Write CLAUDE.md (if user chose project or both)
-Write a minimal AGENTS.md at the project root. Every line must pass this test: "Would removing this cause Claude to make mistakes?" If no, cut it.
+Write a minimal CLAUDE.md at the project root. Every line must pass this test: "Would removing this cause Claude to make mistakes?" If no, cut it.
-If the repo already has a checked-in root \`CLAUDE.md\` and does NOT already have a root \`AGENTS.md\`, do NOT silently create a second root instruction file. In that case, update the existing root \`CLAUDE.md\` in place by default. Only create or migrate to root \`AGENTS.md\` if the user explicitly asks to migrate.
-**Consume \`note\` entries from the Phase 3 preference queue whose target is AGENTS.md** (team-level notes) — add each as a concise line in the most relevant section. These are the behaviors the user wants Claude to follow but didn't need guaranteed (e.g., "propose a plan before implementing", "explain the tradeoffs when refactoring"). Leave personal-targeted notes for Phase 5.
+**Consume \`note\` entries from the Phase 3 preference queue whose target is CLAUDE.md** (team-level notes) — add each as a concise line in the most relevant section. These are the behaviors the user wants Claude to follow but didn't need guaranteed (e.g., "propose a plan before implementing", "explain the tradeoffs when refactoring"). Leave personal-targeted notes for Phase 5.
Include:
- Build/test/lint commands Claude can't guess (non-standard scripts, flags, or sequences)
@@ -112,7 +111,7 @@ Exclude:
- File-by-file structure or component lists (Claude can discover these by reading the codebase)
- Standard language conventions Claude already knows
- Generic advice ("write clean code", "handle errors")
-- Detailed API docs or long references — use \`@path/to/import\` syntax instead (e.g., \`@docs/api-reference.md\`) to inline content on demand without bloating AGENTS.md
+- Detailed API docs or long references — use \`@path/to/import\` syntax instead (e.g., \`@docs/api-reference.md\`) to inline content on demand without bloating CLAUDE.md
- Information that changes frequently — reference the source with \`@path/to/import\` so Claude always reads the current version
- Long tutorials or walkthroughs (move to a separate file and reference with \`@path/to/import\`, or put in a skill)
- Commands obvious from manifest files (e.g., standard "npm test", "cargo test", "pytest")
@@ -124,20 +123,20 @@ Do not repeat yourself and do not make up sections like "Common Development Task
Prefix the file with:
\`\`\`
-# AGENTS.md
+# CLAUDE.md
This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.
\`\`\`
-If AGENTS.md already exists: read it, propose specific changes as diffs, and explain why each change improves it. Do not silently overwrite.
+If CLAUDE.md already exists: read it, propose specific changes as diffs, and explain why each change improves it. Do not silently overwrite.
-For projects with multiple concerns, suggest organizing instructions into \`.claude/rules/\` as separate focused files (e.g., \`code-style.md\`, \`testing.md\`, \`security.md\`). These are loaded automatically alongside AGENTS.md and can be scoped to specific file paths using \`paths\` frontmatter.
+For projects with multiple concerns, suggest organizing instructions into \`.claude/rules/\` as separate focused files (e.g., \`code-style.md\`, \`testing.md\`, \`security.md\`). These are loaded automatically alongside CLAUDE.md and can be scoped to specific file paths using \`paths\` frontmatter.
-For projects with distinct subdirectories (monorepos, multi-module projects, etc.): mention that subdirectory AGENTS.md files can be added for module-specific instructions (they're loaded automatically when Claude works in those directories). Offer to create them if the user wants.
+For projects with distinct subdirectories (monorepos, multi-module projects, etc.): mention that subdirectory CLAUDE.md files can be added for module-specific instructions (they're loaded automatically when Claude works in those directories). Offer to create them if the user wants.
## Phase 5: Write CLAUDE.local.md (if user chose personal or both)
-Write a minimal CLAUDE.local.md at the project root. This file is automatically loaded alongside AGENTS.md. After creating it, add \`CLAUDE.local.md\` to the project's .gitignore so it stays private.
+Write a minimal CLAUDE.local.md at the project root. This file is automatically loaded alongside CLAUDE.md. After creating it, add \`CLAUDE.local.md\` to the project's .gitignore so it stays private.
**Consume \`note\` entries from the Phase 3 preference queue whose target is CLAUDE.local.md** (personal-level notes) — add each as a concise line. If the user chose personal-only in Phase 1, this is the sole consumer of note entries.
@@ -148,7 +147,7 @@ Include:
Keep it short — only include what would make Claude's responses noticeably better for this user.
-If Phase 2 found multiple git worktrees and the user confirmed they use sibling/external worktrees (not nested inside the main repo): the upward file walk won't find a single CLAUDE.local.md from all worktrees. Write the actual personal content to \`~/.claude/<project-name>-instructions.md\` and make CLAUDE.local.md a one-line stub that imports it: \`@~/.claude/<project-name>-instructions.md\`. The user can copy this one-line stub to each sibling worktree. Never put this import in the project AGENTS.md. If worktrees are nested inside the main repo (e.g., \`.claude/worktrees/\`), no special handling is needed — the main repo's CLAUDE.local.md is found automatically.
+If Phase 2 found multiple git worktrees and the user confirmed they use sibling/external worktrees (not nested inside the main repo): the upward file walk won't find a single CLAUDE.local.md from all worktrees. Write the actual personal content to \`~/.claude/<project-name>-instructions.md\` and make CLAUDE.local.md a one-line stub that imports it: \`@~/.claude/<project-name>-instructions.md\`. The user can copy this one-line stub to each sibling worktree. Never put this import in the project CLAUDE.md. If worktrees are nested inside the main repo (e.g., \`.claude/worktrees/\`), no special handling is needed — the main repo's CLAUDE.local.md is found automatically.
If CLAUDE.local.md already exists: read it, propose specific additions, and do not silently overwrite.
@@ -184,7 +183,7 @@ Both the user (\`/<skill-name>\`) and Claude can invoke skills by default. For w
## Phase 7: Suggest additional optimizations
-Tell the user you're going to suggest a few additional optimizations now that AGENTS.md and skills (if chosen) are in place.
+Tell the user you're going to suggest a few additional optimizations now that CLAUDE.md and skills (if chosen) are in place.
Check the environment and ask about each gap you find (use AskUserQuestion):
@@ -196,7 +195,7 @@ Check the environment and ask about each gap you find (use AskUserQuestion):
For each hook preference (from the queue or the formatter fallback):
-1. Target file: default based on the Phase 1 instruction-file choice — project → \`.claude/settings.json\` (team-shared, committed); personal → \`.claude/settings.local.json\`. Only ask if the user chose "both" in Phase 1 or the preference is ambiguous. Ask once for all hooks, not per-hook.
+1. Target file: default based on the Phase 1 CLAUDE.md choice — project → \`.claude/settings.json\` (team-shared, committed); personal → \`.claude/settings.local.json\`. Only ask if the user chose "both" in Phase 1 or the preference is ambiguous. Ask once for all hooks, not per-hook.
2. Pick the event and matcher from the preference:
   - "after every edit" → \`PostToolUse\` with matcher \`Write|Edit\`
@@ -228,9 +227,11 @@ const command = {
  type: 'prompt',
  name: 'init',
  get description() {
-    return isNewInitEnabled()
-      ? 'Initialize new project instruction file(s) and optional skills/hooks with codebase documentation'
-      : 'Initialize a new project instruction file with codebase documentation'
+    return feature('NEW_INIT') &&
+      (process.env.USER_TYPE === 'ant' ||
+        isEnvTruthy(process.env.CLAUDE_CODE_NEW_INIT))
+      ? 'Initialize new CLAUDE.md file(s) and optional skills/hooks with codebase documentation'
+      : 'Initialize a new CLAUDE.md file with codebase documentation'
  },
  contentLength: 0, // Dynamic content
  progressMessage: 'analyzing your codebase',
@@ -241,7 +242,12 @@ const command = {
    return [
      {
        type: 'text',
-        text: isNewInitEnabled() ? NEW_INIT_PROMPT : OLD_INIT_PROMPT,
+        text:
+          feature('NEW_INIT') &&
+          (process.env.USER_TYPE === 'ant' ||
+            isEnvTruthy(process.env.CLAUDE_CODE_NEW_INIT))
+            ? NEW_INIT_PROMPT
+            : OLD_INIT_PROMPT,
      },
    ]
  },

View File

@@ -1,13 +0,0 @@
import { feature } from 'bun:bundle'
import { isEnvTruthy } from '../utils/envUtils.js'
export function isNewInitEnabled(): boolean {
if (feature('NEW_INIT')) {
return (
process.env.USER_TYPE === 'ant' ||
isEnvTruthy(process.env.CLAUDE_CODE_NEW_INIT)
)
}
return false
}

View File

@@ -1,12 +1,17 @@
import { execFileSync } from 'child_process'
import { diffLines } from 'diff'
+import { constants as fsConstants } from 'fs'
import {
+  copyFile,
  mkdir,
+  mkdtemp,
  readdir,
  readFile,
+  rm,
  unlink,
  writeFile,
} from 'fs/promises'
+import { tmpdir } from 'os'
import { extname, join } from 'path'
import type { Command } from '../commands.js'
import { queryWithModel } from '../services/api/claude.js'
@@ -17,6 +22,7 @@ import {
import type { LogOption } from '../types/logs.js'
import { getClaudeConfigHomeDir } from '../utils/envUtils.js'
import { toError } from '../utils/errors.js'
+import { execFileNoThrow } from '../utils/execFileNoThrow.js'
import { logError } from '../utils/log.js'
import { extractTextContent } from '../utils/messages.js'
import { getDefaultOpusModel } from '../utils/model/model.js'
@@ -41,6 +47,180 @@ function getInsightsModel(): string {
  return getDefaultOpusModel()
}
// ============================================================================
// Homespace Data Collection
// ============================================================================
type RemoteHostInfo = {
name: string
sessionCount: number
}
/* eslint-disable custom-rules/no-process-env-top-level */
const getRunningRemoteHosts: () => Promise<string[]> =
process.env.USER_TYPE === 'ant'
? async () => {
const { stdout, code } = await execFileNoThrow(
'coder',
['list', '-o', 'json'],
{ timeout: 30000 },
)
if (code !== 0) return []
try {
const workspaces = jsonParse(stdout) as Array<{
name: string
latest_build?: { status?: string }
}>
return workspaces
.filter(w => w.latest_build?.status === 'running')
.map(w => w.name)
} catch {
return []
}
}
: async () => []
const getRemoteHostSessionCount: (hs: string) => Promise<number> =
process.env.USER_TYPE === 'ant'
? async (homespace: string) => {
const { stdout, code } = await execFileNoThrow(
'ssh',
[
`${homespace}.coder`,
'find /root/.claude/projects -name "*.jsonl" 2>/dev/null | wc -l',
],
{ timeout: 30000 },
)
if (code !== 0) return 0
return parseInt(stdout.trim(), 10) || 0
}
: async () => 0
const collectFromRemoteHost: (
hs: string,
destDir: string,
) => Promise<{ copied: number; skipped: number }> =
process.env.USER_TYPE === 'ant'
? async (homespace: string, destDir: string) => {
const result = { copied: 0, skipped: 0 }
// Create temp directory
const tempDir = await mkdtemp(join(tmpdir(), 'claude-hs-'))
try {
// SCP the projects folder
const scpResult = await execFileNoThrow(
'scp',
['-rq', `${homespace}.coder:/root/.claude/projects/`, tempDir],
{ timeout: 300000 },
)
if (scpResult.code !== 0) {
// SCP failed
return result
}
const projectsDir = join(tempDir, 'projects')
let projectDirents: Awaited<ReturnType<typeof readdir>>
try {
projectDirents = await readdir(projectsDir, { withFileTypes: true })
} catch {
return result
}
// Merge into destination (parallel per project directory)
await Promise.all(
projectDirents.map(async dirent => {
const projectName = dirent.name
const projectPath = join(projectsDir, projectName)
// Skip if not a directory
if (!dirent.isDirectory()) return
const destProjectName = `${projectName}__${homespace}`
const destProjectPath = join(destDir, destProjectName)
try {
await mkdir(destProjectPath, { recursive: true })
} catch {
// Directory may already exist
}
// Copy session files (skip existing)
let files: Awaited<ReturnType<typeof readdir>>
try {
files = await readdir(projectPath, { withFileTypes: true })
} catch {
return
}
await Promise.all(
files.map(async fileDirent => {
const fileName = fileDirent.name
if (!fileName.endsWith('.jsonl')) return
const srcFile = join(projectPath, fileName)
const destFile = join(destProjectPath, fileName)
try {
await copyFile(srcFile, destFile, fsConstants.COPYFILE_EXCL)
result.copied++
} catch {
// EEXIST from COPYFILE_EXCL means dest already exists
result.skipped++
}
}),
)
}),
)
} finally {
try {
await rm(tempDir, { recursive: true, force: true })
} catch {
// Ignore cleanup errors
}
}
return result
}
: async () => ({ copied: 0, skipped: 0 })
const collectAllRemoteHostData: (destDir: string) => Promise<{
hosts: RemoteHostInfo[]
totalCopied: number
totalSkipped: number
}> =
process.env.USER_TYPE === 'ant'
? async (destDir: string) => {
const rHosts = await getRunningRemoteHosts()
const result: RemoteHostInfo[] = []
let totalCopied = 0
let totalSkipped = 0
// Collect from all hosts in parallel (SCP per host can take seconds)
const hostResults = await Promise.all(
rHosts.map(async hs => {
const sessionCount = await getRemoteHostSessionCount(hs)
if (sessionCount > 0) {
const { copied, skipped } = await collectFromRemoteHost(
hs,
destDir,
)
return { name: hs, sessionCount, copied, skipped }
}
return { name: hs, sessionCount, copied: 0, skipped: 0 }
}),
)
for (const hr of hostResults) {
result.push({ name: hr.name, sessionCount: hr.sessionCount })
totalCopied += hr.copied
totalSkipped += hr.skipped
}
return { hosts: result, totalCopied, totalSkipped }
}
: async () => ({ hosts: [], totalCopied: 0, totalSkipped: 0 })
/* eslint-enable custom-rules/no-process-env-top-level */
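
The merge step in collectFromRemoteHost above leans on COPYFILE_EXCL for idempotency. In isolation (paths hypothetical):

```ts
import { constants } from 'fs'
import { copyFile } from 'fs/promises'

// COPYFILE_EXCL makes the copy fail with EEXIST instead of overwriting, so
// re-running collection counts already-present sessions as skipped rather
// than clobbering files pulled down on an earlier run.
try {
  await copyFile('/tmp/in.jsonl', '/dest/in.jsonl', constants.COPYFILE_EXCL)
  // copied
} catch {
  // EEXIST: destination already present, treat as skipped
}
```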
// ============================================================================
// Types
// ============================================================================
@@ -2479,6 +2659,7 @@ export type InsightsExport = {
    claude_code_version: string
    date_range: { start: string; end: string }
    session_count: number
+    remote_hosts_collected?: string[]
  }
  aggregated_data: AggregatedData
  insights: InsightResults
@@ -2499,9 +2680,14 @@ export function buildExportData(
  data: AggregatedData,
  insights: InsightResults,
  facets: Map<string, SessionFacets>,
+  remoteStats?: { hosts: RemoteHostInfo[]; totalCopied: number },
): InsightsExport {
  const version = typeof MACRO !== 'undefined' ? MACRO.VERSION : 'unknown'
+  const remote_hosts_collected = remoteStats?.hosts
+    .filter(h => h.sessionCount > 0)
+    .map(h => h.name)
  const facets_summary = {
    total: facets.size,
    goal_categories: {} as Record<string, number>,
@@ -2539,6 +2725,10 @@
      claude_code_version: version,
      date_range: data.date_range,
      session_count: data.total_sessions,
+      ...(remote_hosts_collected &&
+        remote_hosts_collected.length > 0 && {
+          remote_hosts_collected,
+        }),
    },
    aggregated_data: data,
    insights,
@@ -2603,12 +2793,24 @@ async function scanAllSessions(): Promise<LiteSessionInfo[]> {
// Main Function
// ============================================================================
-export async function generateUsageReport(): Promise<{
+export async function generateUsageReport(options?: {
+  collectRemote?: boolean
+}): Promise<{
  insights: InsightResults
  htmlPath: string
  data: AggregatedData
+  remoteStats?: { hosts: RemoteHostInfo[]; totalCopied: number }
  facets: Map<string, SessionFacets>
}> {
+  let remoteStats: { hosts: RemoteHostInfo[]; totalCopied: number } | undefined
+  // Optionally collect data from remote hosts first (internal-only)
+  if (process.env.USER_TYPE === 'ant' && options?.collectRemote) {
+    const destDir = join(getClaudeConfigHomeDir(), 'projects')
+    const { hosts, totalCopied } = await collectAllRemoteHostData(destDir)
+    remoteStats = { hosts, totalCopied }
+  }
  // Phase 1: Lite scan — filesystem metadata only (no JSONL parsing)
  const allScannedSessions = await scanAllSessions()
  const totalSessionsScanned = allScannedSessions.length
@@ -2815,6 +3017,7 @@ export async function generateUsageReport(): Promise<{
    insights,
    htmlPath,
    data: aggregated,
+    remoteStats,
    facets: substantiveFacets,
  }
}
@@ -2840,8 +3043,31 @@ const usageReport: Command = {
  contentLength: 0, // Dynamic content
  progressMessage: 'analyzing your sessions',
  source: 'builtin',
-  async getPromptForCommand(_args) {
-    const { insights, htmlPath, data } = await generateUsageReport()
+  async getPromptForCommand(args) {
+    let collectRemote = false
+    let remoteHosts: string[] = []
+    let hasRemoteHosts = false
+    if (process.env.USER_TYPE === 'ant') {
+      // Parse --homespaces flag
+      collectRemote = args?.includes('--homespaces') ?? false
+      // Check for available remote hosts
+      remoteHosts = await getRunningRemoteHosts()
+      hasRemoteHosts = remoteHosts.length > 0
+      // Show collection message if collecting
+      if (collectRemote && hasRemoteHosts) {
+        // biome-ignore lint/suspicious/noConsole: intentional
+        console.error(
+          `Collecting sessions from ${remoteHosts.length} homespace(s): ${remoteHosts.join(', ')}...`,
+        )
+      }
+    }
+    const { insights, htmlPath, data, remoteStats } = await generateUsageReport(
+      { collectRemote },
+    )
    let reportUrl = `file://${htmlPath}`
    let uploadHint = ''
@@ -2859,6 +3085,20 @@ const usageReport: Command = {
`${data.git_commits} commits`, `${data.git_commits} commits`,
].join(' · ') ].join(' · ')
// Build remote host info (internal-only)
let remoteInfo = ''
if (process.env.USER_TYPE === 'ant') {
if (remoteStats && remoteStats.totalCopied > 0) {
const hsNames = remoteStats.hosts
.filter(h => h.sessionCount > 0)
.map(h => h.name)
.join(', ')
remoteInfo = `\n_Collected ${remoteStats.totalCopied} new sessions from: ${hsNames}_\n`
} else if (!collectRemote && hasRemoteHosts) {
// Suggest using --homespaces if they have remote hosts but didn't use the flag
remoteInfo = `\n_Tip: Run \`/insights --homespaces\` to include sessions from your ${remoteHosts.length} running homespace(s)_\n`
}
}
// Build markdown summary from insights // Build markdown summary from insights
const atAGlance = insights.at_a_glance const atAGlance = insights.at_a_glance
@@ -2878,6 +3118,7 @@ ${atAGlance.ambitious_workflows ? `**Ambitious workflows:** ${atAGlance.ambitiou
${stats} ${stats}
${data.date_range.start} to ${data.date_range.end} ${data.date_range.start} to ${data.date_range.end}
${remoteInfo}
` `
const userSummary = `${header}${summaryText} const userSummary = `${header}${summaryText}

View File

@@ -1,28 +1,20 @@
import { PassThrough } from 'node:stream' import { PassThrough } from 'node:stream'
import { afterEach, expect, mock, test } from 'bun:test' import { expect, test } from 'bun:test'
import React from 'react' import React from 'react'
import stripAnsi from 'strip-ansi' import stripAnsi from 'strip-ansi'
import { createRoot, render, useApp } from '../../ink.js' import { createRoot, render, useApp } from '../../ink.js'
import { AppStateProvider } from '../../state/AppState.js' import { AppStateProvider } from '../../state/AppState.js'
import { import {
applySavedProfileToCurrentSession,
buildCodexOAuthProfileEnv,
buildCurrentProviderSummary, buildCurrentProviderSummary,
buildProfileSaveMessage, buildProfileSaveMessage,
getProviderWizardDefaults, getProviderWizardDefaults,
ProviderWizard,
TextEntryDialog, TextEntryDialog,
} from './provider.js' } from './provider.js'
import { createProfileFile } from '../../utils/providerProfile.js'
const SYNC_START = '\x1B[?2026h' const SYNC_START = '\x1B[?2026h'
const SYNC_END = '\x1B[?2026l' const SYNC_END = '\x1B[?2026l'
const ORIGINAL_SIMPLE_ENV = process.env.CLAUDE_CODE_SIMPLE
const ORIGINAL_CODEX_API_KEY = process.env.CODEX_API_KEY
const ORIGINAL_CHATGPT_ACCOUNT_ID = process.env.CHATGPT_ACCOUNT_ID
const ORIGINAL_CODEX_ACCOUNT_ID = process.env.CODEX_ACCOUNT_ID
function extractLastFrame(output: string): string { function extractLastFrame(output: string): string {
let lastFrame: string | null = null let lastFrame: string | null = null
@@ -68,51 +60,6 @@ async function renderFinalFrame(node: React.ReactNode): Promise<string> {
return stripAnsi(extractLastFrame(getOutput())) return stripAnsi(extractLastFrame(getOutput()))
} }
async function waitForOutput(
getOutput: () => string,
predicate: (output: string) => boolean,
timeoutMs = 2500,
): Promise<string> {
const startedAt = Date.now()
while (Date.now() - startedAt < timeoutMs) {
const output = stripAnsi(extractLastFrame(getOutput()))
if (predicate(output)) {
return output
}
await Bun.sleep(10)
}
throw new Error('Timed out waiting for ProviderWizard test output')
}
async function renderProviderWizardFrame(): Promise<string> {
const { stdout, stdin, getOutput } = createTestStreams()
const root = await createRoot({
stdout: stdout as unknown as NodeJS.WriteStream,
stdin: stdin as unknown as NodeJS.ReadStream,
patchConsole: false,
})
root.render(
<AppStateProvider>
<ProviderWizard onDone={() => {}} />
</AppStateProvider>,
)
try {
return await waitForOutput(
getOutput,
output => output.includes('Set up a provider profile'),
)
} finally {
root.unmount()
stdin.end()
stdout.end()
await Bun.sleep(0)
}
}
function createTestStreams(): { function createTestStreams(): {
stdout: PassThrough stdout: PassThrough
stdin: PassThrough & { stdin: PassThrough & {
@@ -147,34 +94,6 @@ function createTestStreams(): {
} }
} }
afterEach(() => {
mock.restore()
if (ORIGINAL_SIMPLE_ENV === undefined) {
delete process.env.CLAUDE_CODE_SIMPLE
} else {
process.env.CLAUDE_CODE_SIMPLE = ORIGINAL_SIMPLE_ENV
}
if (ORIGINAL_CODEX_API_KEY === undefined) {
delete process.env.CODEX_API_KEY
} else {
process.env.CODEX_API_KEY = ORIGINAL_CODEX_API_KEY
}
if (ORIGINAL_CHATGPT_ACCOUNT_ID === undefined) {
delete process.env.CHATGPT_ACCOUNT_ID
} else {
process.env.CHATGPT_ACCOUNT_ID = ORIGINAL_CHATGPT_ACCOUNT_ID
}
if (ORIGINAL_CODEX_ACCOUNT_ID === undefined) {
delete process.env.CODEX_ACCOUNT_ID
} else {
process.env.CODEX_ACCOUNT_ID = ORIGINAL_CODEX_ACCOUNT_ID
}
})
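The removed `afterEach` restores each variable with its own if/else; the same save-and-restore idea generalizes to a small helper. A sketch, not part of this diff:

// Snapshot the named env vars now and return a function that restores
// them, deleting keys that were originally unset.
function snapshotEnv(keys: readonly string[]): () => void {
  const saved = new Map(keys.map(k => [k, process.env[k]] as const))
  return () => {
    for (const [key, value] of saved) {
      if (value === undefined) {
        delete process.env[key]
      } else {
        process.env[key] = value
      }
    }
  }
}
// Usage: const restore = snapshotEnv(['CODEX_API_KEY']); afterEach(restore)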
function StepChangeHarness(): React.ReactNode { function StepChangeHarness(): React.ReactNode {
const { exit } = useApp() const { exit } = useApp()
const [step, setStep] = React.useState<'api' | 'model'>('api') const [step, setStep] = React.useState<'api' | 'model'>('api')
@@ -314,167 +233,6 @@ test('buildProfileSaveMessage describes Gemini access token / ADC mode clearly',
expect(message).not.toContain('AIza') expect(message).not.toContain('AIza')
}) })
test('buildProfileSaveMessage reflects immediate Codex activation for existing credentials', () => {
const message = buildProfileSaveMessage(
'codex',
{
OPENAI_MODEL: 'codexplan',
OPENAI_BASE_URL: 'https://chatgpt.com/backend-api/codex',
CHATGPT_ACCOUNT_ID: 'acct_codex',
},
'D:/codings/Opensource/openclaude/.openclaude-profile.json',
{
activatedInSession: true,
},
)
expect(message).toContain('Saved Codex profile.')
expect(message).toContain('OpenClaude switched to it for this session.')
expect(message).not.toContain('Restart OpenClaude to use it.')
})
test('buildProfileSaveMessage reflects immediate Codex OAuth activation when the session switched successfully', () => {
const message = buildProfileSaveMessage(
'codex',
{
OPENAI_MODEL: 'codexplan',
OPENAI_BASE_URL: 'https://chatgpt.com/backend-api/codex',
CHATGPT_ACCOUNT_ID: 'acct_codex',
CODEX_CREDENTIAL_SOURCE: 'oauth',
},
'D:/codings/Opensource/openclaude/.openclaude-profile.json',
{
activatedInSession: true,
},
)
expect(message).toContain('Saved Codex profile.')
expect(message).toContain('OpenClaude switched to it for this session.')
expect(message).not.toContain('Restart OpenClaude to use it.')
})
test('buildCodexOAuthProfileEnv uses the fresh OAuth account id without persisting an API key', () => {
process.env.CODEX_API_KEY = 'stale-codex-key'
process.env.CHATGPT_ACCOUNT_ID = 'acct_stale'
const env = buildCodexOAuthProfileEnv({
accessToken: 'oauth-access-token',
accountId: 'acct_oauth',
})
expect(env).toEqual({
OPENAI_BASE_URL: 'https://chatgpt.com/backend-api/codex',
OPENAI_MODEL: 'codexplan',
CHATGPT_ACCOUNT_ID: 'acct_oauth',
CODEX_CREDENTIAL_SOURCE: 'oauth',
})
expect(env).not.toHaveProperty('CODEX_API_KEY')
})
test('buildCodexProfileEnv derives oauth source from secure storage when no explicit source is provided', async () => {
const actualProviderConfig = await import('../../services/api/providerConfig.js')
mock.module('../../services/api/providerConfig.js', () => ({
...actualProviderConfig,
resolveCodexApiCredentials: () => ({
apiKey: 'stored-access-token',
accountId: 'acct_secure_storage',
source: 'secure-storage' as const,
}),
}))
// @ts-expect-error cache-busting query string for Bun module mocks
const { buildCodexProfileEnv } = await import(
'../../utils/providerProfile.js?secure-storage-codex-source'
)
const env = buildCodexProfileEnv({
model: 'codexplan',
processEnv: {},
})
expect(env).toEqual({
OPENAI_BASE_URL: 'https://chatgpt.com/backend-api/codex',
OPENAI_MODEL: 'codexplan',
CHATGPT_ACCOUNT_ID: 'acct_secure_storage',
CODEX_CREDENTIAL_SOURCE: 'oauth',
})
})
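The `?secure-storage-codex-source` query string is what forces Bun to re-evaluate the module graph after `mock.module()`; a plain re-import would hit the module cache. The general shape of the trick, with hypothetical module names:

import { expect, mock, test } from 'bun:test'

test('cache-busted import observes the module mock', async () => {
  // Hypothetical modules: ./dep.js exports answer(), and ./consumer.js
  // exports useDep() built on top of it.
  mock.module('./dep.js', () => ({ answer: () => 42 }))
  // The unique query string defeats the import cache so consumer.js is
  // re-evaluated against the mocked dependency.
  // @ts-expect-error query string is not part of the module's type
  const { useDep } = await import('./consumer.js?cache-bust')
  expect(useDep()).toBe(42)
})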
test('explicitly declared env takes precedence over applySavedProfileToCurrentSession', async () => {
// @ts-expect-error cache-busting query string for Bun module mocks
const { applySavedProfileToCurrentSession } = await import(
'../../utils/providerProfile.js?apply-saved-profile-codex'
)
const processEnv: NodeJS.ProcessEnv = {
CLAUDE_CODE_USE_OPENAI: '1',
OPENAI_MODEL: 'gpt-4o',
OPENAI_BASE_URL: 'https://api.openai.com/v1',
OPENAI_API_KEY: 'sk-openai',
CODEX_API_KEY: 'codex-live',
CHATGPT_ACCOUNT_ID: 'acct_codex',
CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED: '1',
CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED_ID: 'provider_old',
}
const profileFile = createProfileFile('codex', {
OPENAI_MODEL: 'codexplan',
OPENAI_BASE_URL: 'https://chatgpt.com/backend-api/codex',
CODEX_API_KEY: 'codex-live',
CHATGPT_ACCOUNT_ID: 'acct_codex',
})
const warning = await applySavedProfileToCurrentSession({
profileFile,
processEnv,
})
expect(warning).toBeNull()
expect(processEnv.CLAUDE_CODE_USE_OPENAI).toBe('1')
expect(processEnv.OPENAI_MODEL).toBe('gpt-4o')
expect(processEnv.OPENAI_BASE_URL).toBe('https://api.openai.com/v1')
expect(processEnv.CODEX_API_KEY).toBeUndefined()
expect(processEnv.CHATGPT_ACCOUNT_ID).toBeUndefined()
expect(processEnv.OPENAI_API_KEY).toBe('sk-openai')
expect(processEnv.CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED).toBeUndefined()
expect(processEnv.CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED_ID).toBeUndefined()
})
test('explicitly declared env takes precedence over an OAuth-sourced Codex profile', async () => {
// @ts-expect-error cache-busting query string for Bun module mocks
const { applySavedProfileToCurrentSession } = await import(
'../../utils/providerProfile.js?apply-saved-profile-codex-oauth'
)
const processEnv: NodeJS.ProcessEnv = {
CLAUDE_CODE_USE_OPENAI: '1',
OPENAI_MODEL: 'gpt-4o',
OPENAI_BASE_URL: 'https://api.openai.com/v1',
CODEX_API_KEY: 'stale-codex-key',
CHATGPT_ACCOUNT_ID: 'acct_stale',
}
const profileFile = createProfileFile('codex', {
OPENAI_MODEL: 'codexplan',
OPENAI_BASE_URL: 'https://chatgpt.com/backend-api/codex',
CHATGPT_ACCOUNT_ID: 'acct_oauth',
CODEX_CREDENTIAL_SOURCE: 'oauth',
})
const warning = await applySavedProfileToCurrentSession({
profileFile,
processEnv,
})
expect(warning).not.toBeNull()
expect(processEnv.OPENAI_MODEL).toBe('gpt-4o')
expect(processEnv.OPENAI_BASE_URL).toBe('https://api.openai.com/v1')
expect(processEnv.CODEX_API_KEY).toBe('stale-codex-key')
expect(processEnv.CHATGPT_ACCOUNT_ID).toBe('acct_stale')
})
test('buildCurrentProviderSummary redacts poisoned model and endpoint values', () => { test('buildCurrentProviderSummary redacts poisoned model and endpoint values', () => {
const summary = buildCurrentProviderSummary({ const summary = buildCurrentProviderSummary({
processEnv: { processEnv: {
@@ -487,8 +245,8 @@ test('buildCurrentProviderSummary redacts poisoned model and endpoint values', (
}) })
expect(summary.providerLabel).toBe('OpenAI-compatible') expect(summary.providerLabel).toBe('OpenAI-compatible')
expect(summary.modelLabel).toBe('sk-...678') expect(summary.modelLabel).toBe('sk-...5678')
expect(summary.endpointLabel).toBe('sk-...678') expect(summary.endpointLabel).toBe('sk-...5678')
}) })
test('buildCurrentProviderSummary labels generic local openai-compatible providers', () => { test('buildCurrentProviderSummary labels generic local openai-compatible providers', () => {
@@ -506,7 +264,7 @@ test('buildCurrentProviderSummary labels generic local openai-compatible provide
expect(summary.endpointLabel).toBe('http://127.0.0.1:8080/v1') expect(summary.endpointLabel).toBe('http://127.0.0.1:8080/v1')
}) })
test('buildCurrentProviderSummary does not relabel local gpt-5.4 providers as Codex when custom base URL is set', () => { test('buildCurrentProviderSummary does not relabel local gpt-5.4 providers as Codex', () => {
const summary = buildCurrentProviderSummary({ const summary = buildCurrentProviderSummary({
processEnv: { processEnv: {
CLAUDE_CODE_USE_OPENAI: '1', CLAUDE_CODE_USE_OPENAI: '1',
@@ -549,12 +307,3 @@ test('getProviderWizardDefaults ignores poisoned current provider values', () =>
expect(defaults.openAIBaseUrl).toBe('https://api.openai.com/v1') expect(defaults.openAIBaseUrl).toBe('https://api.openai.com/v1')
expect(defaults.geminiModel).toBe('gemini-2.0-flash') expect(defaults.geminiModel).toBe('gemini-2.0-flash')
}) })
test('ProviderWizard hides Codex OAuth while running in bare mode', async () => {
process.env.CLAUDE_CODE_SIMPLE = '1'
const output = await renderProviderWizardFrame()
expect(output).toContain('Set up a provider profile')
expect(output).not.toContain('Codex OAuth')
})

View File

@@ -10,12 +10,8 @@ import {
} from '../../components/CustomSelect/index.js' } from '../../components/CustomSelect/index.js'
import { Dialog } from '../../components/design-system/Dialog.js' import { Dialog } from '../../components/design-system/Dialog.js'
import { LoadingState } from '../../components/design-system/LoadingState.js' import { LoadingState } from '../../components/design-system/LoadingState.js'
import { useCodexOAuthFlow } from '../../components/useCodexOAuthFlow.js'
import { useTerminalSize } from '../../hooks/useTerminalSize.js' import { useTerminalSize } from '../../hooks/useTerminalSize.js'
import { Box, Text } from '../../ink.js' import { Box, Text } from '../../ink.js'
import {
type CodexOAuthTokens,
} from '../../services/api/codexOAuth.js'
import { import {
DEFAULT_CODEX_BASE_URL, DEFAULT_CODEX_BASE_URL,
DEFAULT_OPENAI_BASE_URL, DEFAULT_OPENAI_BASE_URL,
@@ -24,8 +20,6 @@ import {
resolveProviderRequest, resolveProviderRequest,
} from '../../services/api/providerConfig.js' } from '../../services/api/providerConfig.js'
import { import {
applySavedProfileToCurrentSession as applySharedProfileToCurrentSession,
buildCodexOAuthProfileEnv as buildSharedCodexOAuthProfileEnv,
buildCodexProfileEnv, buildCodexProfileEnv,
buildGeminiProfileEnv, buildGeminiProfileEnv,
buildMistralProfileEnv, buildMistralProfileEnv,
@@ -55,7 +49,6 @@ import {
readGeminiAccessToken, readGeminiAccessToken,
saveGeminiAccessToken, saveGeminiAccessToken,
} from '../../utils/geminiCredentials.js' } from '../../utils/geminiCredentials.js'
import { isBareMode } from '../../utils/envUtils.js'
import { import {
getGoalDefaultOpenAIModel, getGoalDefaultOpenAIModel,
normalizeRecommendationGoal, normalizeRecommendationGoal,
@@ -64,47 +57,12 @@ import {
type RecommendationGoal, type RecommendationGoal,
} from '../../utils/providerRecommendation.js' } from '../../utils/providerRecommendation.js'
import { import {
getOllamaChatBaseUrl,
getLocalOpenAICompatibleProviderLabel, getLocalOpenAICompatibleProviderLabel,
probeOllamaGenerationReadiness, hasLocalOllama,
type OllamaGenerationReadiness, listOllamaModels,
} from '../../utils/providerDiscovery.js' } from '../../utils/providerDiscovery.js'
function describeOllamaReadinessIssue( type ProviderChoice = 'auto' | ProviderProfile | 'clear'
readiness: OllamaGenerationReadiness,
options?: {
baseUrl?: string
allowManualFallback?: boolean
},
): string {
const endpoint = options?.baseUrl ?? 'http://localhost:11434'
if (readiness.state === 'unreachable') {
return `Could not reach Ollama at ${endpoint}. Start Ollama first, then run /provider again.`
}
if (readiness.state === 'no_models') {
const manualSuffix = options?.allowManualFallback
? ', or enter details manually'
: ''
return `Ollama is running, but no installed models were found. Pull a chat model such as qwen2.5-coder:7b or llama3.1:8b first${manualSuffix}.`
}
if (readiness.state === 'generation_failed') {
const modelHint = readiness.probeModel ?? 'the selected model'
const detailSuffix = readiness.detail
? ` Details: ${readiness.detail}.`
: ''
const manualSuffix = options?.allowManualFallback
? ' You can also enter details manually.'
: ''
return `Ollama is reachable and models are installed, but a generation probe failed for ${modelHint}.${detailSuffix} Run "ollama run ${modelHint}" once and retry.${manualSuffix}`
}
return ''
}
type ProviderChoice = 'auto' | ProviderProfile | 'codex-oauth' | 'clear'
type Step = type Step =
| { name: 'choose' } | { name: 'choose' }
@@ -135,7 +93,6 @@ type Step =
apiKey?: string apiKey?: string
authMode: 'api-key' | 'access-token' | 'adc' authMode: 'api-key' | 'access-token' | 'adc'
} }
| { name: 'codex-oauth' }
| { name: 'codex-check' } | { name: 'codex-check' }
type CurrentProviderSummary = { type CurrentProviderSummary = {
@@ -174,8 +131,6 @@ type ProviderWizardDefaults = {
mistralBaseUrl: string mistralBaseUrl: string
} }
type SecretSourceEnv = NodeJS.ProcessEnv & Partial<ProfileEnv>
function isEnvTruthy(value: string | undefined): boolean { function isEnvTruthy(value: string | undefined): boolean {
if (!value) return false if (!value) return false
const normalized = value.trim().toLowerCase() const normalized = value.trim().toLowerCase()
@@ -184,7 +139,7 @@ function isEnvTruthy(value: string | undefined): boolean {
function getSafeDisplayValue( function getSafeDisplayValue(
value: string | undefined, value: string | undefined,
processEnv: SecretSourceEnv, processEnv: NodeJS.ProcessEnv,
profileEnv?: ProfileEnv, profileEnv?: ProfileEnv,
fallback = '(not set)', fallback = '(not set)',
): string { ): string {
@@ -196,15 +151,14 @@ function getSafeDisplayValue(
export function getProviderWizardDefaults( export function getProviderWizardDefaults(
processEnv: NodeJS.ProcessEnv = process.env, processEnv: NodeJS.ProcessEnv = process.env,
): ProviderWizardDefaults { ): ProviderWizardDefaults {
const secretSource = processEnv as SecretSourceEnv
const safeOpenAIModel = const safeOpenAIModel =
sanitizeProviderConfigValue(processEnv.OPENAI_MODEL, secretSource) || sanitizeProviderConfigValue(processEnv.OPENAI_MODEL, processEnv) ||
'gpt-4o' 'gpt-4o'
const safeOpenAIBaseUrl = const safeOpenAIBaseUrl =
sanitizeProviderConfigValue(processEnv.OPENAI_BASE_URL, secretSource) || sanitizeProviderConfigValue(processEnv.OPENAI_BASE_URL, processEnv) ||
DEFAULT_OPENAI_BASE_URL DEFAULT_OPENAI_BASE_URL
const safeGeminiModel = const safeGeminiModel =
sanitizeProviderConfigValue(processEnv.GEMINI_MODEL, secretSource) || sanitizeProviderConfigValue(processEnv.GEMINI_MODEL, processEnv) ||
DEFAULT_GEMINI_MODEL DEFAULT_GEMINI_MODEL
const safeMistralModel = const safeMistralModel =
sanitizeProviderConfigValue(processEnv.MISTRAL_MODEL, processEnv) || sanitizeProviderConfigValue(processEnv.MISTRAL_MODEL, processEnv) ||
@@ -227,7 +181,6 @@ export function buildCurrentProviderSummary(options?: {
persisted?: ProfileFile | null persisted?: ProfileFile | null
}): CurrentProviderSummary { }): CurrentProviderSummary {
const processEnv = options?.processEnv ?? process.env const processEnv = options?.processEnv ?? process.env
const secretSource = processEnv as SecretSourceEnv
const persisted = options?.persisted ?? loadProfileFile() const persisted = options?.persisted ?? loadProfileFile()
const savedProfileLabel = persisted?.profile ?? 'none' const savedProfileLabel = persisted?.profile ?? 'none'
@@ -236,11 +189,11 @@ export function buildCurrentProviderSummary(options?: {
providerLabel: 'Google Gemini', providerLabel: 'Google Gemini',
modelLabel: getSafeDisplayValue( modelLabel: getSafeDisplayValue(
processEnv.GEMINI_MODEL ?? DEFAULT_GEMINI_MODEL, processEnv.GEMINI_MODEL ?? DEFAULT_GEMINI_MODEL,
secretSource, processEnv,
), ),
endpointLabel: getSafeDisplayValue( endpointLabel: getSafeDisplayValue(
processEnv.GEMINI_BASE_URL ?? DEFAULT_GEMINI_BASE_URL, processEnv.GEMINI_BASE_URL ?? DEFAULT_GEMINI_BASE_URL,
secretSource, processEnv,
), ),
savedProfileLabel, savedProfileLabel,
} }
@@ -266,13 +219,13 @@ export function buildCurrentProviderSummary(options?: {
providerLabel: 'GitHub Models', providerLabel: 'GitHub Models',
modelLabel: getSafeDisplayValue( modelLabel: getSafeDisplayValue(
processEnv.OPENAI_MODEL ?? 'github:copilot', processEnv.OPENAI_MODEL ?? 'github:copilot',
secretSource, processEnv,
), ),
endpointLabel: getSafeDisplayValue( endpointLabel: getSafeDisplayValue(
processEnv.OPENAI_BASE_URL ?? processEnv.OPENAI_BASE_URL ??
processEnv.OPENAI_API_BASE ?? processEnv.OPENAI_API_BASE ??
'https://models.github.ai/inference', 'https://models.github.ai/inference',
secretSource, processEnv,
), ),
savedProfileLabel, savedProfileLabel,
} }
@@ -293,8 +246,8 @@ export function buildCurrentProviderSummary(options?: {
return { return {
providerLabel, providerLabel,
modelLabel: getSafeDisplayValue(request.requestedModel, secretSource), modelLabel: getSafeDisplayValue(request.requestedModel, processEnv),
endpointLabel: getSafeDisplayValue(request.baseUrl, secretSource), endpointLabel: getSafeDisplayValue(request.baseUrl, processEnv),
savedProfileLabel, savedProfileLabel,
} }
} }
@@ -305,11 +258,11 @@ export function buildCurrentProviderSummary(options?: {
processEnv.ANTHROPIC_MODEL ?? processEnv.ANTHROPIC_MODEL ??
processEnv.CLAUDE_MODEL ?? processEnv.CLAUDE_MODEL ??
'claude-sonnet-4-6', 'claude-sonnet-4-6',
secretSource, processEnv,
), ),
endpointLabel: getSafeDisplayValue( endpointLabel: getSafeDisplayValue(
processEnv.ANTHROPIC_BASE_URL ?? 'https://api.anthropic.com', processEnv.ANTHROPIC_BASE_URL ?? 'https://api.anthropic.com',
secretSource, processEnv,
), ),
savedProfileLabel, savedProfileLabel,
} }
@@ -423,10 +376,6 @@ export function buildProfileSaveMessage(
profile: ProviderProfile, profile: ProviderProfile,
env: ProfileEnv, env: ProfileEnv,
filePath: string, filePath: string,
options?: {
activatedInSession?: boolean
activationWarning?: string | null
},
): string { ): string {
const summary = buildSavedProfileSummary(profile, env) const summary = buildSavedProfileSummary(profile, env)
const lines = [ const lines = [
@@ -440,24 +389,13 @@ export function buildProfileSaveMessage(
} }
lines.push(`Profile: ${filePath}`) lines.push(`Profile: ${filePath}`)
if (options?.activatedInSession) {
lines.push('OpenClaude switched to it for this session.')
} else if (options?.activationWarning) {
lines.push(
`Saved for next startup. Warning: could not activate it in this session (${options.activationWarning}).`,
)
} else {
lines.push('Restart OpenClaude to use it.') lines.push('Restart OpenClaude to use it.')
}
return lines.join('\n') return lines.join('\n')
} }
function buildUsageText(): string { function buildUsageText(): string {
const summary = buildCurrentProviderSummary() const summary = buildCurrentProviderSummary()
const availableProviders = isBareMode()
? 'Choose Auto, Ollama, OpenAI-compatible, Gemini, or Codex, then save a provider profile.'
: 'Choose Auto, Ollama, OpenAI-compatible, Gemini, Codex, or Codex OAuth, then save a provider profile.'
return [ return [
'Usage: /provider', 'Usage: /provider',
'', '',
@@ -468,7 +406,7 @@ function buildUsageText(): string {
`Current endpoint: ${summary.endpointLabel}`, `Current endpoint: ${summary.endpointLabel}`,
`Saved profile: ${summary.savedProfileLabel}`, `Saved profile: ${summary.savedProfileLabel}`,
'', '',
availableProviders, 'Choose Auto, Ollama, OpenAI-compatible, Gemini, or Codex, then save a profile for the next OpenClaude restart.',
].join('\n') ].join('\n')
} }
@@ -477,45 +415,12 @@ function finishProfileSave(
profile: ProviderProfile, profile: ProviderProfile,
env: ProfileEnv, env: ProfileEnv,
): void { ): void {
void saveProfileAndNotify(onDone, profile, env)
}
export function buildCodexOAuthProfileEnv(
tokens: Pick<CodexOAuthTokens, 'accessToken' | 'idToken' | 'accountId'>,
): ProfileEnv | null {
return buildSharedCodexOAuthProfileEnv(tokens)
}
export async function applySavedProfileToCurrentSession(options: {
profileFile: ProfileFile
processEnv?: NodeJS.ProcessEnv
}): Promise<string | null> {
return applySharedProfileToCurrentSession(options)
}
async function saveProfileAndNotify(
onDone: LocalJSXCommandOnDone,
profile: ProviderProfile,
env: ProfileEnv,
): Promise<void> {
try { try {
const profileFile = createProfileFile(profile, env) const profileFile = createProfileFile(profile, env)
const filePath = saveProfileFile(profileFile) const filePath = saveProfileFile(profileFile)
const shouldActivateInSession = profile === 'codex' onDone(buildProfileSaveMessage(profile, env, filePath), {
const activationWarning = shouldActivateInSession
? await applySharedProfileToCurrentSession({ profileFile })
: null
onDone(
buildProfileSaveMessage(profile, env, filePath, {
activatedInSession:
shouldActivateInSession && activationWarning === null,
activationWarning,
}),
{
display: 'system', display: 'system',
}, })
)
} catch (error) { } catch (error) {
const message = error instanceof Error ? error.message : String(error) const message = error instanceof Error ? error.message : String(error)
onDone(`Failed to save provider profile: ${message}`, { onDone(`Failed to save provider profile: ${message}`, {
@@ -599,10 +504,6 @@ function ProviderChooser({
onCancel: () => void onCancel: () => void
}): React.ReactNode { }): React.ReactNode {
const summary = buildCurrentProviderSummary() const summary = buildCurrentProviderSummary()
const canUseCodexOAuth = !isBareMode()
const helperText = canUseCodexOAuth
? 'Save a provider profile without editing environment variables first. Codex profiles backed by env, auth.json, or OpenClaude secure storage can switch this session immediately when validation succeeds.'
: 'Save a provider profile without editing environment variables first. Codex profiles backed by env or auth.json can switch this session immediately.'
const options: OptionWithDescription<ProviderChoice>[] = [ const options: OptionWithDescription<ProviderChoice>[] = [
{ {
label: 'Auto', label: 'Auto',
@@ -636,16 +537,6 @@ function ProviderChooser({
value: 'codex', value: 'codex',
description: 'Use existing ChatGPT Codex CLI auth or env credentials', description: 'Use existing ChatGPT Codex CLI auth or env credentials',
}, },
...(canUseCodexOAuth
? [
{
label: 'Codex OAuth',
value: 'codex-oauth' as const,
description:
'Sign in with ChatGPT in your browser and store Codex tokens securely',
},
]
: []),
] ]
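The options list above relies on `...(cond ? [entry] : [])`, so a gated entry is simply absent rather than disabled. A minimal sketch with an illustrative flag:

// An empty array spreads to nothing, keeping the options list well-typed.
const canUseCodexOAuth = false // illustrative flag
const options = [
  { label: 'Auto', value: 'auto' },
  ...(canUseCodexOAuth
    ? [{ label: 'Codex OAuth', value: 'codex-oauth' }]
    : []),
]
// options.length === 1 while the flag is false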
if (summary.savedProfileLabel !== 'none') { if (summary.savedProfileLabel !== 'none') {
@@ -663,7 +554,10 @@ function ProviderChooser({
onCancel={onCancel} onCancel={onCancel}
> >
<Box flexDirection="column" gap={1}> <Box flexDirection="column" gap={1}>
<Text>{helperText}</Text> <Text>
Save a provider profile for the next OpenClaude restart without
editing environment variables first.
</Text>
<Box flexDirection="column"> <Box flexDirection="column">
<Text dimColor>Current model: {summary.modelLabel}</Text> <Text dimColor>Current model: {summary.modelLabel}</Text>
<Text dimColor>Current endpoint: {summary.endpointLabel}</Text> <Text dimColor>Current endpoint: {summary.endpointLabel}</Text>
@@ -749,7 +643,6 @@ function AutoRecommendationStep({
| { | {
state: 'openai' state: 'openai'
defaultModel: string defaultModel: string
reason: string
} }
| { | {
state: 'error' state: 'error'
@@ -763,27 +656,19 @@ function AutoRecommendationStep({
void (async () => { void (async () => {
const defaultModel = getGoalDefaultOpenAIModel(goal) const defaultModel = getGoalDefaultOpenAIModel(goal)
try { try {
const readiness = await probeOllamaGenerationReadiness() const ollamaAvailable = await hasLocalOllama()
if (readiness.state !== 'ready') { if (!ollamaAvailable) {
if (!cancelled) { if (!cancelled) {
setStatus({ setStatus({ state: 'openai', defaultModel })
state: 'openai',
defaultModel,
reason: describeOllamaReadinessIssue(readiness),
})
} }
return return
} }
const recommended = recommendOllamaModel(readiness.models, goal) const models = await listOllamaModels()
const recommended = recommendOllamaModel(models, goal)
if (!recommended) { if (!recommended) {
if (!cancelled) { if (!cancelled) {
setStatus({ setStatus({ state: 'openai', defaultModel })
state: 'openai',
defaultModel,
reason:
'Ollama responded to a generation probe, but no recommended chat model matched this goal.',
})
} }
return return
} }
@@ -824,9 +709,7 @@ function AutoRecommendationStep({
{ label: 'Back', value: 'back' }, { label: 'Back', value: 'back' },
{ label: 'Cancel', value: 'cancel' }, { label: 'Cancel', value: 'cancel' },
]} ]}
onChange={(value: string) => onChange={value => (value === 'back' ? onBack() : onCancel())}
value === 'back' ? onBack() : onCancel()
}
onCancel={onCancel} onCancel={onCancel}
/> />
</Box> </Box>
@@ -839,17 +722,17 @@ function AutoRecommendationStep({
<Dialog title="Auto setup fallback" onCancel={onCancel}> <Dialog title="Auto setup fallback" onCancel={onCancel}>
<Box flexDirection="column" gap={1}> <Box flexDirection="column" gap={1}>
<Text> <Text>
Auto setup can continue into OpenAI-compatible setup with a default model of{' '} No viable local Ollama chat model was detected. Auto setup can
continue into OpenAI-compatible setup with a default model of{' '}
{status.defaultModel}. {status.defaultModel}.
</Text> </Text>
<Text dimColor>{status.reason}</Text>
<Select <Select
options={[ options={[
{ label: 'Continue to OpenAI-compatible setup', value: 'continue' }, { label: 'Continue to OpenAI-compatible setup', value: 'continue' },
{ label: 'Back', value: 'back' }, { label: 'Back', value: 'back' },
{ label: 'Cancel', value: 'cancel' }, { label: 'Cancel', value: 'cancel' },
]} ]}
onChange={(value: string) => { onChange={value => {
if (value === 'continue') { if (value === 'continue') {
onNeedOpenAI(status.defaultModel) onNeedOpenAI(status.defaultModel)
} else if (value === 'back') { } else if (value === 'back') {
@@ -882,7 +765,7 @@ function AutoRecommendationStep({
{ label: 'Back', value: 'back' }, { label: 'Back', value: 'back' },
{ label: 'Cancel', value: 'cancel' }, { label: 'Cancel', value: 'cancel' },
]} ]}
onChange={(value: string) => { onChange={value => {
if (value === 'save') { if (value === 'save') {
onSave( onSave(
'ollama', 'ollama',
@@ -926,19 +809,32 @@ function OllamaModelStep({
let cancelled = false let cancelled = false
void (async () => { void (async () => {
const readiness = await probeOllamaGenerationReadiness() const available = await hasLocalOllama()
if (readiness.state !== 'ready') { if (!available) {
if (!cancelled) { if (!cancelled) {
setStatus({ setStatus({
state: 'unavailable', state: 'unavailable',
message: describeOllamaReadinessIssue(readiness), message:
'Could not reach Ollama at http://localhost:11434. Start Ollama first, then run /provider again.',
}) })
} }
return return
} }
const ranked = rankOllamaModels(readiness.models, 'balanced') const models = await listOllamaModels()
const recommended = recommendOllamaModel(readiness.models, 'balanced') if (models.length === 0) {
if (!cancelled) {
setStatus({
state: 'unavailable',
message:
'Ollama is running, but no installed models were found. Pull a chat model such as qwen2.5-coder:7b or llama3.1:8b first.',
})
}
return
}
const ranked = rankOllamaModels(models, 'balanced')
const recommended = recommendOllamaModel(models, 'balanced')
if (!cancelled) { if (!cancelled) {
setStatus({ setStatus({
state: 'ready', state: 'ready',
@@ -971,9 +867,7 @@ function OllamaModelStep({
{ label: 'Back', value: 'back' }, { label: 'Back', value: 'back' },
{ label: 'Cancel', value: 'cancel' }, { label: 'Cancel', value: 'cancel' },
]} ]}
onChange={(value: string) => onChange={value => (value === 'back' ? onBack() : onCancel())}
value === 'back' ? onBack() : onCancel()
}
onCancel={onCancel} onCancel={onCancel}
/> />
</Box> </Box>
@@ -994,7 +888,7 @@ function OllamaModelStep({
defaultFocusValue={status.defaultValue} defaultFocusValue={status.defaultValue}
inlineDescriptions inlineDescriptions
visibleOptionCount={Math.min(8, status.options.length)} visibleOptionCount={Math.min(8, status.options.length)}
onChange={(value: string) => { onChange={value => {
onSave( onSave(
'ollama', 'ollama',
buildOllamaProfileEnv(value, { buildOllamaProfileEnv(value, {
@@ -1009,84 +903,6 @@ function OllamaModelStep({
) )
} }
function CodexOAuthStep({
onSave,
onBack,
onCancel,
}: {
onSave: (profile: ProviderProfile, env: ProfileEnv) => void
onBack: () => void
onCancel: () => void
}): React.ReactNode {
const handleAuthenticated = React.useCallback(async (
tokens: CodexOAuthTokens,
persistCredentials: (options?: { profileId?: string }) => void,
) => {
const env = buildCodexOAuthProfileEnv(tokens)
if (!env) {
throw new Error(
'Codex OAuth succeeded, but OpenClaude could not build a Codex profile from the stored credentials.',
)
}
persistCredentials()
onSave('codex', env)
}, [onSave])
const status = useCodexOAuthFlow({
onAuthenticated: handleAuthenticated,
})
if (status.state === 'error') {
return (
<Dialog title="Codex OAuth failed" onCancel={onCancel} color="warning">
<Box flexDirection="column" gap={1}>
<Text>{status.message}</Text>
<Select
options={[
{ label: 'Back', value: 'back' },
{ label: 'Cancel', value: 'cancel' },
]}
onChange={(value: string) =>
value === 'back' ? onBack() : onCancel()
}
onCancel={onCancel}
/>
</Box>
</Dialog>
)
}
if (status.state === 'starting') {
return <LoadingState message="Starting Codex OAuth..." />
}
return (
<Dialog title="Codex OAuth" onCancel={onBack}>
<Box flexDirection="column" gap={1}>
<Text>
Finish signing in with ChatGPT in your browser. OpenClaude will store
the resulting Codex credentials securely for future sessions.
</Text>
{status.browserOpened === false ? (
<Text color="warning">
Browser did not open automatically. Visit this URL to continue:
</Text>
) : status.browserOpened === true ? (
<Text dimColor>
Browser opened. Complete the sign-in there, then OpenClaude will
finish setup automatically.
</Text>
) : (
<Text dimColor>Opening your browser...</Text>
)}
<Text>{status.authUrl}</Text>
<Text dimColor>Press Esc to cancel and go back.</Text>
</Box>
</Dialog>
)
}
function CodexCredentialStep({ function CodexCredentialStep({
onSave, onSave,
onBack, onBack,
@@ -1108,9 +924,7 @@ function CodexCredentialStep({
{ label: 'Back', value: 'back' }, { label: 'Back', value: 'back' },
{ label: 'Cancel', value: 'cancel' }, { label: 'Cancel', value: 'cancel' },
]} ]}
onChange={(value: string) => onChange={value => (value === 'back' ? onBack() : onCancel())}
value === 'back' ? onBack() : onCancel()
}
onCancel={onCancel} onCancel={onCancel}
/> />
</Box> </Box>
@@ -1144,10 +958,9 @@ function CodexCredentialStep({
defaultFocusValue="codexplan" defaultFocusValue="codexplan"
inlineDescriptions inlineDescriptions
visibleOptionCount={options.length} visibleOptionCount={options.length}
onChange={(value: string) => { onChange={value => {
const env = buildCodexProfileEnv({ const env = buildCodexProfileEnv({
model: value, model: value,
credentialSource: credentials.credentialSource,
processEnv: process.env, processEnv: process.env,
}) })
if (env) { if (env) {
@@ -1162,16 +975,9 @@ function CodexCredentialStep({
} }
function resolveCodexCredentials(processEnv: NodeJS.ProcessEnv): function resolveCodexCredentials(processEnv: NodeJS.ProcessEnv):
| { | { ok: true; sourceDescription: string }
ok: true
sourceDescription: string
credentialSource: 'oauth' | 'existing'
}
| { ok: false; message: string } { | { ok: false; message: string } {
const credentials = resolveCodexApiCredentials(processEnv) const credentials = resolveCodexApiCredentials(processEnv)
const oauthHint = isBareMode()
? 'Re-login with the Codex CLI'
: 'Choose Codex OAuth in /provider, or re-login with the Codex CLI'
if (!credentials.apiKey) { if (!credentials.apiKey) {
const authHint = credentials.authPath const authHint = credentials.authPath
@@ -1179,7 +985,7 @@ function resolveCodexCredentials(processEnv: NodeJS.ProcessEnv):
: 'Set CODEX_API_KEY or re-login with the Codex CLI.' : 'Set CODEX_API_KEY or re-login with the Codex CLI.'
return { return {
ok: false, ok: false,
message: `Codex setup needs existing credentials. ${oauthHint}, or set CODEX_API_KEY. ${authHint}`, message: `Codex setup needs existing credentials. Re-login with the Codex CLI or set CODEX_API_KEY. ${authHint}`,
} }
} }
@@ -1187,19 +993,15 @@ function resolveCodexCredentials(processEnv: NodeJS.ProcessEnv):
return { return {
ok: false, ok: false,
message: message:
`Codex auth is missing chatgpt_account_id. ${oauthHint}, or set CHATGPT_ACCOUNT_ID/CODEX_ACCOUNT_ID first.`, 'Codex auth is missing chatgpt_account_id. Re-login with the Codex CLI or set CHATGPT_ACCOUNT_ID/CODEX_ACCOUNT_ID first.',
} }
} }
return { return {
ok: true, ok: true,
credentialSource:
credentials.source === 'secure-storage' ? 'oauth' : 'existing',
sourceDescription: sourceDescription:
credentials.source === 'env' credentials.source === 'env'
? 'the current shell environment' ? 'the current shell environment'
: credentials.source === 'secure-storage'
? 'OpenClaude secure storage'
: credentials.authPath ?? DEFAULT_CODEX_BASE_URL, : credentials.authPath ?? DEFAULT_CODEX_BASE_URL,
} }
} }
@@ -1233,8 +1035,6 @@ export function ProviderWizard({
name: 'mistral-key', name: 'mistral-key',
defaultModel: defaults.mistralModel, defaultModel: defaults.mistralModel,
}) })
} else if (value === 'codex-oauth') {
setStep({ name: 'codex-oauth' })
} else if (value === 'clear') { } else if (value === 'clear') {
const filePath = deleteProfileFile() const filePath = deleteProfileFile()
onDone(`Removed saved provider profile at ${filePath}. Restart OpenClaude to go back to normal startup.`, { onDone(`Removed saved provider profile at ${filePath}. Restart OpenClaude to go back to normal startup.`, {
@@ -1514,7 +1314,7 @@ export function ProviderWizard({
options={options} options={options}
inlineDescriptions inlineDescriptions
visibleOptionCount={options.length} visibleOptionCount={options.length}
onChange={(value: string) => { onChange={value => {
if (value === 'api-key') { if (value === 'api-key') {
setStep({ name: 'gemini-key' }) setStep({ name: 'gemini-key' })
} else if (value === 'access-token') { } else if (value === 'access-token') {
@@ -1670,15 +1470,6 @@ export function ProviderWizard({
onCancel={() => onDone()} onCancel={() => onDone()}
/> />
) )
case 'codex-oauth':
return (
<CodexOAuthStep
onSave={(profile, env) => finishProfileSave(onDone, profile, env)}
onBack={() => setStep({ name: 'choose' })}
onCancel={() => onDone()}
/>
)
} }
} }

View File

@@ -112,10 +112,8 @@ test('third-party provider branch opens the first-run provider manager', async (
) )
expect(output).toContain('Set up provider') expect(output).toContain('Set up provider')
// Use alphabetically-early sentinels so they remain visible in the
// 13-row test frame after the provider list was sorted A→Z.
expect(output).toContain('Anthropic') expect(output).toContain('Anthropic')
expect(output).toContain('Azure OpenAI') expect(output).toContain('OpenAI')
expect(output).toContain('DeepSeek') expect(output).toContain('Ollama')
expect(output).toContain('Google Gemini') expect(output).toContain('LM Studio')
}) })

View File

@@ -101,7 +101,7 @@ export function EffortPicker({ onSelect, onCancel }: Props) {
<Box marginBottom={1} flexDirection="column"> <Box marginBottom={1} flexDirection="column">
<Text color="remember" bold={true}>Set effort level</Text> <Text color="remember" bold={true}>Set effort level</Text>
<Text dimColor={true}> <Text dimColor={true}>
{supportsEffort && usesOpenAIEffort {usesOpenAIEffort
? `OpenAI/Codex provider (${provider})` ? `OpenAI/Codex provider (${provider})`
: supportsEffort : supportsEffort
? `Claude model · ${provider} provider` ? `Claude model · ${provider} provider`

View File

@@ -5,14 +5,13 @@ import React from 'react'
import stripAnsi from 'strip-ansi' import stripAnsi from 'strip-ansi'
import { createRoot } from '../ink.js' import { createRoot } from '../ink.js'
import { KeybindingSetup } from '../keybindings/KeybindingProviderSetup.js'
import { AppStateProvider } from '../state/AppState.js' import { AppStateProvider } from '../state/AppState.js'
import { KeybindingSetup } from '../keybindings/KeybindingProviderSetup.js'
const SYNC_START = '\x1B[?2026h' const SYNC_START = '\x1B[?2026h'
const SYNC_END = '\x1B[?2026l' const SYNC_END = '\x1B[?2026l'
const ORIGINAL_ENV = { const ORIGINAL_ENV = {
CLAUDE_CODE_SIMPLE: process.env.CLAUDE_CODE_SIMPLE,
CLAUDE_CODE_USE_GITHUB: process.env.CLAUDE_CODE_USE_GITHUB, CLAUDE_CODE_USE_GITHUB: process.env.CLAUDE_CODE_USE_GITHUB,
GITHUB_TOKEN: process.env.GITHUB_TOKEN, GITHUB_TOKEN: process.env.GITHUB_TOKEN,
GH_TOKEN: process.env.GH_TOKEN, GH_TOKEN: process.env.GH_TOKEN,
@@ -97,47 +96,6 @@ async function waitForCondition(
throw new Error('Timed out waiting for ProviderManager test condition') throw new Error('Timed out waiting for ProviderManager test condition')
} }
// Provider list is sorted alphabetically by label in the preset picker, so
// reaching a given provider takes more keypresses than it used to. Keep the
// target-by-label indirection here so these tests survive future list edits
// without further churn.
//
// Order matches ProviderManager.renderPresetSelection() when
// canUseCodexOAuth === true (default in mocked tests).
const PRESET_ORDER = [
'Alibaba Coding Plan',
'Alibaba Coding Plan (China)',
'Anthropic',
'Atomic Chat',
'Azure OpenAI',
'Codex OAuth',
'DeepSeek',
'Google Gemini',
'Groq',
'LM Studio',
'MiniMax',
'Mistral',
'Moonshot AI',
'NVIDIA NIM',
'Ollama',
'OpenAI',
'OpenRouter',
'Together AI',
'Custom',
] as const
async function navigateToPreset(
stdin: { write: (data: string) => void },
label: (typeof PRESET_ORDER)[number],
): Promise<void> {
const index = PRESET_ORDER.indexOf(label)
if (index < 0) throw new Error(`Unknown preset label: ${label}`)
for (let i = 0; i < index; i++) {
stdin.write('j')
await Bun.sleep(25)
}
}
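Targeting rows by label keeps the keypress count out of individual tests; usage, as in the tests further down (assuming a harness returned by mountProviderManager):

// Walk the cursor down to the labeled row, then confirm with Enter.
await navigateToPreset(mounted.stdin, 'Ollama')
mounted.stdin.write('\r')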
function createDeferred<T>(): { function createDeferred<T>(): {
promise: Promise<T> promise: Promise<T>
resolve: (value: T) => void resolve: (value: T) => void
@@ -151,9 +109,6 @@ function createDeferred<T>(): {
function mockProviderProfilesModule(options?: { function mockProviderProfilesModule(options?: {
addProviderProfile?: (...args: unknown[]) => unknown addProviderProfile?: (...args: unknown[]) => unknown
getProviderProfiles?: () => unknown[]
updateProviderProfile?: (...args: unknown[]) => unknown
setActiveProviderProfile?: (...args: unknown[]) => unknown
}): void { }): void {
mock.module('../utils/providerProfiles.js', () => ({ mock.module('../utils/providerProfiles.js', () => ({
addProviderProfile: options?.addProviderProfile ?? (() => null), addProviderProfile: options?.addProviderProfile ?? (() => null),
@@ -176,135 +131,48 @@ function mockProviderProfilesModule(options?: {
model: 'mock-model', model: 'mock-model',
apiKey: '', apiKey: '',
}, },
getProviderProfiles: options?.getProviderProfiles ?? (() => []), getProviderProfiles: () => [],
setActiveProviderProfile: options?.setActiveProviderProfile ?? (() => null), setActiveProviderProfile: () => null,
updateProviderProfile: options?.updateProviderProfile ?? (() => null), updateProviderProfile: () => null,
})) }))
} }
function mockProviderManagerDependencies( function mockProviderManagerDependencies(
githubSyncRead: () => string | undefined, syncRead: () => string | undefined,
githubAsyncRead: () => Promise<string | undefined>, asyncRead: () => Promise<string | undefined>,
options?: { options?: {
addProviderProfile?: (...args: unknown[]) => unknown addProviderProfile?: (...args: unknown[]) => unknown
applySavedProfileToCurrentSession?: (...args: unknown[]) => Promise<string | null> hasLocalOllama?: () => Promise<boolean>
clearCodexCredentials?: () => { success: boolean; warning?: string } listOllamaModels?: () => Promise<
getProviderProfiles?: () => unknown[] Array<{
probeOllamaGenerationReadiness?: () => Promise<{
state: 'ready' | 'unreachable' | 'no_models' | 'generation_failed'
models: Array<
{
name: string name: string
sizeBytes?: number | null sizeBytes?: number | null
family?: string | null family?: string | null
families?: string[] families?: string[]
parameterSize?: string | null parameterSize?: string | null
quantizationLevel?: string | null quantizationLevel?: string | null
}
>
probeModel?: string
detail?: string
}> }>
codexSyncRead?: () => unknown >
codexAsyncRead?: () => Promise<unknown>
updateProviderProfile?: (...args: unknown[]) => unknown
setActiveProviderProfile?: (...args: unknown[]) => unknown
useCodexOAuthFlow?: (options: {
onAuthenticated: (tokens: {
accessToken: string
refreshToken: string
accountId?: string
idToken?: string
apiKey?: string
}, persistCredentials: (options?: { profileId?: string }) => void) =>
void | Promise<void>
}) => {
state: 'starting' | 'waiting' | 'error'
authUrl?: string
browserOpened?: boolean | null
message?: string
}
}, },
): void { ): void {
mockProviderProfilesModule({ mockProviderProfilesModule({ addProviderProfile: options?.addProviderProfile })
addProviderProfile: options?.addProviderProfile,
getProviderProfiles: options?.getProviderProfiles,
updateProviderProfile: options?.updateProviderProfile,
setActiveProviderProfile: options?.setActiveProviderProfile,
})
mock.module('../utils/providerDiscovery.js', () => ({ mock.module('../utils/providerDiscovery.js', () => ({
probeOllamaGenerationReadiness: hasLocalOllama: options?.hasLocalOllama ?? (async () => false),
options?.probeOllamaGenerationReadiness ?? listOllamaModels: options?.listOllamaModels ?? (async () => []),
(async () => ({
state: 'unreachable' as const,
models: [],
})),
})) }))
mock.module('../utils/githubModelsCredentials.js', () => ({ mock.module('../utils/githubModelsCredentials.js', () => ({
clearGithubModelsToken: () => ({ success: true }), clearGithubModelsToken: () => ({ success: true }),
GITHUB_MODELS_HYDRATED_ENV_MARKER: 'CLAUDE_CODE_GITHUB_TOKEN_HYDRATED', GITHUB_MODELS_HYDRATED_ENV_MARKER: 'CLAUDE_CODE_GITHUB_TOKEN_HYDRATED',
hydrateGithubModelsTokenFromSecureStorage: () => {}, hydrateGithubModelsTokenFromSecureStorage: () => {},
readGithubModelsToken: githubSyncRead, readGithubModelsToken: syncRead,
readGithubModelsTokenAsync: githubAsyncRead, readGithubModelsTokenAsync: asyncRead,
}))
mock.module('../utils/codexCredentials.js', () => ({
attachCodexProfileIdToStoredCredentials: () => ({ success: true }),
clearCodexCredentials:
options?.clearCodexCredentials ?? (() => ({ success: true })),
readCodexCredentials:
options?.codexSyncRead ?? (() => undefined),
readCodexCredentialsAsync:
options?.codexAsyncRead ?? (async () => undefined),
}))
mock.module('../utils/providerProfile.js', () => ({
applySavedProfileToCurrentSession:
options?.applySavedProfileToCurrentSession ?? (async () => null),
buildCodexOAuthProfileEnv: (tokens: {
accessToken: string
accountId?: string
idToken?: string
}) => {
const accountId =
tokens.accountId ??
(tokens.idToken ? 'acct_from_id_token' : undefined) ??
(tokens.accessToken ? 'acct_from_access_token' : undefined)
if (!accountId) {
return null
}
return {
OPENAI_BASE_URL: 'https://chatgpt.com/backend-api/codex',
OPENAI_MODEL: 'codexplan',
CHATGPT_ACCOUNT_ID: accountId,
CODEX_CREDENTIAL_SOURCE: 'oauth' as const,
}
},
clearPersistedCodexOAuthProfile: () => null,
createProfileFile: (profile: string, env: Record<string, unknown>) => ({
profile,
env,
createdAt: '2026-04-10T00:00:00.000Z',
}),
})) }))
mock.module('../utils/settings/settings.js', () => ({ mock.module('../utils/settings/settings.js', () => ({
updateSettingsForSource: () => ({ error: null }), updateSettingsForSource: () => ({ error: null }),
})) }))
mock.module('./useCodexOAuthFlow.js', () => ({
useCodexOAuthFlow:
options?.useCodexOAuthFlow ??
(() => ({
state: 'waiting' as const,
authUrl: 'https://chatgpt.com/codex',
browserOpened: true,
})),
}))
} }
async function waitForFrameOutput( async function waitForFrameOutput(
@@ -372,9 +240,9 @@ async function renderProviderManagerFrame(
onDone: (result?: unknown) => void onDone: (result?: unknown) => void
}>, }>,
options?: { options?: {
mode?: 'first-run' | 'manage'
waitForOutput?: (output: string) => boolean waitForOutput?: (output: string) => boolean
timeoutMs?: number timeoutMs?: number
mode?: 'first-run' | 'manage'
}, },
): Promise<string> { ): Promise<string> {
const mounted = await mountProviderManager(ProviderManager, { const mounted = await mountProviderManager(ProviderManager, {
@@ -437,6 +305,96 @@ test('ProviderManager resolves GitHub virtual provider from async storage withou
expect(asyncRead).toHaveBeenCalled() expect(asyncRead).toHaveBeenCalled()
}) })
test('ProviderManager first-run Ollama preset auto-detects installed models', async () => {
delete process.env.CLAUDE_CODE_USE_GITHUB
delete process.env.GITHUB_TOKEN
delete process.env.GH_TOKEN
const onDone = mock(() => {})
const addProviderProfile = mock((payload: {
provider: string
name: string
baseUrl: string
model: string
apiKey?: string
}) => ({
id: 'provider_ollama',
provider: payload.provider,
name: payload.name,
baseUrl: payload.baseUrl,
model: payload.model,
apiKey: payload.apiKey,
}))
mockProviderManagerDependencies(
() => undefined,
async () => undefined,
{
addProviderProfile,
hasLocalOllama: async () => true,
listOllamaModels: async () => [
{
name: 'gemma4:31b-cloud',
family: 'gemma',
parameterSize: '31b',
},
{
name: 'kimi-k2.5:cloud',
family: 'kimi',
parameterSize: '2.5b',
},
],
},
)
const nonce = `${Date.now()}-${Math.random()}`
const { ProviderManager } = await import(`./ProviderManager.js?ts=${nonce}`)
const mounted = await mountProviderManager(ProviderManager, {
mode: 'first-run',
onDone,
})
await waitForFrameOutput(
mounted.getOutput,
frame => frame.includes('Set up provider') && frame.includes('Ollama'),
)
mounted.stdin.write('j')
await Bun.sleep(50)
mounted.stdin.write('\r')
const modelFrame = await waitForFrameOutput(
mounted.getOutput,
frame =>
frame.includes('Choose an Ollama model') &&
frame.includes('gemma4:31b-cloud') &&
frame.includes('kimi-k2.5:cloud'),
)
expect(modelFrame).toContain('Choose an Ollama model')
expect(modelFrame).toContain('gemma4:31b-cloud')
await Bun.sleep(25)
mounted.stdin.write('\r')
await waitForCondition(() => onDone.mock.calls.length > 0)
expect(addProviderProfile).toHaveBeenCalled()
expect(addProviderProfile.mock.calls[0]?.[0]).toMatchObject({
name: 'Ollama',
baseUrl: 'http://localhost:11434/v1',
model: 'gemma4:31b-cloud',
})
expect(onDone).toHaveBeenCalledWith(
expect.objectContaining({
action: 'saved',
message: 'Provider configured: Ollama',
}),
)
await mounted.dispose()
})
test('ProviderManager avoids first-frame false negative while stored-token lookup is pending', async () => { test('ProviderManager avoids first-frame false negative while stored-token lookup is pending', async () => {
delete process.env.CLAUDE_CODE_USE_GITHUB delete process.env.CLAUDE_CODE_USE_GITHUB
delete process.env.GITHUB_TOKEN delete process.env.GITHUB_TOKEN
@@ -477,489 +435,3 @@ test('ProviderManager avoids first-frame false negative while stored-token looku
expect(syncRead).not.toHaveBeenCalled() expect(syncRead).not.toHaveBeenCalled()
expect(asyncRead).toHaveBeenCalled() expect(asyncRead).toHaveBeenCalled()
}) })
test('ProviderManager first-run Ollama preset auto-detects installed models', async () => {
delete process.env.CLAUDE_CODE_USE_GITHUB
delete process.env.GITHUB_TOKEN
delete process.env.GH_TOKEN
const onDone = mock(() => {})
const addProviderProfile = mock((payload: {
provider: string
name: string
baseUrl: string
model: string
apiKey?: string
}) => ({
id: 'provider_ollama',
provider: payload.provider,
name: payload.name,
baseUrl: payload.baseUrl,
model: payload.model,
apiKey: payload.apiKey,
}))
mockProviderManagerDependencies(
() => undefined,
async () => undefined,
{
addProviderProfile,
probeOllamaGenerationReadiness: async () => ({
state: 'ready',
models: [
{
name: 'gemma4:31b-cloud',
family: 'gemma',
parameterSize: '31b',
},
{
name: 'kimi-k2.5:cloud',
family: 'kimi',
parameterSize: '2.5b',
},
],
probeModel: 'gemma4:31b-cloud',
}),
},
)
const nonce = `${Date.now()}-${Math.random()}`
const { ProviderManager } = await import(`./ProviderManager.js?ts=${nonce}`)
const mounted = await mountProviderManager(ProviderManager, {
mode: 'first-run',
onDone,
})
await waitForFrameOutput(
mounted.getOutput,
frame => frame.includes('Set up provider'),
)
await navigateToPreset(mounted.stdin, 'Ollama')
mounted.stdin.write('\r')
const modelFrame = await waitForFrameOutput(
mounted.getOutput,
frame =>
frame.includes('Choose an Ollama model') &&
frame.includes('gemma4:31b-cloud') &&
frame.includes('kimi-k2.5:cloud'),
)
expect(modelFrame).toContain('Choose an Ollama model')
expect(modelFrame).toContain('gemma4:31b-cloud')
await Bun.sleep(25)
mounted.stdin.write('\r')
await waitForCondition(() => onDone.mock.calls.length > 0)
expect(addProviderProfile).toHaveBeenCalled()
expect(addProviderProfile.mock.calls[0]?.[0]).toMatchObject({
name: 'Ollama',
baseUrl: 'http://localhost:11434/v1',
model: 'gemma4:31b-cloud',
})
expect(onDone).toHaveBeenCalledWith(
expect.objectContaining({
action: 'saved',
message: 'Provider configured: Ollama',
}),
)
await mounted.dispose()
})
test('ProviderManager first-run Codex OAuth switches the current session after login completes', async () => {
delete process.env.CLAUDE_CODE_SIMPLE
delete process.env.CLAUDE_CODE_USE_GITHUB
delete process.env.GITHUB_TOKEN
delete process.env.GH_TOKEN
const onDone = mock(() => {})
const applySavedProfileToCurrentSession = mock(async () => null)
const persistCredentials = mock(() => {})
const addProviderProfile = mock((payload: {
provider: string
name: string
baseUrl: string
model: string
apiKey?: string
}) => ({
id: 'provider_codex_oauth',
provider: payload.provider,
name: payload.name,
baseUrl: payload.baseUrl,
model: payload.model,
apiKey: payload.apiKey,
}))
mockProviderManagerDependencies(
() => undefined,
async () => undefined,
{
addProviderProfile,
applySavedProfileToCurrentSession,
useCodexOAuthFlow: ({ onAuthenticated }) => {
React.useEffect(() => {
void onAuthenticated({
accessToken: 'oauth-access-token',
refreshToken: 'oauth-refresh-token',
accountId: 'acct_oauth',
}, persistCredentials)
}, [onAuthenticated])
return {
state: 'waiting',
authUrl: 'https://chatgpt.com/codex',
browserOpened: true,
}
},
},
)
const nonce = `${Date.now()}-${Math.random()}`
const { ProviderManager } = await import(`./ProviderManager.js?ts=${nonce}`)
const mounted = await mountProviderManager(ProviderManager, {
mode: 'first-run',
onDone,
})
await waitForFrameOutput(
mounted.getOutput,
frame => frame.includes('Set up provider') && frame.includes('Codex OAuth'),
)
await navigateToPreset(mounted.stdin, 'Codex OAuth')
mounted.stdin.write('\r')
await waitForCondition(() => onDone.mock.calls.length > 0)
expect(addProviderProfile).toHaveBeenCalledWith(
expect.objectContaining({
provider: 'openai',
name: 'Codex OAuth',
baseUrl: 'https://chatgpt.com/backend-api/codex',
model: 'codexplan',
apiKey: '',
}),
expect.objectContaining({ makeActive: true }),
)
expect(applySavedProfileToCurrentSession).toHaveBeenCalled()
expect(persistCredentials).toHaveBeenCalledWith({
profileId: 'provider_codex_oauth',
})
expect(onDone).toHaveBeenCalledWith(
expect.objectContaining({
action: 'saved',
message:
'Codex OAuth configured. OpenClaude switched to it for this session.',
}),
)
await mounted.dispose()
})
test('ProviderManager first-run Codex OAuth reports next-startup fallback when session activation fails', async () => {
delete process.env.CLAUDE_CODE_SIMPLE
delete process.env.CLAUDE_CODE_USE_GITHUB
delete process.env.GITHUB_TOKEN
delete process.env.GH_TOKEN
const onDone = mock(() => {})
const applySavedProfileToCurrentSession = mock(
async () => 'validation failed',
)
const persistCredentials = mock(() => {})
const addProviderProfile = mock((payload: {
provider: string
name: string
baseUrl: string
model: string
apiKey?: string
}) => ({
id: 'provider_codex_oauth',
provider: payload.provider,
name: payload.name,
baseUrl: payload.baseUrl,
model: payload.model,
apiKey: payload.apiKey,
}))
mockProviderManagerDependencies(
() => undefined,
async () => undefined,
{
addProviderProfile,
applySavedProfileToCurrentSession,
useCodexOAuthFlow: ({ onAuthenticated }) => {
React.useEffect(() => {
void onAuthenticated({
accessToken: 'oauth-access-token',
refreshToken: 'oauth-refresh-token',
accountId: 'acct_oauth',
}, persistCredentials)
}, [onAuthenticated])
return {
state: 'waiting',
authUrl: 'https://chatgpt.com/codex',
browserOpened: true,
}
},
},
)
const nonce = `${Date.now()}-${Math.random()}`
const { ProviderManager } = await import(`./ProviderManager.js?ts=${nonce}`)
const mounted = await mountProviderManager(ProviderManager, {
mode: 'first-run',
onDone,
})
await waitForFrameOutput(
mounted.getOutput,
frame => frame.includes('Set up provider') && frame.includes('Codex OAuth'),
)
await navigateToPreset(mounted.stdin, 'Codex OAuth')
mounted.stdin.write('\r')
await waitForCondition(() => onDone.mock.calls.length > 0)
expect(persistCredentials).toHaveBeenCalledWith({
profileId: 'provider_codex_oauth',
})
expect(onDone).toHaveBeenCalledWith(
expect.objectContaining({
action: 'saved',
message:
'Codex OAuth configured. Saved for next startup. Warning: validation failed.',
}),
)
await mounted.dispose()
})
test('ProviderManager does not hijack a manual Codex profile when OAuth credentials are not yet linked', async () => {
delete process.env.CLAUDE_CODE_SIMPLE
delete process.env.CLAUDE_CODE_USE_GITHUB
delete process.env.GITHUB_TOKEN
delete process.env.GH_TOKEN
const onDone = mock(() => {})
const manualProfile = {
id: 'provider_manual_codex',
provider: 'openai',
name: 'Codex OAuth',
baseUrl: 'https://chatgpt.com/backend-api/codex',
model: 'gpt-5.4',
apiKey: 'manual-key',
}
const addProviderProfile = mock((payload: {
provider: string
name: string
baseUrl: string
model: string
apiKey?: string
}) => ({
id: 'provider_codex_oauth',
provider: payload.provider,
name: payload.name,
baseUrl: payload.baseUrl,
model: payload.model,
apiKey: payload.apiKey,
}))
const updateProviderProfile = mock(() => manualProfile)
const persistCredentials = mock(() => {})
mockProviderManagerDependencies(
() => undefined,
async () => undefined,
{
addProviderProfile,
getProviderProfiles: () => [manualProfile],
updateProviderProfile,
useCodexOAuthFlow: ({ onAuthenticated }) => {
const hasAuthenticated = React.useRef(false)
React.useEffect(() => {
if (hasAuthenticated.current) {
return
}
hasAuthenticated.current = true
void onAuthenticated({
accessToken: 'oauth-access-token',
refreshToken: 'oauth-refresh-token',
accountId: 'acct_oauth',
}, persistCredentials)
}, [onAuthenticated])
return {
state: 'waiting',
authUrl: 'https://chatgpt.com/codex',
browserOpened: true,
}
},
},
)
const nonce = `${Date.now()}-${Math.random()}`
const { ProviderManager } = await import(`./ProviderManager.js?ts=${nonce}`)
const mounted = await mountProviderManager(ProviderManager, {
mode: 'first-run',
onDone,
})
await waitForFrameOutput(
mounted.getOutput,
frame => frame.includes('Set up provider') && frame.includes('Codex OAuth'),
)
await navigateToPreset(mounted.stdin, 'Codex OAuth')
mounted.stdin.write('\r')
await waitForCondition(() => onDone.mock.calls.length > 0)
expect(addProviderProfile).toHaveBeenCalledTimes(1)
expect(updateProviderProfile).not.toHaveBeenCalled()
expect(persistCredentials).toHaveBeenCalledWith({
profileId: 'provider_codex_oauth',
})
await mounted.dispose()
})
test('ProviderManager keeps Codex OAuth as next-startup only when activating the session fails from the menu', async () => {
delete process.env.CLAUDE_CODE_SIMPLE
delete process.env.CLAUDE_CODE_USE_GITHUB
delete process.env.GITHUB_TOKEN
delete process.env.GH_TOKEN
const codexProfile = {
id: 'provider_codex_oauth',
provider: 'openai',
name: 'Codex OAuth',
baseUrl: 'https://chatgpt.com/backend-api/codex',
model: 'codexplan',
apiKey: '',
}
const applySavedProfileToCurrentSession = mock(
async () => 'validation failed',
)
const setActiveProviderProfile = mock(() => codexProfile)
mockProviderManagerDependencies(
() => undefined,
async () => undefined,
{
applySavedProfileToCurrentSession,
getProviderProfiles: () => [codexProfile],
setActiveProviderProfile,
codexAsyncRead: async () => ({
accessToken: 'oauth-access-token',
refreshToken: 'oauth-refresh-token',
accountId: 'acct_oauth',
profileId: 'provider_codex_oauth',
}),
},
)
const nonce = `${Date.now()}-${Math.random()}`
const { ProviderManager } = await import(`./ProviderManager.js?ts=${nonce}`)
const mounted = await mountProviderManager(ProviderManager)
await waitForFrameOutput(
mounted.getOutput,
frame =>
frame.includes('Provider manager') &&
frame.includes('Set active provider') &&
frame.includes('Log out Codex OAuth'),
)
mounted.stdin.write('j')
await Bun.sleep(25)
mounted.stdin.write('\r')
await waitForFrameOutput(
mounted.getOutput,
frame => frame.includes('Set active provider') && frame.includes('Codex OAuth'),
)
await Bun.sleep(25)
mounted.stdin.write('\r')
await waitForCondition(() => setActiveProviderProfile.mock.calls.length > 0)
await waitForCondition(
() => applySavedProfileToCurrentSession.mock.calls.length > 0,
)
await Bun.sleep(50)
const output = stripAnsi(extractLastFrame(mounted.getOutput()))
expect(output).toContain(
'Active provider: Codex OAuth. Saved for next startup. Warning: validation failed.',
)
expect(applySavedProfileToCurrentSession).toHaveBeenCalled()
expect(setActiveProviderProfile).toHaveBeenCalledWith('provider_codex_oauth')
await mounted.dispose()
})
test('ProviderManager resolves Codex OAuth state from async storage without sync reads in render flow', async () => {
delete process.env.CLAUDE_CODE_SIMPLE
delete process.env.CLAUDE_CODE_USE_GITHUB
delete process.env.GITHUB_TOKEN
delete process.env.GH_TOKEN
const githubSyncRead = mock(() => undefined)
const githubAsyncRead = mock(async () => undefined)
const codexSyncRead = mock(() => {
throw new Error('sync codex credential read should not run in ProviderManager render flow')
})
const codexAsyncRead = mock(async () => ({
accessToken: 'codex-access-token',
refreshToken: 'codex-refresh-token',
}))
mockProviderManagerDependencies(githubSyncRead, githubAsyncRead, {
codexSyncRead,
codexAsyncRead,
})
const nonce = `${Date.now()}-${Math.random()}`
const { ProviderManager } = await import(`./ProviderManager.js?ts=${nonce}`)
const output = await renderProviderManagerFrame(ProviderManager, {
waitForOutput: frame =>
frame.includes('Provider manager') &&
frame.includes('Log out Codex OAuth'),
})
expect(output).toContain('Provider manager')
expect(output).toContain('Log out Codex OAuth')
expect(codexSyncRead).not.toHaveBeenCalled()
expect(codexAsyncRead).toHaveBeenCalled()
})
test('ProviderManager hides Codex OAuth setup in bare mode', async () => {
process.env.CLAUDE_CODE_SIMPLE = '1'
delete process.env.CLAUDE_CODE_USE_GITHUB
delete process.env.GITHUB_TOKEN
delete process.env.GH_TOKEN
const githubSyncRead = mock(() => undefined)
const githubAsyncRead = mock(async () => undefined)
mockProviderManagerDependencies(githubSyncRead, githubAsyncRead)
const nonce = `${Date.now()}-${Math.random()}`
const { ProviderManager } = await import(`./ProviderManager.js?ts=${nonce}`)
const output = await renderProviderManagerFrame(ProviderManager, {
mode: 'first-run',
waitForOutput: frame =>
frame.includes('Set up provider') && frame.includes('OpenAI'),
})
expect(output).toContain('Set up provider')
expect(output).not.toContain('Codex OAuth')
})

File diff suppressed because it is too large


@@ -281,24 +281,6 @@ export function Config({
           enabled: autoCompactEnabled
         });
       }
-    }, {
-      id: 'toolHistoryCompressionEnabled',
-      label: 'Tool history compression',
-      value: globalConfig.toolHistoryCompressionEnabled,
-      type: 'boolean' as const,
-      onChange(toolHistoryCompressionEnabled: boolean) {
-        saveGlobalConfig(current => ({
-          ...current,
-          toolHistoryCompressionEnabled
-        }));
-        setGlobalConfig({
-          ...getGlobalConfig(),
-          toolHistoryCompressionEnabled
-        });
-        logEvent('tengu_tool_history_compression_setting_changed', {
-          enabled: toolHistoryCompressionEnabled
-        });
-      }
     }, {
       id: 'spinnerTipsEnabled',
       label: 'Show tips',
@@ -1176,9 +1158,6 @@ export function Config({
     if (globalConfig.autoCompactEnabled !== initialConfig.current.autoCompactEnabled) {
       formattedChanges.push(`${globalConfig.autoCompactEnabled ? 'Enabled' : 'Disabled'} auto-compact`);
     }
-    if (globalConfig.toolHistoryCompressionEnabled !== initialConfig.current.toolHistoryCompressionEnabled) {
-      formattedChanges.push(`${globalConfig.toolHistoryCompressionEnabled ? 'Enabled' : 'Disabled'} tool history compression`);
-    }
     if (globalConfig.respectGitignore !== initialConfig.current.respectGitignore) {
       formattedChanges.push(`${globalConfig.respectGitignore ? 'Enabled' : 'Disabled'} respect .gitignore in file picker`);
     }


@@ -1,158 +0,0 @@
import { afterEach, beforeEach, describe, expect, test } from 'bun:test'
import { detectProvider } from './StartupScreen.js'
const ENV_KEYS = [
'CLAUDE_CODE_USE_OPENAI',
'CLAUDE_CODE_USE_GEMINI',
'CLAUDE_CODE_USE_GITHUB',
'CLAUDE_CODE_USE_BEDROCK',
'CLAUDE_CODE_USE_VERTEX',
'CLAUDE_CODE_USE_MISTRAL',
'OPENAI_BASE_URL',
'OPENAI_API_KEY',
'OPENAI_MODEL',
'GEMINI_MODEL',
'MISTRAL_MODEL',
'ANTHROPIC_MODEL',
'NVIDIA_NIM',
'MINIMAX_API_KEY',
]
const originalEnv: Record<string, string | undefined> = {}
beforeEach(() => {
for (const key of ENV_KEYS) {
originalEnv[key] = process.env[key]
delete process.env[key]
}
})
afterEach(() => {
for (const key of ENV_KEYS) {
if (originalEnv[key] === undefined) {
delete process.env[key]
} else {
process.env[key] = originalEnv[key]
}
}
})
function setupOpenAIMode(baseUrl: string, model: string): void {
process.env.CLAUDE_CODE_USE_OPENAI = '1'
process.env.OPENAI_BASE_URL = baseUrl
process.env.OPENAI_MODEL = model
process.env.OPENAI_API_KEY = 'test-key'
}
// --- Issue #855: aggregator URL must win over vendor-prefixed model name ---
describe('detectProvider — aggregator URL authoritative over model-name substring (#855)', () => {
test('OpenRouter + deepseek/deepseek-chat labels as OpenRouter', () => {
setupOpenAIMode('https://openrouter.ai/api/v1', 'deepseek/deepseek-chat')
expect(detectProvider().name).toBe('OpenRouter')
})
test('OpenRouter + moonshotai/kimi-k2 labels as OpenRouter', () => {
setupOpenAIMode('https://openrouter.ai/api/v1', 'moonshotai/kimi-k2')
expect(detectProvider().name).toBe('OpenRouter')
})
test('OpenRouter + mistralai/mistral-large labels as OpenRouter', () => {
setupOpenAIMode('https://openrouter.ai/api/v1', 'mistralai/mistral-large')
expect(detectProvider().name).toBe('OpenRouter')
})
test('OpenRouter + meta-llama/llama-3.3 labels as OpenRouter', () => {
setupOpenAIMode('https://openrouter.ai/api/v1', 'meta-llama/llama-3.3-70b-instruct')
expect(detectProvider().name).toBe('OpenRouter')
})
test('Together + deepseek-ai/DeepSeek-V3 labels as Together AI', () => {
setupOpenAIMode('https://api.together.xyz/v1', 'deepseek-ai/DeepSeek-V3')
expect(detectProvider().name).toBe('Together AI')
})
test('Together + meta-llama/Llama-3.3 labels as Together AI', () => {
setupOpenAIMode('https://api.together.xyz/v1', 'meta-llama/Llama-3.3-70B-Instruct-Turbo')
expect(detectProvider().name).toBe('Together AI')
})
test('Groq + deepseek-r1-distill-llama-70b labels as Groq', () => {
setupOpenAIMode('https://api.groq.com/openai/v1', 'deepseek-r1-distill-llama-70b')
expect(detectProvider().name).toBe('Groq')
})
test('Groq + llama-3.3-70b-versatile labels as Groq', () => {
setupOpenAIMode('https://api.groq.com/openai/v1', 'llama-3.3-70b-versatile')
expect(detectProvider().name).toBe('Groq')
})
test('Azure + any deepseek deployment labels as Azure OpenAI', () => {
setupOpenAIMode('https://my-resource.openai.azure.com/', 'deepseek-chat')
expect(detectProvider().name).toBe('Azure OpenAI')
})
})
// --- Direct vendor endpoints still label correctly (regression) ---
describe('detectProvider — direct vendor endpoints', () => {
test('api.deepseek.com labels as DeepSeek', () => {
setupOpenAIMode('https://api.deepseek.com/v1', 'deepseek-chat')
expect(detectProvider().name).toBe('DeepSeek')
})
test('api.moonshot.cn labels as Moonshot (Kimi)', () => {
setupOpenAIMode('https://api.moonshot.cn/v1', 'moonshot-v1-8k')
expect(detectProvider().name).toBe('Moonshot (Kimi)')
})
test('api.mistral.ai labels as Mistral', () => {
setupOpenAIMode('https://api.mistral.ai/v1', 'mistral-large-latest')
expect(detectProvider().name).toBe('Mistral')
})
test('default OpenAI URL + gpt-4o labels as OpenAI', () => {
setupOpenAIMode('https://api.openai.com/v1', 'gpt-4o')
expect(detectProvider().name).toBe('OpenAI')
})
})
// --- rawModel fallback for generic/custom endpoints ---
describe('detectProvider — rawModel fallback when URL is generic', () => {
test('custom proxy + deepseek-chat falls back to DeepSeek', () => {
setupOpenAIMode('https://my-proxy.internal/v1', 'deepseek-chat')
expect(detectProvider().name).toBe('DeepSeek')
})
test('custom proxy + kimi-k2 falls back to Moonshot (Kimi)', () => {
setupOpenAIMode('https://my-proxy.internal/v1', 'kimi-k2-instruct')
expect(detectProvider().name).toBe('Moonshot (Kimi)')
})
test('custom proxy + llama-3.3 falls back to Meta Llama', () => {
setupOpenAIMode('https://my-proxy.internal/v1', 'llama-3.3-70b')
expect(detectProvider().name).toBe('Meta Llama')
})
test('custom proxy + mistral-large falls back to Mistral', () => {
setupOpenAIMode('https://my-proxy.internal/v1', 'mistral-large-latest')
expect(detectProvider().name).toBe('Mistral')
})
})
// --- Explicit env flags win over URL heuristics ---
describe('detectProvider — explicit dedicated-provider env flags', () => {
test('NVIDIA_NIM=1 overrides aggregator URL', () => {
setupOpenAIMode('https://openrouter.ai/api/v1', 'some-nim-model')
process.env.NVIDIA_NIM = '1'
expect(detectProvider().name).toBe('NVIDIA NIM')
})
test('MINIMAX_API_KEY overrides aggregator URL', () => {
setupOpenAIMode('https://openrouter.ai/api/v1', 'any-model')
process.env.MINIMAX_API_KEY = 'test-key'
expect(detectProvider().name).toBe('MiniMax')
})
})


@@ -5,7 +5,7 @@
  * Addresses: https://github.com/Gitlawb/openclaude/issues/55
  */
-import { isLocalProviderUrl, resolveProviderRequest } from '../services/api/providerConfig.js'
+import { isLocalProviderUrl } from '../services/api/providerConfig.js'
 import { getLocalOpenAICompatibleProviderLabel } from '../utils/providerDiscovery.js'
 import { getSettings_DEPRECATED } from '../utils/settings/settings.js'
 import { parseUserSpecifiedModel } from '../utils/model/model.js'
@@ -83,7 +83,7 @@ const LOGO_CLAUDE = [
 // ─── Provider detection ───────────────────────────────────────────────────────
-export function detectProvider(): { name: string; model: string; baseUrl: string; isLocal: boolean } {
+function detectProvider(): { name: string; model: string; baseUrl: string; isLocal: boolean } {
   const useGemini = process.env.CLAUDE_CODE_USE_GEMINI === '1' || process.env.CLAUDE_CODE_USE_GEMINI === 'true'
   const useGithub = process.env.CLAUDE_CODE_USE_GITHUB === '1' || process.env.CLAUDE_CODE_USE_GITHUB === 'true'
   const useOpenAI = process.env.CLAUDE_CODE_USE_OPENAI === '1' || process.env.CLAUDE_CODE_USE_OPENAI === 'true'
@@ -110,46 +110,39 @@ export function detectProvider(): { name: string; model: string; baseUrl: string; isLocal: boolean } {
   if (useOpenAI) {
     const rawModel = process.env.OPENAI_MODEL || 'gpt-4o'
-    const resolvedRequest = resolveProviderRequest({
-      model: rawModel,
-      baseUrl: process.env.OPENAI_BASE_URL,
-    })
-    const baseUrl = resolvedRequest.baseUrl
+    const baseUrl = process.env.OPENAI_BASE_URL || 'https://api.openai.com/v1'
     const isLocal = isLocalProviderUrl(baseUrl)
     let name = 'OpenAI'
-    // Explicit dedicated-provider env flags win.
-    if (process.env.NVIDIA_NIM) name = 'NVIDIA NIM'
-    else if (process.env.MINIMAX_API_KEY) name = 'MiniMax'
-    else if (
-      resolvedRequest.transport === 'codex_responses' ||
-      baseUrl.includes('chatgpt.com/backend-api/codex')
-    )
-      name = 'Codex'
-    // Base URL is authoritative — must precede rawModel checks so aggregators
-    // (OpenRouter/Together/Groq) aren't mislabelled as DeepSeek/Kimi/etc.
-    // when routed to models whose IDs contain a vendor prefix. See issue #855.
+    if (/deepseek/i.test(baseUrl) || /deepseek/i.test(rawModel)) name = 'DeepSeek'
     else if (/openrouter/i.test(baseUrl)) name = 'OpenRouter'
     else if (/together/i.test(baseUrl)) name = 'Together AI'
     else if (/groq/i.test(baseUrl)) name = 'Groq'
+    else if (/mistral/i.test(baseUrl) || /mistral/i.test(rawModel)) name = 'Mistral'
     else if (/azure/i.test(baseUrl)) name = 'Azure OpenAI'
-    else if (/nvidia/i.test(baseUrl)) name = 'NVIDIA NIM'
-    else if (/minimax/i.test(baseUrl)) name = 'MiniMax'
-    else if (/moonshot/i.test(baseUrl)) name = 'Moonshot (Kimi)'
-    else if (/deepseek/i.test(baseUrl)) name = 'DeepSeek'
-    else if (/mistral/i.test(baseUrl)) name = 'Mistral'
-    // rawModel fallback — fires only when base URL is generic/custom.
-    else if (/nvidia/i.test(rawModel)) name = 'NVIDIA NIM'
-    else if (/minimax/i.test(rawModel)) name = 'MiniMax'
-    else if (/kimi/i.test(rawModel)) name = 'Moonshot (Kimi)'
-    else if (/deepseek/i.test(rawModel)) name = 'DeepSeek'
-    else if (/mistral/i.test(rawModel)) name = 'Mistral'
     else if (/llama/i.test(rawModel)) name = 'Meta Llama'
     else if (isLocal) name = getLocalOpenAICompatibleProviderLabel(baseUrl)
     // Resolve model alias to actual model name + reasoning effort
-    let displayModel = resolvedRequest.resolvedModel
-    if (resolvedRequest.reasoning?.effort) {
-      displayModel = `${displayModel} (${resolvedRequest.reasoning.effort})`
+    let displayModel = rawModel
+    const codexAliases: Record<string, { model: string; reasoningEffort?: string }> = {
+      codexplan: { model: 'gpt-5.4', reasoningEffort: 'high' },
+      'gpt-5.4': { model: 'gpt-5.4', reasoningEffort: 'high' },
+      'gpt-5.3-codex': { model: 'gpt-5.3-codex', reasoningEffort: 'high' },
+      'gpt-5.3-codex-spark': { model: 'gpt-5.3-codex-spark' },
+      codexspark: { model: 'gpt-5.3-codex-spark' },
+      'gpt-5.2-codex': { model: 'gpt-5.2-codex', reasoningEffort: 'high' },
+      'gpt-5.1-codex-max': { model: 'gpt-5.1-codex-max', reasoningEffort: 'high' },
+      'gpt-5.1-codex-mini': { model: 'gpt-5.1-codex-mini' },
+      'gpt-5.4-mini': { model: 'gpt-5.4-mini', reasoningEffort: 'medium' },
+      'gpt-5.2': { model: 'gpt-5.2', reasoningEffort: 'medium' },
+    }
+    const alias = rawModel.toLowerCase()
+    if (alias in codexAliases) {
+      const resolved = codexAliases[alias]
+      displayModel = resolved.model
+      if (resolved.reasoningEffort) {
+        displayModel = `${displayModel} (${resolved.reasoningEffort})`
+      }
     }
     return { name, model: displayModel, baseUrl, isLocal }
@@ -159,9 +152,7 @@ export function detectProvider(): { name: string; model: string; baseUrl: string; isLocal: boolean } {
   const settings = getSettings_DEPRECATED() || {}
   const modelSetting = settings.model || process.env.ANTHROPIC_MODEL || process.env.CLAUDE_MODEL || 'claude-sonnet-4-6'
   const resolvedModel = parseUserSpecifiedModel(modelSetting)
-  const baseUrl = process.env.ANTHROPIC_BASE_URL ?? 'https://api.anthropic.com'
-  const isLocal = isLocalProviderUrl(baseUrl)
-  return { name: 'Anthropic', model: resolvedModel, baseUrl, isLocal }
+  return { name: 'Anthropic', model: resolvedModel, baseUrl: 'https://api.anthropic.com', isLocal: false }
 }
 // ─── Box drawing ──────────────────────────────────────────────────────────────


@@ -6,7 +6,6 @@ import stripAnsi from 'strip-ansi'
 import { createRoot } from '../ink.js'
 import { AppStateProvider } from '../state/AppState.js'
-import { maskTextWithVisibleEdges } from '../utils/Cursor.js'
 import TextInput from './TextInput.js'
 import VimTextInput from './VimTextInput.js'
@@ -200,13 +199,6 @@ test('TextInput renders typed characters before delayed parent value commits', async () => {
   expect(output).not.toContain('Type here...')
 })
-test('maskTextWithVisibleEdges preserves only the first and last three chars', () => {
-  expect(maskTextWithVisibleEdges('sk-secret-12345678', '*')).toBe(
-    'sk-************678',
-  )
-  expect(maskTextWithVisibleEdges('abcdef', '*')).toBe('******')
-})
 test('VimTextInput preserves rapid typed characters before delayed parent value commits', async () => {
   const { stdout, stdin, getOutput } = createTestStreams()
   const root = await createRoot({


@@ -1,4 +1,3 @@
-import { c as _c } from "react-compiler-runtime";
 import { feature } from 'bun:bundle';
 import React, { createContext, useContext, useEffect, useMemo, useState } from 'react';
 import useStdin from '../../ink/hooks/use-stdin.js';
@@ -120,21 +119,8 @@ export function ThemeProvider({
  * accepts any ThemeSetting (including 'auto').
  */
 export function useTheme() {
-  const $ = _c(3);
-  const {
-    currentTheme,
-    setThemeSetting
-  } = useContext(ThemeContext);
-  let t0;
-  if ($[0] !== currentTheme || $[1] !== setThemeSetting) {
-    t0 = [currentTheme, setThemeSetting];
-    $[0] = currentTheme;
-    $[1] = setThemeSetting;
-    $[2] = t0;
-  } else {
-    t0 = $[2];
-  }
-  return t0;
+  const { currentTheme, setThemeSetting } = useContext(ThemeContext);
+  return [currentTheme, setThemeSetting] as const;
 }
 /**
@@ -145,25 +131,10 @@ export function useThemeSetting() {
   return useContext(ThemeContext).themeSetting;
 }
 export function usePreviewTheme() {
-  const $ = _c(4);
-  const {
-    setPreviewTheme,
-    savePreview,
-    cancelPreview
-  } = useContext(ThemeContext);
-  let t0;
-  if ($[0] !== cancelPreview || $[1] !== savePreview || $[2] !== setPreviewTheme) {
-    t0 = {
-      setPreviewTheme,
-      savePreview,
-      cancelPreview
-    };
-    $[0] = cancelPreview;
-    $[1] = savePreview;
-    $[2] = setPreviewTheme;
-    $[3] = t0;
-  } else {
-    t0 = $[3];
-  }
-  return t0;
+  const { setPreviewTheme, savePreview, cancelPreview } = useContext(ThemeContext);
+  return {
+    setPreviewTheme,
+    savePreview,
+    cancelPreview,
+  };
 }


@@ -2,7 +2,7 @@ import { c as _c } from "react-compiler-runtime";
 import { feature } from 'bun:bundle';
 import chalk from 'chalk';
 import { mkdir } from 'fs/promises';
-import { basename, join } from 'path';
+import { join } from 'path';
 import * as React from 'react';
 import { use, useEffect, useState } from 'react';
 import { getOriginalCwd } from '../../bootstrap/state.js';
@@ -24,7 +24,6 @@ import { projectIsInGitRepo } from '../../utils/memory/versions.js';
 import { updateSettingsForSource } from '../../utils/settings/settings.js';
 import { Select } from '../CustomSelect/index.js';
 import { ListItem } from '../design-system/ListItem.js';
-import { getProjectMemoryPathForSelector } from './memoryFileSelectorPaths.js';
 /* eslint-disable @typescript-eslint/no-require-imports */
 const teamMemPaths = feature('TEAMMEM') ? require('../../memdir/teamMemPaths.js') as typeof import('../../memdir/teamMemPaths.js') : null;
@@ -49,10 +48,8 @@ export function MemoryFileSelector(t0) {
     onCancel
   } = t0;
   const existingMemoryFiles = use(getMemoryFiles());
-  const originalCwd = getOriginalCwd();
   const userMemoryPath = join(getClaudeConfigHomeDir(), "CLAUDE.md");
-  const projectMemoryPath = getProjectMemoryPathForSelector(existingMemoryFiles, originalCwd);
-  const projectMemoryFileName = basename(projectMemoryPath);
+  const projectMemoryPath = join(getOriginalCwd(), "CLAUDE.md");
   const hasUserMemory = existingMemoryFiles.some(f => f.path === userMemoryPath);
   const hasProjectMemory = existingMemoryFiles.some(f_0 => f_0.path === projectMemoryPath);
   const allMemoryFiles = [...existingMemoryFiles.filter(_temp).map(_temp2), ...(hasUserMemory ? [] : [{
@@ -88,12 +85,12 @@ export function MemoryFileSelector(t0) {
     }
   }
   let description;
-  const isGit = projectIsInGitRepo(originalCwd);
+  const isGit = projectIsInGitRepo(getOriginalCwd());
   if (file.type === "User" && !file.isNested) {
     description = "Saved in ~/.claude/CLAUDE.md";
   } else {
     if (file.type === "Project" && !file.isNested && file.path === projectMemoryPath) {
-      description = `${isGit ? "Checked in at" : "Saved in"} ./${projectMemoryFileName}`;
+      description = `${isGit ? "Checked in at" : "Saved in"} ./CLAUDE.md`;
     } else {
       if (file.parent) {
         description = "@-imported";


@@ -1,72 +0,0 @@
import { describe, expect, test } from 'bun:test'
import { join } from 'node:path'
import type { MemoryFileInfo } from '../../utils/claudemd.js'
import { getProjectMemoryPathForSelector } from './memoryFileSelectorPaths.js'
function projectFile(path: string): MemoryFileInfo {
return {
path,
type: 'Project',
content: '',
}
}
describe('getProjectMemoryPathForSelector', () => {
test('uses the loaded repo-level AGENTS.md from a nested cwd', () => {
const repoDir = '/repo'
const nestedDir = join(repoDir, 'packages', 'app')
expect(
getProjectMemoryPathForSelector(
[projectFile(join(repoDir, 'AGENTS.md'))],
nestedDir,
),
).toBe(join(repoDir, 'AGENTS.md'))
})
test('uses the loaded repo-level CLAUDE.md fallback from a nested cwd', () => {
const repoDir = '/repo'
const nestedDir = join(repoDir, 'packages', 'app')
expect(
getProjectMemoryPathForSelector(
[projectFile(join(repoDir, 'CLAUDE.md'))],
nestedDir,
),
).toBe(join(repoDir, 'CLAUDE.md'))
})
test('prefers the closest loaded ancestor instruction file', () => {
const repoDir = '/repo'
const nestedProjectDir = join(repoDir, 'packages', 'app')
expect(
getProjectMemoryPathForSelector(
[
projectFile(join(repoDir, 'AGENTS.md')),
projectFile(join(nestedProjectDir, 'CLAUDE.md')),
],
join(nestedProjectDir, 'src'),
),
).toBe(join(nestedProjectDir, 'CLAUDE.md'))
})
test('defaults to a new AGENTS.md in the current cwd when no project file is loaded', () => {
const cwd = join('/repo', 'packages', 'app')
expect(getProjectMemoryPathForSelector([], cwd)).toBe(
join(cwd, 'AGENTS.md'),
)
})
test('ignores loaded project instruction files outside the current cwd ancestry', () => {
const outsideRepoPath = join('/other-worktree', 'AGENTS.md')
const cwd = join('/repo', 'packages', 'app')
expect(
getProjectMemoryPathForSelector(
[projectFile(outsideRepoPath)],
cwd,
),
).toBe(join(cwd, 'AGENTS.md'))
})
})


@@ -1,34 +0,0 @@
import { basename, join } from 'path'
import type { MemoryFileInfo } from '../../utils/claudemd.js'
import {
findProjectInstructionFilePathInAncestors,
isProjectInstructionFileName,
PRIMARY_PROJECT_INSTRUCTION_FILE,
} from '../../utils/projectInstructions.js'
function isLoadedProjectInstructionFile(file: MemoryFileInfo): boolean {
return (
file.type === 'Project' &&
file.parent === undefined &&
isProjectInstructionFileName(basename(file.path))
)
}
export function getProjectMemoryPathForSelector(
existingMemoryFiles: MemoryFileInfo[],
cwd: string,
): string {
const loadedProjectInstructionPaths = new Set(
existingMemoryFiles
.filter(isLoadedProjectInstructionFile)
.map(file => file.path),
)
return (
findProjectInstructionFilePathInAncestors(
cwd,
path => loadedProjectInstructionPaths.has(path),
) ?? join(cwd, PRIMARY_PROJECT_INSTRUCTION_FILE)
)
}


@@ -1,173 +0,0 @@
import React from 'react'
import { getOriginalCwd } from '../../../bootstrap/state.js'
import { Box, Text } from '../../../ink.js'
import { sanitizeToolNameForAnalytics } from '../../../services/analytics/metadata.js'
import { env } from '../../../utils/env.js'
import { shouldShowAlwaysAllowOptions } from '../../../utils/permissions/permissionsLoader.js'
import { usePermissionRequestLogging } from '../hooks.js'
import { PermissionDialog } from '../PermissionDialog.js'
import {
PermissionPrompt,
type PermissionPromptOption,
} from '../PermissionPrompt.js'
import type { PermissionRequestProps } from '../PermissionRequest.js'
import { PermissionRuleExplanation } from '../PermissionRuleExplanation.js'
import { logUnaryPermissionEvent } from '../utils.js'
type OptionValue = 'yes' | 'yes-dont-ask-again' | 'no'
export function MonitorPermissionRequest({
toolUseConfirm,
onDone,
onReject,
workerBadge,
}: PermissionRequestProps) {
const { command, description } = toolUseConfirm.input as {
command?: string
description?: string
}
usePermissionRequestLogging(toolUseConfirm, {
completion_type: 'tool_use_single',
language_name: 'none',
})
const handleSelect = (
value: OptionValue,
feedback?: string,
) => {
switch (value) {
case 'yes': {
logUnaryPermissionEvent({
completion_type: 'tool_use_single',
event: 'accept',
metadata: {
language_name: 'none',
message_id: toolUseConfirm.assistantMessage.message.id,
platform: env.platform,
},
})
toolUseConfirm.onAllow(toolUseConfirm.input, [], feedback)
onDone()
break
}
case 'yes-dont-ask-again': {
logUnaryPermissionEvent({
completion_type: 'tool_use_single',
event: 'accept',
metadata: {
language_name: 'none',
message_id: toolUseConfirm.assistantMessage.message.id,
platform: env.platform,
},
})
// Save the rule under 'Bash' toolName because checkPermissions
// delegates to bashToolHasPermission which matches rules against
// BashTool. Using 'Monitor' here would create a rule that's never
// checked. Command-specific prefix (like BashTool's shellRuleMatching).
const cmdForRule = command?.trim() || ''
const prefix = cmdForRule.split(/\s+/).slice(0, 2).join(' ')
toolUseConfirm.onAllow(toolUseConfirm.input, prefix ? [
{
type: 'addRules',
rules: [{ toolName: 'Bash', ruleContent: `${prefix}:*` }],
behavior: 'allow',
destination: 'localSettings',
},
] : [])
onDone()
break
}
case 'no': {
logUnaryPermissionEvent({
completion_type: 'tool_use_single',
event: 'reject',
metadata: {
language_name: 'none',
message_id: toolUseConfirm.assistantMessage.message.id,
platform: env.platform,
},
})
toolUseConfirm.onReject(feedback)
onReject()
onDone()
break
}
}
}
const handleCancel = () => {
logUnaryPermissionEvent({
completion_type: 'tool_use_single',
event: 'reject',
metadata: {
language_name: 'none',
message_id: toolUseConfirm.assistantMessage.message.id,
platform: env.platform,
},
})
toolUseConfirm.onReject()
onReject()
onDone()
}
const showAlwaysAllow = shouldShowAlwaysAllowOptions()
const originalCwd = getOriginalCwd()
const options: PermissionPromptOption<OptionValue>[] = [
{
label: 'Yes',
value: 'yes',
feedbackConfig: { type: 'accept' },
},
]
if (showAlwaysAllow) {
options.push({
label: (
<Text>
Yes, and don&apos;t ask again for{' '}
<Text bold>Monitor</Text> commands in{' '}
<Text bold>{originalCwd}</Text>
</Text>
),
value: 'yes-dont-ask-again',
})
}
options.push({
label: 'No',
value: 'no',
feedbackConfig: { type: 'reject' },
})
const toolAnalyticsContext = {
toolName: sanitizeToolNameForAnalytics(toolUseConfirm.tool.name),
isMcp: toolUseConfirm.tool.isMcp ?? false,
}
return (
<PermissionDialog title="Monitor" workerBadge={workerBadge}>
<Box flexDirection="column" paddingX={2} paddingY={1}>
<Text>
Monitor({command ?? ''})
</Text>
{description ? (
<Text dimColor>{description}</Text>
) : null}
</Box>
<Box flexDirection="column">
<PermissionRuleExplanation
permissionResult={toolUseConfirm.permissionResult}
toolType="tool"
/>
<PermissionPrompt
options={options}
onSelect={handleSelect}
onCancel={handleCancel}
toolAnalyticsContext={toolAnalyticsContext}
/>
</Box>
</PermissionDialog>
)
}


@@ -1,220 +0,0 @@
import { PassThrough } from 'node:stream'
import { afterEach, expect, mock, test } from 'bun:test'
import React from 'react'
import { createRoot, Text } from '../ink.js'
const SYNC_START = '\x1B[?2026h'
const SYNC_END = '\x1B[?2026l'
function createTestStreams(): {
stdout: PassThrough
stdin: PassThrough & {
isTTY: boolean
setRawMode: (mode: boolean) => void
ref: () => void
unref: () => void
}
getOutput: () => string
} {
let output = ''
const stdout = new PassThrough()
const stdin = new PassThrough() as PassThrough & {
isTTY: boolean
setRawMode: (mode: boolean) => void
ref: () => void
unref: () => void
}
stdin.isTTY = true
stdin.setRawMode = () => {}
stdin.ref = () => {}
stdin.unref = () => {}
;(stdout as unknown as { columns: number }).columns = 120
stdout.on('data', chunk => {
output += chunk.toString()
})
return {
stdout,
stdin,
getOutput: () => output,
}
}
async function waitForCondition(
predicate: () => boolean,
options?: { timeoutMs?: number; intervalMs?: number },
): Promise<void> {
const timeoutMs = options?.timeoutMs ?? 5000
const intervalMs = options?.intervalMs ?? 10
const startedAt = Date.now()
while (Date.now() - startedAt < timeoutMs) {
if (predicate()) {
return
}
await Bun.sleep(intervalMs)
}
throw new Error('Timed out waiting for useCodexOAuthFlow test condition')
}
function extractLastFrame(output: string): string {
let lastFrame: string | null = null
let cursor = 0
while (cursor < output.length) {
const start = output.indexOf(SYNC_START, cursor)
if (start === -1) break
const contentStart = start + SYNC_START.length
const end = output.indexOf(SYNC_END, contentStart)
if (end === -1) break
const frame = output.slice(contentStart, end)
if (frame.trim().length > 0) {
lastFrame = frame
}
cursor = end + SYNC_END.length
}
return lastFrame ?? output
}
const TOKENS = {
accessToken: 'oauth-access-token',
refreshToken: 'oauth-refresh-token',
accountId: 'acct_oauth',
idToken: 'oauth-id-token',
apiKey: 'oauth-api-key',
}
afterEach(() => {
mock.restore()
})
test('does not persist credentials when downstream setup rejects', async () => {
const saveCodexCredentials = mock(() => ({ success: true }))
const cleanup = mock(() => {})
const onAuthenticated = mock(async () => {
throw new Error('profile save failed')
})
const deps = {
createOAuthService: () => ({
async startOAuthFlow(
onAuthorizationUrl: (authUrl: string) => void | Promise<void>,
) {
await onAuthorizationUrl('https://chatgpt.com/codex')
return TOKENS
},
cleanup,
}),
openBrowser: async () => true,
saveCodexCredentials,
isBareMode: () => false,
}
const { useCodexOAuthFlow } = await import(
`./useCodexOAuthFlow.js?real-reject-${Date.now()}-${Math.random()}`
)
function Harness(): React.ReactNode {
const handleAuthenticated = React.useCallback(onAuthenticated, [onAuthenticated])
const status = useCodexOAuthFlow({
onAuthenticated: handleAuthenticated,
deps,
})
return <Text>{status.state === 'error' ? status.message : status.state}</Text>
}
const streams = createTestStreams()
const root = await createRoot({
stdout: streams.stdout as unknown as NodeJS.WriteStream,
stdin: streams.stdin as unknown as NodeJS.ReadStream,
patchConsole: false,
})
root.render(<Harness />)
try {
await waitForCondition(() => onAuthenticated.mock.calls.length === 1)
await Bun.sleep(0)
await Bun.sleep(0)
expect(onAuthenticated).toHaveBeenCalled()
expect(saveCodexCredentials).not.toHaveBeenCalled()
} finally {
root.unmount()
streams.stdin.end()
streams.stdout.end()
await Bun.sleep(0)
}
})
test('persists credentials with profile linkage after downstream setup succeeds', async () => {
const saveCodexCredentials = mock(() => ({ success: true }))
const onAuthenticated = mock(
async (
_tokens: typeof TOKENS,
persistCredentials: (options?: { profileId?: string }) => void,
) => {
persistCredentials({ profileId: 'profile_codex_oauth' })
},
)
const cleanup = mock(() => {})
const deps = {
createOAuthService: () => ({
async startOAuthFlow(
onAuthorizationUrl: (authUrl: string) => void | Promise<void>,
) {
await onAuthorizationUrl('https://chatgpt.com/codex')
return TOKENS
},
cleanup,
}),
openBrowser: async () => true,
saveCodexCredentials,
isBareMode: () => false,
}
const { useCodexOAuthFlow } = await import(
`./useCodexOAuthFlow.js?real-persist-${Date.now()}-${Math.random()}`
)
function Harness(): React.ReactNode {
const handleAuthenticated = React.useCallback(onAuthenticated, [onAuthenticated])
useCodexOAuthFlow({
onAuthenticated: handleAuthenticated,
deps,
})
return <Text>waiting</Text>
}
const streams = createTestStreams()
const root = await createRoot({
stdout: streams.stdout as unknown as NodeJS.WriteStream,
stdin: streams.stdin as unknown as NodeJS.ReadStream,
patchConsole: false,
})
root.render(<Harness />)
try {
await waitForCondition(() => onAuthenticated.mock.calls.length === 1)
await waitForCondition(() => saveCodexCredentials.mock.calls.length === 1)
expect(onAuthenticated).toHaveBeenCalled()
expect(saveCodexCredentials).toHaveBeenCalledWith({
apiKey: TOKENS.apiKey,
accessToken: TOKENS.accessToken,
refreshToken: TOKENS.refreshToken,
idToken: TOKENS.idToken,
accountId: TOKENS.accountId,
profileId: 'profile_codex_oauth',
})
} finally {
root.unmount()
streams.stdin.end()
streams.stdout.end()
await Bun.sleep(0)
}
})


@@ -1,134 +0,0 @@
import * as React from 'react'
import {
CodexOAuthService,
type CodexOAuthTokens,
} from '../services/api/codexOAuth.js'
import { openBrowser } from '../utils/browser.js'
import { saveCodexCredentials } from '../utils/codexCredentials.js'
import { isBareMode } from '../utils/envUtils.js'
export type CodexOAuthFlowStatus =
| { state: 'starting' }
| {
state: 'waiting'
authUrl: string
browserOpened: boolean | null
}
| {
state: 'error'
message: string
}
type PersistCodexOAuthCredentials = (options?: {
profileId?: string
}) => void
type CodexOAuthFlowDependencies = {
createOAuthService?: () => Pick<
CodexOAuthService,
'startOAuthFlow' | 'cleanup'
>
openBrowser?: typeof openBrowser
saveCodexCredentials?: typeof saveCodexCredentials
isBareMode?: typeof isBareMode
}
function createDefaultOAuthService(): Pick<
CodexOAuthService,
'startOAuthFlow' | 'cleanup'
> {
return new CodexOAuthService()
}
export function useCodexOAuthFlow(options: {
onAuthenticated: (
tokens: CodexOAuthTokens,
persistCredentials: PersistCodexOAuthCredentials,
) => void | Promise<void>
deps?: CodexOAuthFlowDependencies
}): CodexOAuthFlowStatus {
const { onAuthenticated } = options
const createOAuthService =
options.deps?.createOAuthService ?? createDefaultOAuthService
const openBrowserFn = options.deps?.openBrowser ?? openBrowser
const saveCredentials =
options.deps?.saveCodexCredentials ?? saveCodexCredentials
const isBareModeFn = options.deps?.isBareMode ?? isBareMode
const [status, setStatus] = React.useState<CodexOAuthFlowStatus>({
state: 'starting',
})
React.useEffect(() => {
if (isBareModeFn()) {
setStatus({
state: 'error',
message:
'Codex OAuth is unavailable in --bare because secure storage is disabled.',
})
return
}
let cancelled = false
const oauthService = createOAuthService()
void oauthService
.startOAuthFlow(async authUrl => {
if (cancelled) return
setStatus({
state: 'waiting',
authUrl,
browserOpened: null,
})
const browserOpened = await openBrowserFn(authUrl)
if (cancelled) return
setStatus({
state: 'waiting',
authUrl,
browserOpened,
})
})
.then(async tokens => {
if (cancelled) return
const persistCredentials: PersistCodexOAuthCredentials = options => {
const saved = saveCredentials({
apiKey: tokens.apiKey,
accessToken: tokens.accessToken,
refreshToken: tokens.refreshToken,
idToken: tokens.idToken,
accountId: tokens.accountId,
profileId: options?.profileId,
})
if (!saved.success) {
throw new Error(
saved.warning ??
'Codex OAuth succeeded, but credentials could not be saved securely.',
)
}
}
await onAuthenticated(tokens, persistCredentials)
})
.catch(error => {
if (cancelled) return
setStatus({
state: 'error',
message: error instanceof Error ? error.message : String(error),
})
})
return () => {
cancelled = true
oauthService.cleanup()
}
}, [
createOAuthService,
isBareModeFn,
onAuthenticated,
openBrowserFn,
saveCredentials,
])
return status
}


@@ -1,16 +1,5 @@
 import { afterEach, expect, test } from 'bun:test'
-// MACRO is replaced at build time by Bun.define but not in test mode.
-// Define it globally so tests that import modules using MACRO don't crash.
-;(globalThis as Record<string, unknown>).MACRO = {
-  VERSION: '99.0.0',
-  DISPLAY_VERSION: '0.0.0-test',
-  BUILD_TIME: new Date().toISOString(),
-  ISSUES_EXPLAINER: 'report the issue at https://github.com/anthropics/claude-code/issues',
-  PACKAGE_URL: '@gitlawb/openclaude',
-  NATIVE_PACKAGE_URL: undefined,
-}
 import { getSystemPrompt, DEFAULT_AGENT_PROMPT } from './prompts.js'
 import { CLI_SYSPROMPT_PREFIXES, getCLISyspromptPrefix } from './system.js'
 import { CLAUDE_CODE_GUIDE_AGENT } from '../tools/AgentTool/built-in/claudeCodeGuideAgent.js'


@@ -823,11 +823,6 @@ function getFunctionResultClearingSection(model: string): string | null {
     return null
   }
   const config = getCachedMCConfigForFRC()
-  if (!config) {
-    // External/stub builds return null from getCachedMCConfig — abort the
-    // section rather than trying to read .supportedModels off null.
-    return null
-  }
   const isModelSupported = config.supportedModels?.some(pattern =>
     model.includes(pattern),
   )


@@ -37,6 +37,8 @@ export const ALL_AGENT_DISALLOWED_TOOLS = new Set([
   TASK_OUTPUT_TOOL_NAME,
   EXIT_PLAN_MODE_V2_TOOL_NAME,
   ENTER_PLAN_MODE_TOOL_NAME,
+  // Allow Agent tool for agents when user is ant (enables nested agents)
+  ...(process.env.USER_TYPE === 'ant' ? [] : [AGENT_TOOL_NAME]),
   ASK_USER_QUESTION_TOOL_NAME,
   TASK_STOP_TOOL_NAME,
   // Prevent recursive workflow execution inside subagents.
@@ -80,9 +82,9 @@ export const IN_PROCESS_TEAMMATE_ALLOWED_TOOLS = new Set([
   SEND_MESSAGE_TOOL_NAME,
   // Teammate-created crons are tagged with the creating agentId and routed to
   // that teammate's pendingUserMessages queue (see useScheduledTasks.ts).
-  CRON_CREATE_TOOL_NAME,
-  CRON_DELETE_TOOL_NAME,
-  CRON_LIST_TOOL_NAME,
+  ...(feature('AGENT_TRIGGERS')
+    ? [CRON_CREATE_TOOL_NAME, CRON_DELETE_TOOL_NAME, CRON_LIST_TOOL_NAME]
+    : []),
 ])
 /*


@@ -1,18 +0,0 @@
import type { BuiltInAgentDefinition } from '../tools/AgentTool/loadAgentsDir.js'
import { EXPLORE_AGENT } from '../tools/AgentTool/built-in/exploreAgent.js'
import { GENERAL_PURPOSE_AGENT } from '../tools/AgentTool/built-in/generalPurposeAgent.js'
import { PLAN_AGENT } from '../tools/AgentTool/built-in/planAgent.js'
// The coordinator system prompt instructs the model to spawn workers with
// subagent_type: "worker". This agent definition matches that type so
// AgentTool.tsx can resolve it. It reuses GENERAL_PURPOSE_AGENT's capabilities.
const WORKER_AGENT: BuiltInAgentDefinition = {
...GENERAL_PURPOSE_AGENT,
agentType: 'worker',
whenToUse:
'Worker agent for coordinator mode. Executes tasks autonomously — research, implementation, or verification.',
}
export function getCoordinatorAgents(): BuiltInAgentDefinition[] {
return [WORKER_AGENT, GENERAL_PURPOSE_AGENT, EXPLORE_AGENT, PLAN_AGENT]
}


@@ -5,7 +5,7 @@ import {
 } from '../utils/providerProfile.js'
 import {
   getProviderValidationError,
-  validateProviderEnvForStartupOrExit,
+  validateProviderEnvOrExit,
 } from '../utils/providerValidation.js'
 // OpenClaude: polyfill globalThis.File for Node < 20.
@@ -132,7 +132,7 @@ async function main(): Promise<void> {
     hydrateGithubModelsTokenFromSecureStorage()
   }
-  await validateProviderEnvForStartupOrExit()
+  await validateProviderEnvOrExit()
   // Print the gradient startup screen before the Ink UI loads
   const { printStartupScreen } = await import('../components/StartupScreen.js')


@@ -1,75 +0,0 @@
import { describe, it, expect, mock } from 'bun:test'
import { getCombinedTools, loadReexposedMcpTools } from './mcp.js'
import type { Tool as InternalTool } from '../Tool.js'
import type { MCPServerConnection } from '../services/mcp/types.js'
import type { Tool } from '@modelcontextprotocol/sdk/types.js'
// Mock the MCP client service to control the tools and connections returned
const mockGetMcpToolsCommandsAndResources = mock(async (onConnectionAttempt: any) => {})
mock.module('../services/mcp/client.js', () => ({
getMcpToolsCommandsAndResources: mockGetMcpToolsCommandsAndResources
}))
describe('getCombinedTools', () => {
it('deduplicates builtins when mcpTools have the same name, prioritizing mcpTools', () => {
const builtinBash = { name: 'Bash', isMcp: false } as unknown as InternalTool
const builtinRead = { name: 'Read', isMcp: false } as unknown as InternalTool
const mcpBash = { name: 'Bash', isMcp: true } as unknown as InternalTool
const builtins = [builtinBash, builtinRead]
const mcpTools = [mcpBash]
const result = getCombinedTools(builtins, mcpTools)
expect(result).toHaveLength(2)
expect(result[0]).toBe(mcpBash)
expect(result[1]).toBe(builtinRead)
})
})
describe('loadReexposedMcpTools', () => {
it('loads tools and clients regardless of connection state (including needs-auth)', async () => {
// Setup the mock to simulate yielding a needs-auth server and a connected server
mockGetMcpToolsCommandsAndResources.mockImplementation(async (onConnectionAttempt) => {
const needsAuthClient = {
name: 'auth-server',
type: 'needs-auth',
config: {}
} as MCPServerConnection
const authTool = {
name: 'mcp__auth-server__authenticate',
isMcp: true
} as unknown as InternalTool
const connectedClient = {
name: 'connected-server',
type: 'connected',
config: {},
client: {}
} as MCPServerConnection
const connectedTool = {
name: 'mcp__connected-server__do_thing',
isMcp: true
} as unknown as InternalTool
// Simulate the callback behavior
onConnectionAttempt({ client: needsAuthClient, tools: [authTool], commands: [] })
onConnectionAttempt({ client: connectedClient, tools: [connectedTool], commands: [] })
})
const { mcpClients, mcpTools } = await loadReexposedMcpTools()
expect(mcpClients).toHaveLength(2)
expect(mcpClients[0].type).toBe('needs-auth')
expect(mcpClients[1].type).toBe('connected')
expect(mcpTools).toHaveLength(2)
expect(mcpTools[0].name).toBe('mcp__auth-server__authenticate')
expect(mcpTools[1].name).toBe('mcp__connected-server__do_thing')
// Reset mock for other tests
mockGetMcpToolsCommandsAndResources.mockReset()
})
})


@@ -7,7 +7,6 @@ process.env.CLAUDE_CODE_DISABLE_EXPERIMENTAL_BETAS ??= 'true'
 import { Server } from '@modelcontextprotocol/sdk/server/index.js'
 import { StdioServerTransport } from '@modelcontextprotocol/sdk/server/stdio.js'
-import { ZodError } from 'zod'
 import {
   CallToolRequestSchema,
   type CallToolResult,
@@ -18,12 +17,9 @@
 import { getDefaultAppState } from 'src/state/AppStateStore.js'
 import review from '../commands/review.js'
 import type { Command } from '../commands.js'
-import { getMcpToolsCommandsAndResources } from '../services/mcp/client.js'
-import type { MCPServerConnection } from '../services/mcp/types.js'
 import {
   findToolByName,
   getEmptyToolPermissionContext,
-  type Tool as InternalTool,
   type ToolUseContext,
 } from '../Tool.js'
 import { getTools } from '../tools.js'
@@ -43,32 +39,6 @@ type ToolOutput = Tool['outputSchema']
 const MCP_COMMANDS: Command[] = [review]
-export function getCombinedTools(
-  builtins: InternalTool[],
-  mcpTools: InternalTool[],
-): InternalTool[] {
-  const mcpToolNames = new Set(mcpTools.map(t => t.name))
-  const deduplicatedBuiltins = builtins.filter(t => !mcpToolNames.has(t.name))
-  return [...mcpTools, ...deduplicatedBuiltins]
-}
-export async function loadReexposedMcpTools(): Promise<{
-  mcpClients: MCPServerConnection[]
-  mcpTools: InternalTool[]
-}> {
-  const mcpClients: MCPServerConnection[] = []
-  const mcpTools: InternalTool[] = []
-  // Load configured MCP clients and their tools
-  await getMcpToolsCommandsAndResources(({ client, tools: clientTools }) => {
-    mcpClients.push(client)
-    mcpTools.push(...clientTools)
-  })
-  return { mcpClients, mcpTools }
-}
 export async function startMCPServer(
   cwd: string,
   debug: boolean,
@@ -93,13 +63,12 @@ export async function startMCPServer(
     },
   )
-  const { mcpClients, mcpTools } = await loadReexposedMcpTools()
   server.setRequestHandler(
     ListToolsRequestSchema,
     async (): Promise<ListToolsResult> => {
+      // TODO: Also re-expose any MCP tools
       const toolPermissionContext = getEmptyToolPermissionContext()
-      const tools = getCombinedTools(getTools(toolPermissionContext), mcpTools)
+      const tools = getTools(toolPermissionContext)
       return {
         tools: await Promise.all(
           tools.map(async tool => {
@@ -125,7 +94,7 @@
               tools,
               agents: [],
             }),
-            inputSchema: (tool.inputJSONSchema ?? zodToJsonSchema(tool.inputSchema)) as ToolInput,
+            inputSchema: zodToJsonSchema(tool.inputSchema) as ToolInput,
             outputSchema,
           }
         }),
@@ -138,7 +107,8 @@
     CallToolRequestSchema,
     async ({ params: { name, arguments: args } }): Promise<CallToolResult> => {
       const toolPermissionContext = getEmptyToolPermissionContext()
-      const tools = getCombinedTools(getTools(toolPermissionContext), mcpTools)
+      // TODO: Also re-expose any MCP tools
+      const tools = getTools(toolPermissionContext)
       const tool = findToolByName(tools, name)
       if (!tool) {
         throw new Error(`Tool ${name} not found`)
@@ -153,7 +123,7 @@
         tools,
         mainLoopModel: getMainLoopModel(),
         thinkingConfig: { type: 'disabled' },
-        mcpClients,
+        mcpClients: [],
         mcpResources: {},
         isNonInteractiveSession: true,
         debug,
@@ -170,16 +140,13 @@
         updateAttributionState: () => {},
       }
+      // TODO: validate input types with zod
       try {
         if (!tool.isEnabled()) {
           throw new Error(`Tool ${name} is not enabled`)
         }
-        // Validate input types with zod
-        const parsedArgs = tool.inputSchema.parse(args ?? {})
         const validationResult = await tool.validateInput?.(
-          (parsedArgs as never) ?? {},
+          (args as never) ?? {},
           toolUseContext,
         )
         if (validationResult && !validationResult.result) {
@@ -188,7 +155,7 @@
          )
        }
        const finalResult = await tool.call(
-          (parsedArgs ?? {}) as never,
+          (args ?? {}) as never,
          toolUseContext,
          hasPermissionsToUseTool,
          createAssistantMessage({
@@ -196,49 +163,19 @@
         }),
       )
-      let content: CallToolResult['content']
-      const data = finalResult.data as string | { type: string; text?: string; source?: { type: string; media_type: string; data: string } }[] | unknown
-      if (typeof data === 'string') {
-        content = [{ type: 'text', text: data }]
-      } else if (Array.isArray(data)) {
-        content = data.map((block: any) => {
-          if (block.type === 'text') {
-            return { type: 'text', text: block.text || '' }
-          } else if (block.type === 'image' && block.source) {
-            return {
-              type: 'image',
-              data: block.source.data,
-              mimeType: block.source.media_type,
-            }
-          } else {
-            // eslint-disable-next-line custom-rules/no-top-level-side-effects, no-console
-            console.warn(`Unmapped content block type from tool ${name}: ${block.type || 'unknown'}`)
-            return { type: 'text', text: jsonStringify(block) }
-          }
-        }) as CallToolResult['content']
-      } else {
-        content = [{ type: 'text', text: jsonStringify(data) }]
-      }
-      return {
-        content,
-        isError: !!(finalResult as any).isError,
-      }
-    } catch (error) {
-      logError(error)
-      if (error instanceof ZodError) {
-        return {
-          isError: true,
-          content: [
-            {
-              type: 'text',
-              text: `Tool ${name} input is invalid:\n${error.errors.map(e => `- ${e.path.join('.')}: ${e.message}`).join('\n')}`,
-            },
-          ],
-        }
-      }
+      return {
+        content: [
+          {
+            type: 'text' as const,
+            text:
+              typeof finalResult === 'string'
+                ? finalResult
+                : jsonStringify(finalResult.data),
+          },
+        ],
+      }
+    } catch (error) {
+      logError(error)
       const parts =
         error instanceof Error ? getErrorParts(error) : [String(error)]
@@ -264,4 +201,3 @@
   return await runServer()
 }


@@ -114,8 +114,8 @@
       .boolean()
       .optional()
       .describe(
-        'Allow trusted, user-initiated commands to run outside the sandbox. ' +
-        'When false, sandbox override requests are ignored and all commands must run sandboxed. ' +
+        'Allow commands to run outside the sandbox via the dangerouslyDisableSandbox parameter. ' +
+        'When false, the dangerouslyDisableSandbox parameter is completely ignored and all commands must run sandboxed. ' +
         'Default: true.',
       ),
     network: SandboxNetworkConfigSchema(),

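The describe() text above encodes a simple gate: the boolean setting decides whether a per-command dangerouslyDisableSandbox request is honoured at all. A minimal TypeScript sketch of that behaviour, with an assumed settings shape and helper name (the real schema lives in SandboxSettingsSchema):

// Sketch only — `allowDisable` and `shouldSandbox` are illustrative names,
// not the repo's actual identifiers.
type SandboxSettings = { allowDisable?: boolean }

function shouldSandbox(
  settings: SandboxSettings,
  dangerouslyDisableSandbox: boolean | undefined,
): boolean {
  // Default is true: override requests are honoured unless explicitly disabled.
  if (!(settings.allowDisable ?? true)) {
    return true // the override parameter is completely ignored
  }
  return !dangerouslyDisableSandbox
}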

@@ -1,123 +0,0 @@
import { PassThrough } from 'node:stream'
import { afterEach, expect, mock, test } from 'bun:test'
import React from 'react'
import { createRoot, Text } from '../ink.js'
type AuthState = {
anthropicAuthEnabled: boolean
claudeSubscriber: boolean
key?: string
source?: string
}
function createTestStreams(): {
stdout: PassThrough
stdin: PassThrough & {
isTTY: boolean
setRawMode: (mode: boolean) => void
ref: () => void
unref: () => void
}
} {
const stdout = new PassThrough()
const stdin = new PassThrough() as PassThrough & {
isTTY: boolean
setRawMode: (mode: boolean) => void
ref: () => void
unref: () => void
}
stdin.isTTY = true
stdin.setRawMode = () => {}
stdin.ref = () => {}
stdin.unref = () => {}
;(stdout as unknown as { columns: number }).columns = 120
return { stdout, stdin }
}
async function waitForCondition(
predicate: () => boolean,
timeoutMs = 2000,
): Promise<void> {
const startedAt = Date.now()
while (Date.now() - startedAt < timeoutMs) {
if (predicate()) {
return
}
await Bun.sleep(10)
}
throw new Error('Timed out waiting for useApiKeyVerification test state')
}
afterEach(() => {
mock.restore()
})
test('useApiKeyVerification resets stale missing status when the session switches to a third-party provider', async () => {
const authState: AuthState = {
anthropicAuthEnabled: true,
claudeSubscriber: false,
}
const seenStatuses: string[] = []
mock.module('../utils/auth.js', () => ({
getAnthropicApiKeyWithSource: () => ({
key: authState.key,
source: authState.source,
}),
getApiKeyFromApiKeyHelper: async () => undefined,
isAnthropicAuthEnabled: () => authState.anthropicAuthEnabled,
isClaudeAISubscriber: () => authState.claudeSubscriber,
}))
mock.module('../bootstrap/state.js', () => ({
getIsNonInteractiveSession: () => false,
}))
mock.module('../services/api/claude.js', () => ({
verifyApiKey: async () => true,
}))
// @ts-expect-error cache-busting query string for Bun module mocks
const { useApiKeyVerification } = await import(
'./useApiKeyVerification.ts?switch-to-third-party'
)
function Harness(): React.ReactNode {
const { status } = useApiKeyVerification()
React.useEffect(() => {
seenStatuses.push(status)
}, [status])
return <Text>{status}</Text>
}
const { stdout, stdin } = createTestStreams()
const root = await createRoot({
stdout: stdout as unknown as NodeJS.WriteStream,
stdin: stdin as unknown as NodeJS.ReadStream,
patchConsole: false,
})
root.render(<Harness />)
await waitForCondition(() => seenStatuses.includes('missing'))
authState.anthropicAuthEnabled = false
root.render(<Harness />)
await waitForCondition(() => seenStatuses.includes('valid'))
root.unmount()
stdin.end()
stdout.end()
await Bun.sleep(0)
expect(seenStatuses[0]).toBe('missing')
expect(seenStatuses).toContain('valid')
})


@@ -1,4 +1,4 @@
-import { useCallback, useEffect, useState } from 'react'
+import { useCallback, useState } from 'react'
 import { getIsNonInteractiveSession } from '../bootstrap/state.js'
 import { verifyApiKey } from '../services/api/claude.js'
 import {
@@ -21,7 +21,8 @@ export type ApiKeyVerificationResult = {
   error: Error | null
 }
-function getInitialVerificationStatus(): VerificationStatus {
+export function useApiKeyVerification(): ApiKeyVerificationResult {
+  const [status, setStatus] = useState<VerificationStatus>(() => {
   if (!isAnthropicAuthEnabled() || isClaudeAISubscriber()) {
     return 'valid'
   }
@@ -36,28 +37,8 @@ function getInitialVerificationStatus(): VerificationStatus {
     return 'loading'
   }
   return 'missing'
-}
-export function useApiKeyVerification(): ApiKeyVerificationResult {
-  const [status, setStatus] = useState<VerificationStatus>(
-    getInitialVerificationStatus,
-  )
+  })
   const [error, setError] = useState<Error | null>(null)
-  const anthropicVerificationEnabled =
-    isAnthropicAuthEnabled() && !isClaudeAISubscriber()
-  useEffect(() => {
-    const nextStatus = anthropicVerificationEnabled
-      ? getInitialVerificationStatus()
-      : 'valid'
-    setStatus(currentStatus =>
-      currentStatus === nextStatus ? currentStatus : nextStatus,
-    )
-    if (nextStatus !== 'error') {
-      setError(null)
-    }
-  }, [anthropicVerificationEnabled])
   const verify = useCallback(async (): Promise<void> => {
     if (!isAnthropicAuthEnabled() || isClaudeAISubscriber()) {

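The rewrite above folds getInitialVerificationStatus() into a useState lazy initializer. The general pattern, as a self-contained sketch (computeInitialStatus is a stand-in, not a function from this codebase):

import { useState } from 'react'

// A lazy initializer runs exactly once, on the first render; later renders
// never recompute it or clobber state that event handlers have already set.
function computeInitialStatus(): 'valid' | 'missing' | 'loading' {
  return 'loading' // placeholder logic for the sketch
}

export function useVerificationStatus() {
  const [status, setStatus] = useState(() => computeInitialStatus())
  return { status, setStatus }
}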

@@ -19,7 +19,7 @@ async function _temp() {
   logForDebugging("Showing marketplace config save failure notification");
   notifs.push({
     key: "marketplace-config-save-failed",
-    jsx: <Text color="error">Failed to save marketplace retry info · Check ~/.openclaude.json permissions</Text>,
+    jsx: <Text color="error">Failed to save marketplace retry info · Check ~/.claude.json permissions</Text>,
     priority: "immediate",
     timeoutMs: 10000
   });


@@ -1,8 +1,5 @@
 import { expect, test } from 'bun:test'
-import {
-  shouldHandleInputAsPaste,
-  supportsClipboardImageFallback,
-} from './usePasteHandler.ts'
+import { supportsClipboardImageFallback } from './usePasteHandler.ts'
 test('supports clipboard image fallback on Windows', () => {
   expect(supportsClipboardImageFallback('windows')).toBe(true)
@@ -23,42 +20,3 @@ test('does not support clipboard image fallback on WSL', () => {
 test('does not support clipboard image fallback on unknown platforms', () => {
   expect(supportsClipboardImageFallback('unknown')).toBe(false)
 })
-test('does not treat a bracketed paste as pending when no paste handlers are provided', () => {
-  expect(
-    shouldHandleInputAsPaste({
-      hasTextPasteHandler: false,
-      hasImagePasteHandler: false,
-      inputLength: 'kimi-k2.5'.length,
-      pastePending: false,
-      hasImageFilePath: false,
-      isFromPaste: true,
-    }),
-  ).toBe(false)
-})
-test('treats bracketed text paste as pending when a text paste handler exists', () => {
-  expect(
-    shouldHandleInputAsPaste({
-      hasTextPasteHandler: true,
-      hasImagePasteHandler: false,
-      inputLength: 'kimi-k2.5'.length,
-      pastePending: false,
-      hasImageFilePath: false,
-      isFromPaste: true,
-    }),
-  ).toBe(true)
-})
-test('treats image path paste as pending when only an image handler exists', () => {
-  expect(
-    shouldHandleInputAsPaste({
-      hasTextPasteHandler: false,
-      hasImagePasteHandler: true,
-      inputLength: 'C:\\Users\\jat\\image.png'.length,
-      pastePending: false,
-      hasImageFilePath: true,
-      isFromPaste: false,
-    }),
-  ).toBe(true)
-})


@@ -35,24 +35,6 @@ type PasteHandlerProps = {
   ) => void
 }
-export function shouldHandleInputAsPaste(options: {
-  hasTextPasteHandler: boolean
-  hasImagePasteHandler: boolean
-  inputLength: number
-  pastePending: boolean
-  hasImageFilePath: boolean
-  isFromPaste: boolean
-}): boolean {
-  return (
-    (options.hasTextPasteHandler &&
-      (options.inputLength > PASTE_THRESHOLD ||
-        options.pastePending ||
-        options.hasImageFilePath ||
-        options.isFromPaste)) ||
-    (options.hasImagePasteHandler && options.hasImageFilePath)
-  )
-}
 export function usePasteHandler({
   onPaste,
   onInput,
@@ -254,6 +236,11 @@ export function usePasteHandler({
   // The keypress parser sets isPasted=true for content within bracketed paste.
   const isFromPaste = event.keypress.isPasted
+  // If this is pasted content, set isPasting state for UI feedback
+  if (isFromPaste) {
+    setIsPasting(true)
+  }
   // Handle large pastes (>PASTE_THRESHOLD chars)
   // Usually we get one or two input characters at a time. If we
   // get more than the threshold, the user has probably pasted.
@@ -281,7 +268,6 @@ export function usePasteHandler({
     canFallbackToClipboardImage &&
     onImagePaste
   ) {
-    setIsPasting(true)
     checkClipboardForImage()
     // Reset isPasting since there's no text content to process
     setIsPasting(false)
@@ -289,17 +275,14 @@ export function usePasteHandler({
   }
   // Check if we should handle as paste (from bracketed paste, large input, or continuation)
-  const shouldHandleAsPaste = shouldHandleInputAsPaste({
-    hasTextPasteHandler: Boolean(onPaste),
-    hasImagePasteHandler: Boolean(onImagePaste),
-    inputLength: input.length,
-    pastePending: pastePendingRef.current,
-    hasImageFilePath,
-    isFromPaste,
-  })
+  const shouldHandleAsPaste =
+    onPaste &&
+    (input.length > PASTE_THRESHOLD ||
+      pastePendingRef.current ||
+      hasImageFilePath ||
+      isFromPaste)
   if (shouldHandleAsPaste) {
-    setIsPasting(true)
     pastePendingRef.current = true
     setPasteState(({ chunks, timeoutId }) => {
       return {

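The comment above carries the whole heuristic: terminals deliver keystrokes one or two characters at a time, so an input chunk larger than PASTE_THRESHOLD almost certainly came from the clipboard, and bracketed paste mode marks pasted content explicitly. Distilled into a standalone predicate (the threshold value here is assumed; the real constant lives in usePasteHandler.ts):

const PASTE_THRESHOLD = 200 // assumed value for the sketch

function looksLikePaste(input: string, isBracketedPaste: boolean): boolean {
  // Bracketed paste is authoritative; the size check is the fallback for
  // terminals that don't support it.
  return isBracketedPaste || input.length > PASTE_THRESHOLD
}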

@@ -434,7 +434,7 @@ export function useReplBridge(messages: Message[], setMessages: (action: React.S
   if (!store.getState().toolPermissionContext.isBypassPermissionsModeAvailable) {
     return {
       ok: false,
-      error: 'Cannot set permission mode to bypassPermissions. Enable it with --allow-dangerously-skip-permissions or set permissions.allowBypassPermissionsMode in settings.json'
+      error: 'Cannot set permission mode to bypassPermissions because the session was not launched with --dangerously-skip-permissions'
     };
   }
 }


@@ -1,23 +1,34 @@
 /**
- * Swarm Permission Callback Registry
+ * Swarm Permission Poller Hook
  *
- * Manages callback registrations for permission requests and responses
- * in agent swarms. Responses are delivered exclusively via the mailbox
- * system (useInboxPoller → processMailboxPermissionResponse).
+ * This hook polls for permission responses from the team leader when running
+ * as a worker agent in a swarm. When a response is received, it calls the
+ * appropriate callback (onAllow/onReject) to continue execution.
  *
- * The legacy file-based polling (resolved/ directory) has been removed
- * because it created an unauthenticated attack surface — any local process
- * could forge approval files. The mailbox path is the sole active channel.
+ * This hook should be used in conjunction with the worker-side integration
+ * in useCanUseTool.ts, which creates pending requests that this hook monitors.
  */
+import { useCallback, useEffect, useRef } from 'react'
+import { useInterval } from 'usehooks-ts'
 import { logForDebugging } from '../utils/debug.js'
+import { errorMessage } from '../utils/errors.js'
 import {
   type PermissionUpdate,
   permissionUpdateSchema,
 } from '../utils/permissions/PermissionUpdateSchema.js'
+import {
+  isSwarmWorker,
+  type PermissionResponse,
+  pollForResponse,
+  removeWorkerResponse,
+} from '../utils/swarm/permissionSync.js'
+import { getAgentName, getTeamName } from '../utils/teammate.js'
+const POLL_INTERVAL_MS = 500
 /**
- * Validate permissionUpdates from external sources (mailbox IPC).
+ * Validate permissionUpdates from external sources (mailbox IPC, disk polling).
  * Malformed entries from buggy/old teammate processes are filtered out rather
  * than propagated unchecked into callback.onAllow().
  */
@@ -214,9 +225,106 @@ export function processSandboxPermissionResponse(params: {
   return true
 }
-// Legacy file-based polling (useSwarmPermissionPoller, processResponse)
-// has been removed. Permission responses are now delivered exclusively
-// via the mailbox system:
-//   Leader: sendPermissionResponseViaMailbox() → writeToMailbox()
-//   Worker: useInboxPoller → processMailboxPermissionResponse()
-// See: fix(security) — remove unauthenticated file-based permission channel
+/**
+ * Process a permission response by invoking the registered callback
+ */
+function processResponse(response: PermissionResponse): boolean {
+  const callback = pendingCallbacks.get(response.requestId)
+  if (!callback) {
+    logForDebugging(
+      `[SwarmPermissionPoller] No callback registered for request ${response.requestId}`,
+    )
+    return false
+  }
+  logForDebugging(
+    `[SwarmPermissionPoller] Processing response for request ${response.requestId}: ${response.decision}`,
+  )
+  // Remove from registry before invoking callback
+  pendingCallbacks.delete(response.requestId)
+  if (response.decision === 'approved') {
+    const permissionUpdates = parsePermissionUpdates(response.permissionUpdates)
+    const updatedInput = response.updatedInput
+    callback.onAllow(updatedInput, permissionUpdates)
+  } else {
+    callback.onReject(response.feedback)
+  }
+  return true
+}
+/**
+ * Hook that polls for permission responses when running as a swarm worker.
+ *
+ * This hook:
+ * 1. Only activates when isSwarmWorker() returns true
+ * 2. Polls every 500ms for responses
+ * 3. When a response is found, invokes the registered callback
+ * 4. Cleans up the response file after processing
+ */
+export function useSwarmPermissionPoller(): void {
+  const isProcessingRef = useRef(false)
+  const poll = useCallback(async () => {
+    // Don't poll if not a swarm worker
+    if (!isSwarmWorker()) {
+      return
+    }
+    // Prevent concurrent polling
+    if (isProcessingRef.current) {
+      return
+    }
+    // Don't poll if no callbacks are registered
+    if (pendingCallbacks.size === 0) {
+      return
+    }
+    isProcessingRef.current = true
+    try {
+      const agentName = getAgentName()
+      const teamName = getTeamName()
+      if (!agentName || !teamName) {
+        return
+      }
+      // Check each pending request for a response
+      for (const [requestId, _callback] of pendingCallbacks) {
+        const response = await pollForResponse(requestId, agentName, teamName)
+        if (response) {
+          // Process the response
+          const processed = processResponse(response)
+          if (processed) {
+            // Clean up the response from the worker's inbox
+            await removeWorkerResponse(requestId, agentName, teamName)
+          }
+        }
+      }
+    } catch (error) {
+      logForDebugging(
+        `[SwarmPermissionPoller] Error during poll: ${errorMessage(error)}`,
+      )
+    } finally {
+      isProcessingRef.current = false
+    }
+  }, [])
+  // Only poll if we're a swarm worker
+  const shouldPoll = isSwarmWorker()
+  useInterval(() => void poll(), shouldPoll ? POLL_INTERVAL_MS : null)
+  // Initial poll on mount
+  useEffect(() => {
+    if (isSwarmWorker()) {
+      void poll()
+    }
+  }, [poll])
+}

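Both versions of the header comment agree on one invariant: permissionUpdates arriving from external processes are validated entry-by-entry, so a single malformed update cannot poison the batch passed to callback.onAllow(). The shape of that validate-and-filter step, sketched with a stand-in schema (the real one is permissionUpdateSchema):

import { z } from 'zod'

// Stand-in schema for the sketch; the repo's permissionUpdateSchema is richer.
const updateSchema = z.object({
  rule: z.string(),
  behavior: z.enum(['allow', 'deny']),
})
type Update = z.infer<typeof updateSchema>

function parseUpdates(raw: unknown): Update[] {
  if (!Array.isArray(raw)) return []
  // safeParse each entry; drop failures instead of throwing on the batch.
  return raw.flatMap(entry => {
    const result = updateSchema.safeParse(entry)
    return result.success ? [result.data] : []
  })
}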

@@ -11,16 +11,14 @@ const execFileNoThrowMock = mock(
   async () => ({ code: 0, stdout: '', stderr: '' }),
 )
-function installOscMocks(): void {
-  mock.module('../../utils/execFileNoThrow.js', () => ({
-    execFileNoThrow: execFileNoThrowMock,
-    execFileNoThrowWithCwd: execFileNoThrowMock,
-  }))
-  mock.module('../../utils/tempfile.js', () => ({
-    generateTempFilePath: generateTempFilePathMock,
-  }))
-}
+mock.module('../../utils/execFileNoThrow.js', () => ({
+  execFileNoThrow: execFileNoThrowMock,
+  execFileNoThrowWithCwd: execFileNoThrowMock,
+}))
+mock.module('../../utils/tempfile.js', () => ({
+  generateTempFilePath: generateTempFilePathMock,
+}))
 async function importFreshOscModule() {
   return import(`./osc.ts?ts=${Date.now()}-${Math.random()}`)
@@ -47,7 +45,6 @@ async function waitForExecCall(
 describe('Windows clipboard fallback', () => {
   beforeEach(() => {
-    installOscMocks()
     execFileNoThrowMock.mockClear()
     generateTempFilePathMock.mockClear()
     process.env = { ...originalEnv }
@@ -65,12 +62,14 @@ describe('Windows clipboard fallback', () => {
     const { setClipboard } = await importFreshOscModule()
     await setClipboard('Привет мир')
-    const windowsCall = await waitForExecCall('powershell')
+    await flushClipboardCopy()
     expect(execFileNoThrowMock.mock.calls.some(([cmd]) => cmd === 'clip')).toBe(
       false,
     )
-    expect(windowsCall).toBeDefined()
+    expect(
+      execFileNoThrowMock.mock.calls.some(([cmd]) => cmd === 'powershell'),
+    ).toBe(true)
   })
   test('passes Windows clipboard text through a UTF-8 temp file instead of stdin', async () => {
@@ -98,7 +97,6 @@ describe('Windows clipboard fallback', () => {
 describe('clipboard path behavior remains stable', () => {
   beforeEach(() => {
-    installOscMocks()
     execFileNoThrowMock.mockClear()
     process.env = { ...originalEnv }
     delete process.env['SSH_CONNECTION']


@@ -481,16 +481,16 @@ export const CLEAR_TAB_STATUS = osc(
 )
 /**
- * Gate for emitting OSC 21337 (tab-status indicator). Currently disabled
- * (spec is unstable). Terminals that don't recognize it discard silently,
- * so emission is safe unconditionally — we don't gate on terminal detection
+ * Gate for emitting OSC 21337 (tab-status indicator). Ant-only while the
+ * spec is unstable. Terminals that don't recognize it discard silently, so
+ * emission is safe unconditionally — we don't gate on terminal detection
  * since support is expected across several terminals.
  *
 * Callers must wrap output with wrapForMultiplexer() so tmux/screen
 * DCS-passthrough carries the sequence to the outer terminal.
 */
 export function supportsTabStatus(): boolean {
-  return false
+  return process.env.USER_TYPE === 'ant'
 }
 /**

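Both sides keep the wrapForMultiplexer() requirement: tmux and screen swallow escape sequences they don't recognize unless the sequence is tunnelled through a DCS passthrough. A rough sketch of the tmux case (the repo's actual helper may differ, and modern tmux additionally requires its allow-passthrough option):

function wrapForTmux(sequence: string): string {
  // DCS passthrough: ESC P tmux; <payload> ESC \ — with every ESC inside the
  // payload doubled so tmux forwards it verbatim to the outer terminal.
  return `\x1bPtmux;${sequence.replace(/\x1b/g, '\x1b\x1b')}\x1b\\`
}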

@@ -74,7 +74,7 @@ export function isTeamMemoryEnabled(): boolean {
   if (!isAutoMemoryEnabled()) {
     return false
   }
-  return getFeatureValue_CACHED_MAY_BE_STALE('tengu_herring_clock', true)
+  return getFeatureValue_CACHED_MAY_BE_STALE('tengu_herring_clock', false)
 }
 /**


@@ -12,7 +12,7 @@ import {
  * One-shot migration: clear skipAutoPermissionPrompt for users who accepted
  * the old 2-option AutoModeOptInDialog but don't have auto as their default.
  * Re-surfaces the dialog so they see the new "make it my default mode" option.
- * Guard lives in GlobalConfig (~/.openclaude.json), not settings.json, so it
+ * Guard lives in GlobalConfig (~/.claude.json), not settings.json, so it
  * survives settings resets and doesn't re-arm itself.
  *
  * Only runs when tengu_auto_mode_config.enabled === 'enabled'. For 'opt-in'


@@ -1,62 +0,0 @@
import { afterEach, describe, expect, test } from 'bun:test'
import { mkdir, mkdtemp, rm, writeFile } from 'node:fs/promises'
import { tmpdir } from 'node:os'
import { join } from 'node:path'
import {
getSteps,
isProjectOnboardingComplete,
} from './projectOnboardingSteps.js'
import { runWithCwdOverride } from './utils/cwd.js'
let tempDir: string | undefined
afterEach(async () => {
if (tempDir) {
await rm(tempDir, { recursive: true, force: true })
tempDir = undefined
}
})
describe('project onboarding completion', () => {
test('is incomplete when neither AGENTS.md nor CLAUDE.md exists', async () => {
tempDir = await mkdtemp(join(tmpdir(), 'project-onboarding-'))
await runWithCwdOverride(tempDir, async () => {
expect(isProjectOnboardingComplete()).toBe(false)
expect(getSteps()[1]?.text).toContain('/init')
expect(getSteps()[1]?.text).toContain('AGENTS.md')
expect(getSteps()[1]?.text).toContain('CLAUDE.md')
})
})
test('is complete when only CLAUDE.md exists', async () => {
tempDir = await mkdtemp(join(tmpdir(), 'project-onboarding-'))
await writeFile(join(tempDir, 'CLAUDE.md'), '# CLAUDE.md\n')
await runWithCwdOverride(tempDir, async () => {
expect(isProjectOnboardingComplete()).toBe(true)
})
})
test('is complete when only AGENTS.md exists', async () => {
tempDir = await mkdtemp(join(tmpdir(), 'project-onboarding-'))
await writeFile(join(tempDir, 'AGENTS.md'), '# AGENTS.md\n')
await runWithCwdOverride(tempDir, async () => {
expect(isProjectOnboardingComplete()).toBe(true)
})
})
test('is complete from a nested cwd when repo instructions exist in an ancestor directory', async () => {
tempDir = await mkdtemp(join(tmpdir(), 'project-onboarding-'))
const nestedDir = join(tempDir, 'packages', 'app')
await writeFile(join(tempDir, 'AGENTS.md'), '# AGENTS.md\n')
await mkdir(nestedDir, { recursive: true })
await writeFile(join(nestedDir, 'index.ts'), 'export {}\n')
await runWithCwdOverride(nestedDir, async () => {
expect(isProjectOnboardingComplete()).toBe(true)
})
})
})


@@ -1,14 +1,50 @@
 import memoize from 'lodash-es/memoize.js'
+import { join } from 'path'
 import {
   getCurrentProjectConfig,
   saveCurrentProjectConfig,
 } from './utils/config.js'
-export {
-  getSteps,
-  isProjectOnboardingComplete,
-  type Step,
-} from './projectOnboardingSteps.js'
-import { isProjectOnboardingComplete } from './projectOnboardingSteps.js'
+import { getCwd } from './utils/cwd.js'
+import { isDirEmpty } from './utils/file.js'
+import { getFsImplementation } from './utils/fsOperations.js'
+export type Step = {
+  key: string
+  text: string
+  isComplete: boolean
+  isCompletable: boolean
+  isEnabled: boolean
+}
+export function getSteps(): Step[] {
+  const hasClaudeMd = getFsImplementation().existsSync(
+    join(getCwd(), 'CLAUDE.md'),
+  )
+  const isWorkspaceDirEmpty = isDirEmpty(getCwd())
+  return [
+    {
+      key: 'workspace',
+      text: 'Ask Claude to create a new app or clone a repository',
+      isComplete: false,
+      isCompletable: true,
+      isEnabled: isWorkspaceDirEmpty,
+    },
+    {
+      key: 'claudemd',
+      text: 'Run /init to create a CLAUDE.md file with instructions for Claude',
+      isComplete: hasClaudeMd,
+      isCompletable: true,
+      isEnabled: !isWorkspaceDirEmpty,
+    },
+  ]
+}
+export function isProjectOnboardingComplete(): boolean {
+  return getSteps()
+    .filter(({ isCompletable, isEnabled }) => isCompletable && isEnabled)
+    .every(({ isComplete }) => isComplete)
+}
 export function maybeMarkProjectOnboardingComplete(): void {
   // Short-circuit on cached config — isProjectOnboardingComplete() hits

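The deleted projectOnboardingSteps.ts (below) checked for repo instructions in ancestor directories via findProjectInstructionFilePathInAncestors, which the new CLAUDE.md-only getSteps() drops. Per the deleted tests, the ancestor walk amounts to this sketch (signature and helper name assumed):

import { existsSync } from 'node:fs'
import { dirname, join } from 'node:path'

function findInstructionFile(startDir: string): string | null {
  let dir = startDir
  for (;;) {
    // Either instruction file counts, per the deleted tests.
    for (const name of ['AGENTS.md', 'CLAUDE.md']) {
      const candidate = join(dir, name)
      if (existsSync(candidate)) return candidate
    }
    const parent = dirname(dir)
    if (parent === dir) return null // reached the filesystem root
    dir = parent
  }
}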

@@ -1,44 +0,0 @@
import { getCwd } from './utils/cwd.js'
import { isDirEmpty } from './utils/file.js'
import { getFsImplementation } from './utils/fsOperations.js'
import { findProjectInstructionFilePathInAncestors } from './utils/projectInstructions.js'
export type Step = {
key: string
text: string
isComplete: boolean
isCompletable: boolean
isEnabled: boolean
}
export function getSteps(): Step[] {
const hasRepoInstructions =
findProjectInstructionFilePathInAncestors(
getCwd(),
getFsImplementation().existsSync,
) !== null
const isWorkspaceDirEmpty = isDirEmpty(getCwd())
return [
{
key: 'workspace',
text: 'Ask Claude to create a new app or clone a repository',
isComplete: false,
isCompletable: true,
isEnabled: isWorkspaceDirEmpty,
},
{
key: 'claudemd',
text: 'Set up repo instructions (/init creates AGENTS.md or updates existing CLAUDE.md; either file counts)',
isComplete: hasRepoInstructions,
isCompletable: true,
isEnabled: !isWorkspaceDirEmpty,
},
]
}
export function isProjectOnboardingComplete(): boolean {
return getSteps()
.filter(({ isCompletable, isEnabled }) => isCompletable && isEnabled)
.every(({ isComplete }) => isComplete)
}


@@ -160,7 +160,6 @@ function* yieldMissingToolResultBlocks(
  * rules, ye will be punished with an entire day of debugging and hair pulling.
  */
 const MAX_OUTPUT_TOKENS_RECOVERY_LIMIT = 3
-const MAX_CONTINUATION_NUDGES = 3
 /**
  * Is this a max_output_tokens error message? If so, the streaming loop should
@@ -210,10 +209,6 @@ type State = {
   pendingToolUseSummary: Promise<ToolUseSummaryMessage | null> | undefined
   stopHookActive: boolean | undefined
   turnCount: number
-  // Count of consecutive continuation nudges within the current turn.
-  // Capped at MAX_CONTINUATION_NUDGES to prevent infinite nudge loops
-  // when the model keeps matching continuation signals without tool calls.
-  continuationNudgeCount: number
   // Why the previous iteration continued. Undefined on first iteration.
   // Lets tests assert recovery paths fired without inspecting message contents.
   transition: Continue | undefined
@@ -277,7 +272,6 @@ async function* queryLoop(
   maxOutputTokensRecoveryCount: 0,
   hasAttemptedReactiveCompact: false,
   turnCount: 1,
-  continuationNudgeCount: 0,
   pendingToolUseSummary: undefined,
   transition: undefined,
 }
@@ -651,35 +645,6 @@ async function* queryLoop(
     }
   }
-  // Safety net: when auto-compact's circuit breaker has tripped (3+
-  // consecutive failures), the normal blocking check above is gated on
-  // reactiveCompact. If reactiveCompact is also enabled but ALSO fails
-  // (or is disabled), the oversized context goes straight to the API and
-  // gets a 500. This check catches that gap — if compaction is exhausted
-  // and context is still over the autocompact threshold, block immediately
-  // with a clear message instead of burning an API call that will 500.
-  if (
-    tracking?.consecutiveFailures !== undefined &&
-    tracking.consecutiveFailures >= 3 &&
-    isAutoCompactEnabled()
-  ) {
-    const model = toolUseContext.options.mainLoopModel
-    const tokenUsage = tokenCountWithEstimation(messagesForQuery) - snipTokensFreed
-    const { isAboveAutoCompactThreshold } = calculateTokenWarningState(
-      tokenUsage,
-      model,
-    )
-    if (isAboveAutoCompactThreshold) {
-      yield createAssistantAPIErrorMessage({
-        content:
-          'The conversation has exceeded the context limit and automatic compaction has failed. ' +
-          'Press esc twice to go up a few messages and try again, or start a new session with /new.',
-        error: 'invalid_request',
-      })
-      return { reason: 'blocking_limit' }
-    }
-  }
   let attemptWithFallback = true
   queryCheckpoint('query_api_loop_start')
@@ -1137,7 +1102,6 @@ async function* queryLoop(
   pendingToolUseSummary: undefined,
   stopHookActive: undefined,
   turnCount,
-  continuationNudgeCount: state.continuationNudgeCount,
   transition: {
     reason: 'collapse_drain_retry',
     committed: drained.committed,
@@ -1191,7 +1155,6 @@ async function* queryLoop(
   pendingToolUseSummary: undefined,
   stopHookActive: undefined,
   turnCount,
-  continuationNudgeCount: state.continuationNudgeCount,
   transition: { reason: 'reactive_compact_retry' },
 }
 state = next
@@ -1247,7 +1210,6 @@ async function* queryLoop(
   pendingToolUseSummary: undefined,
   stopHookActive: undefined,
   turnCount,
-  continuationNudgeCount: state.continuationNudgeCount,
   transition: { reason: 'max_output_tokens_escalate' },
 }
 state = next
@@ -1276,7 +1238,6 @@ async function* queryLoop(
   pendingToolUseSummary: undefined,
   stopHookActive: undefined,
   turnCount,
-  continuationNudgeCount: state.continuationNudgeCount,
   transition: {
     reason: 'max_output_tokens_recovery',
     attempt: maxOutputTokensRecoveryCount + 1,
@@ -1334,7 +1295,6 @@ async function* queryLoop(
   pendingToolUseSummary: undefined,
   stopHookActive: true,
   turnCount,
-  continuationNudgeCount: state.continuationNudgeCount,
   transition: { reason: 'stop_hook_blocking' },
 }
 state = next
@@ -1371,7 +1331,6 @@ async function* queryLoop(
   pendingToolUseSummary: undefined,
   stopHookActive: undefined,
   turnCount,
-  continuationNudgeCount: state.continuationNudgeCount,
   transition: { reason: 'token_budget_continuation' },
 }
 continue
@@ -1391,77 +1350,6 @@ async function* queryLoop(
   }
 }
-  // Continuation nudge: detect when the model signals intent to continue
-  // (e.g., "so now I have to do it", "let me now...", "I'll need to...")
-  // but returned no tool calls. This prevents premature task completion.
-  //
-  // Guard: capped at MAX_CONTINUATION_NUDGES to prevent infinite loops
-  // when the model keeps matching signals without ever calling tools.
-  if (
-    assistantMessages.length > 0 &&
-    turnCount < (maxTurns ?? Infinity) &&
-    state.continuationNudgeCount < MAX_CONTINUATION_NUDGES
-  ) {
-    const lastAssistant = assistantMessages.at(-1)
-    if (lastAssistant?.type === 'assistant') {
-      const lastText = lastAssistant.message.content
-        .filter((b): b is { type: 'text'; text: string } => b.type === 'text')
-        .map(b => b.text)
-        .join(' ')
-        .toLowerCase()
-      // Tightened patterns: require explicit action verbs and exclude
-      // common explanatory phrasing to reduce false positives.
-      const continuationSignals = [
-        // Only match "so now I/let me/we" followed by an action verb
-        /\bso now (i|let me|we) (need to|have to|should|must|will) (do|create|write|edit|update|fix|implement|add|run|check|make|build|set up)\b/,
-        // "now I'll" + action (not "now I'll explain" etc.)
-        /\bnow i('ll| will) (do|create|write|edit|update|fix|implement|add|run|check|make|build|set up|go|proceed)\b/,
-        // "let me" + action (not "let me think/explain/show")
-        /\blet me (go ahead and |now )?(do|create|write|edit|update|fix|implement|add|run|check|make|build|set up|proceed)\b/,
-        // "I'll/I need to/I have to" + action, only if message is short (<80 chars)
-        ...(lastText.length < 80
-          ? [/\b(i('ll| will| need to| have to| must) (now )?(do|create|write|edit|update|fix|implement|add|run|check|make|build|set up))\b/]
-          : []),
-        // "time to" + action
-        /\btime to (do|create|write|edit|update|fix|implement|add|run|check|make|build|get started|begin)\b/,
-        // "next, I'll/let me" + action, only if message is short
-        ...(lastText.length < 80
-          ? [/\bnext,?\s+(i('ll| will)|let me|i need to) (do|create|write|edit|update|fix|implement|add|run|check|make|build)\b/]
-          : []),
-      ]
-      // Don't nudge if the text contains completion markers
-      const completionMarkers = /\b(done|finished|completed|complete|summary|that's all|that is all|all set|hope this helps|let me know if)\b/
-      if (completionMarkers.test(lastText)) {
-        // Model signaled completion — don't nudge
-      } else if (continuationSignals.some(re => re.test(lastText))) {
-        logForDebugging(
-          `Continuation nudge triggered (${state.continuationNudgeCount + 1}/${MAX_CONTINUATION_NUDGES}): model said "${lastText.slice(-120)}" without tool calls`,
-        )
-        const nudge = createUserMessage({
-          content: 'Continue with the task. Use the appropriate tools to proceed.',
-          isMeta: true,
-        })
-        const next: State = {
-          messages: [...messagesForQuery, ...assistantMessages, nudge],
-          toolUseContext,
-          autoCompactTracking: tracking,
-          maxOutputTokensRecoveryCount: 0,
-          hasAttemptedReactiveCompact: false,
-          maxOutputTokensOverride: undefined,
-          pendingToolUseSummary: undefined,
-          stopHookActive: undefined,
-          turnCount,
-          continuationNudgeCount: state.continuationNudgeCount + 1,
-          transition: { reason: 'continuation_nudge' },
-        }
-        state = next
-        continue
-      }
-    }
-  }
 return { reason: 'completed' }
 }
@@ -1827,7 +1715,6 @@ async function* queryLoop(
   turnCount: nextTurnCount,
   maxOutputTokensRecoveryCount: 0,
   hasAttemptedReactiveCompact: false,
-  continuationNudgeCount: 0,
   pendingToolUseSummary: nextPendingToolUseSummary,
   maxOutputTokensOverride: undefined,
   stopHookActive,

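The removed continuation-nudge block combines three guards: a continuation-signal regex, a completion-marker veto, and a hard cap so the loop can never nudge forever. The skeleton of that decision, condensed from the removed code (the patterns here are abbreviated stand-ins for the much stricter lists above):

const MAX_NUDGES = 3

function shouldNudge(lastAssistantText: string, nudgeCount: number): boolean {
  const text = lastAssistantText.toLowerCase()
  // Abbreviated versions of the removed signal/marker patterns.
  const signalsContinuation = /\blet me (now )?(do|create|write|fix|run)\b/.test(text)
  const signalsCompletion = /\b(done|finished|completed|all set)\b/.test(text)
  // The cap is the loop-termination guarantee: without it, a model that
  // keeps "announcing" work would be nudged indefinitely.
  return nudgeCount < MAX_NUDGES && signalsContinuation && !signalsCompletion
}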

@@ -196,7 +196,7 @@ const PROACTIVE_NO_OP_SUBSCRIBE = (_cb: () => void) => () => { };
 const PROACTIVE_FALSE = () => false;
 const SUGGEST_BG_PR_NOOP = (_p: string, _n: string): boolean => false;
 const useProactive = feature('PROACTIVE') || feature('KAIROS') ? require('../proactive/useProactive.js').useProactive : null;
-const useScheduledTasks = require('../hooks/useScheduledTasks.js').useScheduledTasks;
+const useScheduledTasks = feature('AGENT_TRIGGERS') ? require('../hooks/useScheduledTasks.js').useScheduledTasks : null;
 /* eslint-enable @typescript-eslint/no-require-imports */
 import { isAgentSwarmsEnabled } from '../utils/agentSwarmsEnabled.js';
 import { useTaskListWatcher } from '../hooks/useTaskListWatcher.js';
@@ -3873,7 +3873,7 @@ export function REPL({
   // empty to non-empty, not on every length change -- otherwise a render loop
   // (concurrent onQuery thrashing, etc.) spams saveGlobalConfig, which hits
   // ELOCKED under concurrent sessions and falls back to unlocked writes.
-  // That write storm is the primary trigger for ~/.openclaude.json corruption
+  // That write storm is the primary trigger for ~/.claude.json corruption
   // (GH #3117).
   const hasCountedQueueUseRef = useRef(false);
   useEffect(() => {
@@ -4076,13 +4076,21 @@ export function REPL({
   });
   // Scheduled tasks from .claude/scheduled_tasks.json (CronCreate/Delete/List)
-  // and session-only /loop runs.
-  // Assistant mode bypasses the isLoading gate (the proactive tick →
-  // Sleep → tick loop would otherwise starve the scheduler).
-  // kairosEnabled is set once in initialState (main.tsx) and never mutated — no
-  // subscription needed. The tengu_kairos_cron runtime gate is checked inside
-  // useScheduledTasks's effect (not here) since wrapping a hook call in a dynamic
-  // condition would break rules-of-hooks.
-  const assistantMode = store.getState().kairosEnabled;
-  useScheduledTasks({
-    isLoading,
-    assistantMode,
-    setMessages
-  });
+  if (feature('AGENT_TRIGGERS')) {
+    const assistantMode = store.getState().kairosEnabled;
+    // biome-ignore lint/correctness/useHookAtTopLevel: feature() is a compile-time constant
+    useScheduledTasks!({
+      isLoading,
+      assistantMode,
+      setMessages
+    });
+  }
   // Note: Permission polling is now handled by useInboxPoller
   // - Workers receive permission responses via mailbox messages

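The biome-ignore in the new branch leans on one property: feature('AGENT_TRIGGERS') is fixed for the lifetime of the process, so the conditionally called hook still satisfies React's same-hooks-in-same-order rule. A sketch of why that distinction matters (FEATURE_ENABLED stands in for the feature() call):

import { useEffect } from 'react'

const FEATURE_ENABLED = true // resolved once at startup, never flips

function useMaybeFeature(tick: () => void): void {
  // Safe only because the condition cannot change between renders of the
  // same process; gating a hook on a runtime-mutable flag would corrupt
  // React's hook bookkeeping, which is exactly what the lint rule guards.
  if (FEATURE_ENABLED) {
    useEffect(tick, [tick])
  }
}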

@@ -334,7 +334,7 @@ async function processRemoteEvalPayload(
   // Empty object is truthy — without the length check, `{features: {}}`
   // (transient server bug, truncated response) would pass, clear the maps
   // below, return true, and syncRemoteEvalToDisk would wholesale-write `{}`
-  // to disk: total flag blackout for every process sharing ~/.openclaude.json.
+  // to disk: total flag blackout for every process sharing ~/.claude.json.
   if (!payload?.features || Object.keys(payload.features).length === 0) {
     return false
   }


@@ -116,21 +116,9 @@ async function fetchBootstrapAPI(): Promise<BootstrapResponse | null> {
     return parsed.data
   })
 } catch (error) {
-  if (axios.isAxiosError(error)) {
-    const status = error.response?.status ?? 'no-response'
-    const code = error.code ?? 'unknown-code'
-    const method = error.config?.method?.toUpperCase() ?? 'UNKNOWN'
-    const requestUrl = error.config?.url ?? 'unknown-url'
-    const message = error.message ?? 'unknown axios error'
-    logForDebugging(
-      `[Bootstrap] Fetch failed: status=${status} code=${code} method=${method} url=${requestUrl} message=${message}`,
-    )
-  } else {
-    const message = error instanceof Error ? error.message : String(error)
-    logForDebugging(`[Bootstrap] Fetch failed: ${message}`)
-  }
+  logForDebugging(
+    `[Bootstrap] Fetch failed: ${axios.isAxiosError(error) ? (error.response?.status ?? error.code) : 'unknown'}`,
+  )
   throw error
 }
}


@@ -23,7 +23,6 @@ import { randomUUID } from 'crypto'
 import {
   getAPIProvider,
   isFirstPartyAnthropicBaseUrl,
-  isGithubNativeAnthropicMode,
 } from 'src/utils/model/providers.js'
 import {
   getAttributionHeader,
@@ -335,13 +334,8 @@ export function getPromptCachingEnabled(model: string): boolean {
   // Prompt caching is an Anthropic-specific feature. Third-party providers
   // do not understand cache_control blocks and strict backends (e.g. Azure
   // Foundry) reject or flag requests that contain them.
-  //
-  // Exception: when the GitHub provider is configured in native Anthropic API
-  // mode (CLAUDE_CODE_GITHUB_ANTHROPIC_API=1), requests are sent in Anthropic
-  // format, so cache_control blocks are supported.
   const provider = getAPIProvider()
-  const isNativeGithub = isGithubNativeAnthropicMode(model)
-  if (provider !== 'firstParty' && provider !== 'bedrock' && provider !== 'vertex' && !isNativeGithub) {
+  if (provider !== 'firstParty' && provider !== 'bedrock' && provider !== 'vertex') {
     return false
   }
@@ -1217,7 +1211,7 @@ async function* queryModel(
   cachedMCEnabled = featureEnabled && modelSupported
   const config = getCachedMCConfig()
   logForDebugging(
-    `Cached MC gate: enabled=${featureEnabled} modelSupported=${modelSupported} model=${options.model} supportedModels=${jsonStringify(config?.supportedModels)}`,
+    `Cached MC gate: enabled=${featureEnabled} modelSupported=${modelSupported} model=${options.model} supportedModels=${jsonStringify(config.supportedModels)}`,
   )
 }

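What getPromptCachingEnabled() actually gates is whether outgoing request blocks carry Anthropic cache_control markers, which OpenAI-compatible backends don't understand. A minimal sketch of the block shape (per Anthropic's documented prompt-caching API; the helper name is illustrative):

type TextBlock = {
  type: 'text'
  text: string
  cache_control?: { type: 'ephemeral' }
}

function maybeCache(block: TextBlock, cachingEnabled: boolean): TextBlock {
  // Add-or-omit rather than always emitting: strict third-party backends
  // (e.g. Azure Foundry) reject requests containing unknown fields.
  return cachingEnabled
    ? { ...block, cache_control: { type: 'ephemeral' } }
    : block
}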

@@ -14,7 +14,6 @@ import { getSmallFastModel } from 'src/utils/model/model.js'
 import {
   getAPIProvider,
   isFirstPartyAnthropicBaseUrl,
-  isGithubNativeAnthropicMode,
 } from 'src/utils/model/providers.js'
 import { getProxyFetchOptions } from 'src/utils/proxy.js'
 import {
@@ -175,25 +174,6 @@ export async function getAnthropicClient({
     providerOverride,
   }) as unknown as Anthropic
 }
-// GitHub provider in native Anthropic API mode: send requests in Anthropic
-// format so cache_control blocks are honoured and prompt caching works.
-// Requires the GitHub endpoint (OPENAI_BASE_URL) to support Anthropic's
-// messages API — set CLAUDE_CODE_GITHUB_ANTHROPIC_API=1 to opt in.
-if (isGithubNativeAnthropicMode(model)) {
-  const githubBaseUrl =
-    process.env.OPENAI_BASE_URL?.replace(/\/$/, '') ??
-    'https://api.githubcopilot.com'
-  const githubToken =
-    process.env.GITHUB_TOKEN ?? process.env.GH_TOKEN ?? ''
-  const nativeArgs: ConstructorParameters<typeof Anthropic>[0] = {
-    ...ARGS,
-    baseURL: githubBaseUrl,
-    authToken: githubToken,
-    // No apiKey — we authenticate via Bearer token (authToken)
-    apiKey: null,
-  }
-  return new Anthropic(nativeArgs)
-}
 if (
   isEnvTruthy(process.env.CLAUDE_CODE_USE_OPENAI) ||
   isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB) ||


@@ -1,166 +0,0 @@
import { createServer } from 'node:http'
import { afterEach, expect, mock, test } from 'bun:test'
import { CodexOAuthService } from './codexOAuth.js'
const originalFetch = globalThis.fetch
const originalCallbackPort = process.env.CODEX_OAUTH_CALLBACK_PORT
const originalClientId = process.env.CODEX_OAUTH_CLIENT_ID
afterEach(() => {
mock.restore()
globalThis.fetch = originalFetch
if (originalCallbackPort === undefined) {
delete process.env.CODEX_OAUTH_CALLBACK_PORT
} else {
process.env.CODEX_OAUTH_CALLBACK_PORT = originalCallbackPort
}
if (originalClientId === undefined) {
delete process.env.CODEX_OAUTH_CLIENT_ID
} else {
process.env.CODEX_OAUTH_CLIENT_ID = originalClientId
}
})
async function getFreePort(): Promise<number> {
return await new Promise((resolve, reject) => {
const server = createServer()
server.once('error', reject)
server.listen(0, '127.0.0.1', () => {
const address = server.address()
if (!address || typeof address === 'string') {
server.close(() => reject(new Error('Failed to allocate test port.')))
return
}
const { port } = address
server.close(error => {
if (error) {
reject(error)
return
}
resolve(port)
})
})
})
}
function buildCallbackRequest(authUrl: string): string {
const authorizeUrl = new URL(authUrl)
const redirectUri = authorizeUrl.searchParams.get('redirect_uri')
const state = authorizeUrl.searchParams.get('state')
if (!redirectUri || !state) {
throw new Error('Codex OAuth test did not receive a valid authorization URL.')
}
const callbackUrl = new URL(redirectUri)
callbackUrl.searchParams.set('code', 'auth-code')
callbackUrl.searchParams.set('state', state)
return callbackUrl.toString()
}
test('serves updated success copy after a successful Codex OAuth flow', async () => {
const callbackPort = await getFreePort()
process.env.CODEX_OAUTH_CALLBACK_PORT = String(callbackPort)
process.env.CODEX_OAUTH_CLIENT_ID = 'test-client-id'
globalThis.fetch = mock(async (input, init) => {
const url = String(input)
if (url.startsWith('http://localhost:')) {
return originalFetch(input, init)
}
return new Response(
JSON.stringify({
access_token: 'access-token',
refresh_token: 'refresh-token',
}),
{
status: 200,
headers: { 'Content-Type': 'application/json' },
},
)
}) as typeof fetch
const service = new CodexOAuthService()
let callbackResponsePromise!: Promise<Response>
const flowPromise = service.startOAuthFlow(async authUrl => {
callbackResponsePromise = originalFetch(buildCallbackRequest(authUrl))
})
const tokens = await flowPromise
const callbackResponse = await callbackResponsePromise
const html = await callbackResponse.text()
expect(tokens.accessToken).toBe('access-token')
expect(tokens.refreshToken).toBe('refresh-token')
expect(html).toContain('You can return to OpenClaude now.')
expect(html).toContain(
'OpenClaude will finish activating your new Codex OAuth login.',
)
expect(html).not.toContain('continue automatically')
})
test('cancellation during token exchange returns a cancelled page and rejects the flow', async () => {
const callbackPort = await getFreePort()
process.env.CODEX_OAUTH_CALLBACK_PORT = String(callbackPort)
process.env.CODEX_OAUTH_CLIENT_ID = 'test-client-id'
let resolveFetchStart!: () => void
const fetchStarted = new Promise<void>(resolve => {
resolveFetchStart = resolve
})
globalThis.fetch = mock((input, init) => {
const url = String(input)
if (url.startsWith('http://localhost:')) {
return originalFetch(input, init)
}
return new Promise<Response>((_resolve, reject) => {
resolveFetchStart()
const signal = init?.signal
if (!signal) {
return
}
if (signal.aborted) {
reject(signal.reason)
return
}
signal.addEventListener(
'abort',
() => {
reject(signal.reason)
},
{ once: true },
)
})
}) as typeof fetch
const service = new CodexOAuthService()
let callbackResponsePromise!: Promise<Response>
const flowPromise = service.startOAuthFlow(async authUrl => {
callbackResponsePromise = originalFetch(buildCallbackRequest(authUrl))
})
await fetchStarted
service.cleanup()
await expect(flowPromise).rejects.toThrow('Codex OAuth flow was cancelled.')
const callbackResponse = await callbackResponsePromise
const html = await callbackResponse.text()
expect(html).toContain('Codex login cancelled')
expect(html).toContain('retry in OpenClaude')
})


@@ -1,307 +0,0 @@
import { AuthCodeListener } from '../oauth/auth-code-listener.js'
import {
generateCodeChallenge,
generateCodeVerifier,
generateState,
} from '../oauth/crypto.js'
import {
asTrimmedString,
CODEX_OAUTH_ISSUER,
CODEX_OAUTH_ORIGINATOR,
CODEX_OAUTH_SCOPE,
escapeHtml,
exchangeCodexIdTokenForApiKey,
getCodexOAuthCallbackPort,
getCodexOAuthClientId,
parseChatgptAccountId,
} from './codexOAuthShared.js'
type CodexOAuthTokenResponse = {
id_token?: string
access_token?: string
refresh_token?: string
}
export type CodexOAuthTokens = {
apiKey?: string
accessToken: string
refreshToken: string
idToken?: string
accountId?: string
}
function buildCodexAuthorizeUrl(options: {
port: number
codeChallenge: string
state: string
}): string {
const redirectUri = `http://localhost:${options.port}/auth/callback`
const authUrl = new URL(`${CODEX_OAUTH_ISSUER}/oauth/authorize`)
authUrl.searchParams.append('response_type', 'code')
authUrl.searchParams.append('client_id', getCodexOAuthClientId())
authUrl.searchParams.append('redirect_uri', redirectUri)
authUrl.searchParams.append('scope', CODEX_OAUTH_SCOPE)
authUrl.searchParams.append('code_challenge', options.codeChallenge)
authUrl.searchParams.append('code_challenge_method', 'S256')
authUrl.searchParams.append('id_token_add_organizations', 'true')
authUrl.searchParams.append('codex_cli_simplified_flow', 'true')
authUrl.searchParams.append('state', options.state)
authUrl.searchParams.append('originator', CODEX_OAUTH_ORIGINATOR)
return authUrl.toString()
}
function renderSuccessPage(): string {
return `<!doctype html>
<html lang="en">
<head>
<meta charset="utf-8" />
<title>Codex Login Complete</title>
<style>
body { font-family: sans-serif; padding: 32px; line-height: 1.5; color: #111827; }
h1 { margin: 0 0 12px; font-size: 22px; }
p { margin: 0 0 10px; }
</style>
</head>
<body>
<h1>Codex login complete</h1>
<p>You can return to OpenClaude now.</p>
<p>OpenClaude will finish activating your new Codex OAuth login.</p>
</body>
</html>`
}
function renderErrorPage(message: string): string {
const safeMessage = escapeHtml(message)
return `<!doctype html>
<html lang="en">
<head>
<meta charset="utf-8" />
<title>Codex Login Failed</title>
<style>
body { font-family: sans-serif; padding: 32px; line-height: 1.5; color: #111827; }
h1 { margin: 0 0 12px; font-size: 22px; color: #991b1b; }
p { margin: 0 0 10px; }
</style>
</head>
<body>
<h1>Codex login failed</h1>
<p>${safeMessage}</p>
<p>You can close this window and try again in OpenClaude.</p>
</body>
</html>`
}
function renderCancelledPage(): string {
return `<!doctype html>
<html lang="en">
<head>
<meta charset="utf-8" />
<title>Codex Login Cancelled</title>
<style>
body { font-family: sans-serif; padding: 32px; line-height: 1.5; color: #111827; }
h1 { margin: 0 0 12px; font-size: 22px; }
p { margin: 0 0 10px; }
</style>
</head>
<body>
<h1>Codex login cancelled</h1>
<p>You can close this window and retry in OpenClaude.</p>
</body>
</html>`
}
async function exchangeAuthorizationCode(options: {
authorizationCode: string
codeVerifier: string
port: number
signal?: AbortSignal
}): Promise<CodexOAuthTokens> {
const redirectUri = `http://localhost:${options.port}/auth/callback`
const body = new URLSearchParams({
grant_type: 'authorization_code',
code: options.authorizationCode,
redirect_uri: redirectUri,
client_id: getCodexOAuthClientId(),
code_verifier: options.codeVerifier,
})
const response = await fetch(`${CODEX_OAUTH_ISSUER}/oauth/token`, {
method: 'POST',
headers: {
'Content-Type': 'application/x-www-form-urlencoded',
},
body,
signal: options.signal
? AbortSignal.any([options.signal, AbortSignal.timeout(15_000)])
: AbortSignal.timeout(15_000),
})
if (!response.ok) {
const errorText = await response.text().catch(() => '')
throw new Error(
errorText.trim()
? `Codex OAuth token exchange failed (${response.status}): ${errorText.trim()}`
: `Codex OAuth token exchange failed with status ${response.status}.`,
)
}
const payload = (await response.json()) as CodexOAuthTokenResponse
const accessToken = asTrimmedString(payload.access_token)
const refreshToken = asTrimmedString(payload.refresh_token)
if (!accessToken || !refreshToken) {
throw new Error(
'Codex OAuth completed, but the token response was missing credentials.',
)
}
const idToken = asTrimmedString(payload.id_token)
const apiKey = idToken
? await exchangeCodexIdTokenForApiKey(idToken).catch(() => undefined)
: undefined
return {
apiKey,
accessToken,
refreshToken,
idToken,
accountId:
parseChatgptAccountId(idToken) ?? parseChatgptAccountId(accessToken),
}
}
export class CodexOAuthService {
private authCodeListener: AuthCodeListener | null = null
private port: number | null = null
private tokenExchangeAbortController: AbortController | null = null
private buildCancellationError(): Error {
return new Error('Codex OAuth flow was cancelled.')
}
async startOAuthFlow(
authURLHandler: (authUrl: string) => Promise<void>,
): Promise<CodexOAuthTokens> {
const codeVerifier = generateCodeVerifier()
const callbackPort = getCodexOAuthCallbackPort()
const authCodeListener = new AuthCodeListener('/auth/callback')
this.authCodeListener = authCodeListener
this.port = null
try {
const port = await authCodeListener.start(callbackPort)
this.port = port
const state = generateState()
const codeChallenge = await generateCodeChallenge(codeVerifier)
const authUrl = buildCodexAuthorizeUrl({
port,
codeChallenge,
state,
})
try {
const authorizationCode = await authCodeListener.waitForAuthorization(
state,
async () => {
await authURLHandler(authUrl)
},
)
const tokenExchangeAbortController = new AbortController()
this.tokenExchangeAbortController = tokenExchangeAbortController
let tokens: CodexOAuthTokens
try {
tokens = await exchangeAuthorizationCode({
authorizationCode,
codeVerifier,
port,
signal: tokenExchangeAbortController.signal,
})
} finally {
if (
this.tokenExchangeAbortController === tokenExchangeAbortController
) {
this.tokenExchangeAbortController = null
}
}
if (this.authCodeListener !== authCodeListener) {
throw this.buildCancellationError()
}
authCodeListener.handleSuccessRedirect([], res => {
res.writeHead(200, {
'Content-Type': 'text/html; charset=utf-8',
})
res.end(renderSuccessPage())
})
return tokens
} catch (error) {
const resolvedError =
this.authCodeListener === authCodeListener
? error
: this.buildCancellationError()
if (authCodeListener.hasPendingResponse()) {
const isCancellation =
resolvedError instanceof Error &&
resolvedError.message === 'Codex OAuth flow was cancelled.'
authCodeListener.handleErrorRedirect(res => {
res.writeHead(isCancellation ? 200 : 400, {
'Content-Type': 'text/html; charset=utf-8',
})
res.end(
isCancellation
? renderCancelledPage()
: renderErrorPage(
resolvedError instanceof Error
? resolvedError.message
: String(resolvedError),
),
)
})
}
throw resolvedError
} finally {
this.cleanup()
}
} catch (error) {
const message = error instanceof Error ? error.message : String(error)
if (
message.includes('EADDRINUSE') ||
message.includes(String(callbackPort))
) {
throw new Error(
`Codex OAuth needs localhost:${callbackPort} for its callback. Close any app already using that port and try again.`,
)
}
throw error
}
}
cleanup(): void {
const cancellationError = this.buildCancellationError()
this.tokenExchangeAbortController?.abort(cancellationError)
this.tokenExchangeAbortController = null
if (this.authCodeListener?.hasPendingResponse()) {
this.authCodeListener.handleErrorRedirect(res => {
res.writeHead(200, {
'Content-Type': 'text/html; charset=utf-8',
})
res.end(renderCancelledPage())
})
}
this.authCodeListener?.cancelPendingAuthorization(cancellationError)
this.authCodeListener = null
this.port = null
}
}

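The flow above imports generateCodeVerifier/generateCodeChallenge from oauth/crypto.js, which this compare doesn't show. The PKCE S256 pair it relies on reduces to this sketch (the standard construction, not the repo's code):

import { createHash, randomBytes } from 'node:crypto'

function makePkcePair(): { verifier: string; challenge: string } {
  // Verifier: a high-entropy random string kept client-side; challenge: its
  // SHA-256 digest, base64url-encoded, sent as code_challenge in the
  // authorize URL with code_challenge_method=S256.
  const verifier = randomBytes(32).toString('base64url')
  const challenge = createHash('sha256').update(verifier).digest('base64url')
  return { verifier, challenge }
}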

@@ -1,139 +0,0 @@
export const CODEX_OAUTH_ISSUER = 'https://auth.openai.com'
export const CODEX_REFRESH_URL = `${CODEX_OAUTH_ISSUER}/oauth/token`
export const DEFAULT_CODEX_OAUTH_CLIENT_ID = 'app_EMoamEEZ73f0CkXaXp7hrann'
export const DEFAULT_CODEX_OAUTH_CALLBACK_PORT = 1455
export const CODEX_OAUTH_SCOPE =
'openid profile email offline_access api.connectors.read api.connectors.invoke'
export const CODEX_OAUTH_ORIGINATOR = 'codex_cli_rs'
export const CODEX_API_KEY_TOKEN_NAME = 'openai-api-key'
export const CODEX_ID_TOKEN_SUBJECT_TYPE =
'urn:ietf:params:oauth:token-type:id_token'
export const CODEX_TOKEN_EXCHANGE_GRANT =
'urn:ietf:params:oauth:grant-type:token-exchange'
export function asTrimmedString(value: unknown): string | undefined {
if (typeof value !== 'string') return undefined
const trimmed = value.trim()
return trimmed ? trimmed : undefined
}
export function getCodexOAuthClientId(
env: NodeJS.ProcessEnv = process.env,
): string {
return asTrimmedString(env.CODEX_OAUTH_CLIENT_ID) ?? DEFAULT_CODEX_OAUTH_CLIENT_ID
}
export function getCodexOAuthCallbackPort(
env: NodeJS.ProcessEnv = process.env,
): number {
const rawPort = asTrimmedString(env.CODEX_OAUTH_CALLBACK_PORT)
if (!rawPort) {
return DEFAULT_CODEX_OAUTH_CALLBACK_PORT
}
const parsed = Number.parseInt(rawPort, 10)
if (Number.isInteger(parsed) && parsed > 0 && parsed <= 65535) {
return parsed
}
return DEFAULT_CODEX_OAUTH_CALLBACK_PORT
}
export function decodeJwtPayload(
token: string,
): Record<string, unknown> | undefined {
const parts = token.split('.')
if (parts.length < 2) return undefined
try {
const normalized = parts[1].replace(/-/g, '+').replace(/_/g, '/')
const padded = normalized + '='.repeat((4 - (normalized.length % 4)) % 4)
const json = Buffer.from(padded, 'base64').toString('utf8')
const parsed = JSON.parse(json)
return parsed && typeof parsed === 'object'
? (parsed as Record<string, unknown>)
: undefined
} catch {
return undefined
}
}
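// The ChatGPT account id can appear under three claim shapes: nested inside
// the "https://api.openai.com/auth" claim, as a flattened dotted key, or at
// the top level. Check all three and normalize to a trimmed string.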
export function parseChatgptAccountId(
token: string | undefined,
): string | undefined {
if (!token) return undefined
const payload = decodeJwtPayload(token)
const nestedAuth =
payload?.['https://api.openai.com/auth'] &&
typeof payload['https://api.openai.com/auth'] === 'object'
? (payload['https://api.openai.com/auth'] as Record<string, unknown>)
: undefined
return (
asTrimmedString(
nestedAuth?.chatgpt_account_id ??
payload?.['https://api.openai.com/auth.chatgpt_account_id'] ??
payload?.chatgpt_account_id,
) ?? undefined
)
}
export function escapeHtml(value: string): string {
return value.replace(/[&<>"']/g, char => {
switch (char) {
case '&':
return '&amp;'
case '<':
return '&lt;'
case '>':
return '&gt;'
case '"':
return '&quot;'
case '\'':
return '&#39;'
default:
return char
}
})
}
export async function exchangeCodexIdTokenForApiKey(
idToken: string,
): Promise<string> {
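  // RFC 8693 token exchange: trade the OIDC id_token for an OpenAI API key.
  // The grant-type and subject-token-type URNs above come from that spec.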
const body = new URLSearchParams({
grant_type: CODEX_TOKEN_EXCHANGE_GRANT,
client_id: getCodexOAuthClientId(),
requested_token: CODEX_API_KEY_TOKEN_NAME,
subject_token: idToken,
subject_token_type: CODEX_ID_TOKEN_SUBJECT_TYPE,
})
const response = await fetch(CODEX_REFRESH_URL, {
method: 'POST',
headers: {
'Content-Type': 'application/x-www-form-urlencoded',
},
body,
signal: AbortSignal.timeout(15_000),
})
if (!response.ok) {
const bodyText = await response.text().catch(() => '')
throw new Error(
bodyText.trim()
? `Codex API key exchange failed (${response.status}): ${bodyText.trim()}`
: `Codex API key exchange failed with status ${response.status}.`,
)
}
const payload = (await response.json()) as { access_token?: string }
const apiKey = asTrimmedString(payload.access_token)
if (!apiKey) {
throw new Error(
'Codex API key exchange completed, but no API key token was returned.',
)
}
return apiKey
}

View File

@@ -8,14 +8,16 @@ import {
  convertCodexResponseToAnthropicMessage,
  convertToolsToResponsesTools,
} from './codexShim.js'
-import { __test as webSearchToolTest } from '../../tools/WebSearchTool/WebSearchTool.js'
+import {
+  resolveCodexApiCredentials,
+  resolveProviderRequest,
+} from './providerConfig.js'
const tempDirs: string[] = []
const originalEnv = {
  OPENAI_BASE_URL: process.env.OPENAI_BASE_URL,
  OPENAI_API_BASE: process.env.OPENAI_API_BASE,
  CLAUDE_CODE_USE_GITHUB: process.env.CLAUDE_CODE_USE_GITHUB,
-  OPENAI_MODEL: process.env.OPENAI_MODEL,
}
afterEach(() => {
@@ -28,9 +30,6 @@ afterEach(() => {
  if (originalEnv.CLAUDE_CODE_USE_GITHUB === undefined) delete process.env.CLAUDE_CODE_USE_GITHUB
  else process.env.CLAUDE_CODE_USE_GITHUB = originalEnv.CLAUDE_CODE_USE_GITHUB
-  if (originalEnv.OPENAI_MODEL === undefined) delete process.env.OPENAI_MODEL
-  else process.env.OPENAI_MODEL = originalEnv.OPENAI_MODEL
  while (tempDirs.length > 0) {
    const dir = tempDirs.pop()
    if (dir) rmSync(dir, { recursive: true, force: true })
@@ -60,10 +59,6 @@ async function collectStreamEventTypes(responseText: string): Promise<string[]>
  return events
}
-async function importFreshProviderConfigModule() {
-  return import(`./providerConfig.js?ts=${Date.now()}-${Math.random()}`)
-}
describe('Codex provider config', () => {
  const originalOpenaiBaseUrl = process.env.OPENAI_BASE_URL
  const originalOpenaiApiBase = process.env.OPENAI_API_BASE
@@ -80,8 +75,7 @@ describe('Codex provider config', () => {
    else process.env.OPENAI_API_BASE = originalOpenaiApiBase
  })
-  test('resolves codexplan alias to Codex transport with reasoning', async () => {
-    const { resolveProviderRequest } = await importFreshProviderConfigModule()
+  test('resolves codexplan alias to Codex transport with reasoning', () => {
    delete process.env.OPENAI_BASE_URL
    delete process.env.OPENAI_API_BASE
    delete process.env.CLAUDE_CODE_USE_GITHUB
@@ -90,23 +84,9 @@ describe('Codex provider config', () => {
    expect(resolved.transport).toBe('codex_responses')
    expect(resolved.resolvedModel).toBe('gpt-5.4')
    expect(resolved.reasoning).toEqual({ effort: 'high' })
-    expect(resolved.baseUrl).toBe('https://chatgpt.com/backend-api/codex')
  })
-  test('resolves codexspark alias to Codex transport with Codex base URL', async () => {
-    const { resolveProviderRequest } = await importFreshProviderConfigModule()
-    delete process.env.OPENAI_BASE_URL
-    delete process.env.OPENAI_API_BASE
-    delete process.env.CLAUDE_CODE_USE_GITHUB
-    const resolved = resolveProviderRequest({ model: 'codexspark' })
-    expect(resolved.transport).toBe('codex_responses')
-    expect(resolved.resolvedModel).toBe('gpt-5.3-codex-spark')
-    expect(resolved.baseUrl).toBe('https://chatgpt.com/backend-api/codex')
-  })
-  test('does not force Codex transport when a local non-Codex base URL is explicit', async () => {
-    const { resolveProviderRequest } = await importFreshProviderConfigModule()
+  test('does not force Codex transport when a local non-Codex base URL is explicit', () => {
    const resolved = resolveProviderRequest({
      model: 'codexplan',
      baseUrl: 'http://127.0.0.1:8080/v1',
@@ -117,8 +97,7 @@ describe('Codex provider config', () => {
    expect(resolved.resolvedModel).toBe('gpt-5.4')
  })
-  test('resolves codexplan to Codex transport even when OPENAI_BASE_URL is the string "undefined"', async () => {
-    const { resolveProviderRequest } = await importFreshProviderConfigModule()
+  test('resolves codexplan to Codex transport even when OPENAI_BASE_URL is the string "undefined"', () => {
    // On Windows, env vars can leak as the literal string "undefined" instead of
    // the JS value undefined when not properly unset (issue #336).
    process.env.OPENAI_BASE_URL = 'undefined'
@@ -126,57 +105,20 @@ describe('Codex provider config', () => {
    expect(resolved.transport).toBe('codex_responses')
  })
-  test('resolves codexplan to Codex transport even when OPENAI_BASE_URL is an empty string', async () => {
-    const { resolveProviderRequest } = await importFreshProviderConfigModule()
+  test('resolves codexplan to Codex transport even when OPENAI_BASE_URL is an empty string', () => {
    process.env.OPENAI_BASE_URL = ''
    const resolved = resolveProviderRequest({ model: 'codexplan' })
    expect(resolved.transport).toBe('codex_responses')
  })
-  test('prefers explicit baseUrl option over env var', async () => {
-    const { resolveProviderRequest } = await importFreshProviderConfigModule()
+  test('prefers explicit baseUrl option over env var', () => {
    process.env.OPENAI_BASE_URL = 'https://example.com/v1'
    const resolved = resolveProviderRequest({ model: 'codexplan', baseUrl: 'https://chatgpt.com/backend-api/codex' })
    expect(resolved.transport).toBe('codex_responses')
    expect(resolved.baseUrl).toBe('https://chatgpt.com/backend-api/codex')
  })
-  test('default gpt-4o uses OpenAI base URL (no regression)', async () => {
const { resolveProviderRequest } = await importFreshProviderConfigModule()
delete process.env.OPENAI_BASE_URL
delete process.env.CLAUDE_CODE_USE_GITHUB
const resolved = resolveProviderRequest({ model: 'gpt-4o' })
expect(resolved.transport).toBe('chat_completions')
expect(resolved.baseUrl).toBe('https://api.openai.com/v1')
expect(resolved.resolvedModel).toBe('gpt-4o')
})
test('resolves codexplan from env var OPENAI_MODEL to Codex endpoint', async () => {
const { resolveProviderRequest } = await importFreshProviderConfigModule()
process.env.OPENAI_MODEL = 'codexplan'
delete process.env.OPENAI_BASE_URL
delete process.env.CLAUDE_CODE_USE_GITHUB
const resolved = resolveProviderRequest()
expect(resolved.transport).toBe('codex_responses')
expect(resolved.baseUrl).toBe('https://chatgpt.com/backend-api/codex')
expect(resolved.resolvedModel).toBe('gpt-5.4')
})
test('does not override custom base URL for codexplan (e.g., local provider)', async () => {
const { resolveProviderRequest } = await importFreshProviderConfigModule()
process.env.OPENAI_MODEL = 'codexplan'
process.env.OPENAI_BASE_URL = 'http://localhost:11434/v1'
delete process.env.CLAUDE_CODE_USE_GITHUB
const resolved = resolveProviderRequest()
expect(resolved.transport).toBe('chat_completions')
expect(resolved.baseUrl).toBe('http://localhost:11434/v1')
})
-  test('loads Codex credentials from auth.json fallback', async () => {
-    const { resolveCodexApiCredentials } = await importFreshProviderConfigModule()
+  test('loads Codex credentials from auth.json fallback', () => {
    const authPath = createTempAuthJson({
      tokens: {
        access_token: 'header.payload.signature',
@@ -192,31 +134,6 @@ describe('Codex provider config', () => {
    expect(credentials.accountId).toBe('acct_test')
    expect(credentials.source).toBe('auth.json')
  })
test('does not treat auth.json id_token as a Codex bearer credential', async () => {
const { resolveCodexApiCredentials } = await importFreshProviderConfigModule()
const idTokenPayload = Buffer.from(
JSON.stringify({
'https://api.openai.com/auth': {
chatgpt_account_id: 'acct_from_id_token',
},
}),
'utf8',
).toString('base64url')
const authPath = createTempAuthJson({
tokens: {
id_token: `header.${idTokenPayload}.signature`,
},
})
const credentials = resolveCodexApiCredentials({
CODEX_AUTH_JSON_PATH: authPath,
} as NodeJS.ProcessEnv)
expect(credentials.apiKey).toBe('')
expect(credentials.accountId).toBe('acct_from_id_token')
expect(credentials.source).toBe('none')
})
})
describe('Codex request translation', () => {
@@ -548,7 +465,7 @@ describe('Codex request translation', () => {
    ])
  })
-  test('strips <think> tag block from completed Codex text responses', () => {
+  test('strips leaked reasoning preamble from completed Codex text responses', () => {
    const message = convertCodexResponseToAnthropicMessage(
      {
        id: 'resp_1',
@@ -561,7 +478,7 @@ describe('Codex request translation', () => {
          {
            type: 'output_text',
            text:
-              '<think>user wants a greeting, respond briefly</think>Hey! How can I help you today?',
+              'The user just said "hey" - a simple greeting. I should respond briefly and friendly.\n\nHey! How can I help you today?',
          },
        ],
      },
@@ -579,195 +496,6 @@ describe('Codex request translation', () => {
    ])
  })
test('strips unterminated <think> tag at block boundary in Codex completed response', () => {
const message = convertCodexResponseToAnthropicMessage(
{
id: 'resp_1',
model: 'gpt-5.4',
output: [
{
type: 'message',
role: 'assistant',
content: [
{
type: 'output_text',
text:
'Here is the answer.\n<think>wait, let me reconsider the user request',
},
],
},
],
usage: { input_tokens: 12, output_tokens: 4 },
},
'gpt-5.4',
)
expect(message.content).toEqual([
{
type: 'text',
text: 'Here is the answer.',
},
])
})
test('recovers Codex web search text and sources from sparse completed response', () => {
const output = webSearchToolTest.makeOutputFromCodexWebSearchResponse(
{
output: [
{
type: 'web_search_call',
sources: [
{
title: 'OpenClaude repo',
url: 'https://github.com/example/openclaude',
},
],
},
{
type: 'message',
role: 'assistant',
content: [
{
type: 'text',
text: 'OpenClaude is available on GitHub.',
sources: [
{
title: 'Docs',
url: 'https://docs.example.com/openclaude',
},
],
},
],
},
],
},
'OpenClaude GitHub 2026',
0.42,
)
expect(output.results).toEqual([
'OpenClaude is available on GitHub.',
{
tool_use_id: 'codex-web-search',
content: [
{
title: 'OpenClaude repo',
url: 'https://github.com/example/openclaude',
},
{
title: 'Docs',
url: 'https://docs.example.com/openclaude',
},
],
},
])
})
test('falls back to a non-empty Codex web search result message', () => {
const output = webSearchToolTest.makeOutputFromCodexWebSearchResponse(
{ output: [] },
'OpenClaude GitHub 2026',
0.11,
)
expect(output.results).toEqual(['No results found.'])
})
test('surfaces Codex web search failure reason with a message', () => {
const output = webSearchToolTest.makeOutputFromCodexWebSearchResponse(
{
output: [
{
type: 'web_search_call',
status: 'failed',
error: { message: 'upstream search provider rate-limited' },
},
],
},
'OpenClaude GitHub 2026',
0.05,
)
expect(output.results).toEqual([
'Web search failed: upstream search provider rate-limited',
])
})
test('surfaces Codex web search failure reason nested under action.error', () => {
const output = webSearchToolTest.makeOutputFromCodexWebSearchResponse(
{
output: [
{
type: 'web_search_call',
status: 'failed',
action: { error: { message: 'query blocked' } },
},
],
},
'OpenClaude GitHub 2026',
0.05,
)
expect(output.results).toEqual(['Web search failed: query blocked'])
})
test('handles Codex web search failure with no reason attached', () => {
const output = webSearchToolTest.makeOutputFromCodexWebSearchResponse(
{
output: [
{
type: 'web_search_call',
status: 'failed',
},
],
},
'OpenClaude GitHub 2026',
0.05,
)
expect(output.results).toEqual(['Web search failed.'])
})
test('a failure item does not suppress sources from a later message item', () => {
const output = webSearchToolTest.makeOutputFromCodexWebSearchResponse(
{
output: [
{
type: 'web_search_call',
status: 'failed',
error: { message: 'partial outage' },
},
{
type: 'message',
role: 'assistant',
content: [
{
type: 'output_text',
text: 'Partial results below.',
sources: [
{ title: 'Docs', url: 'https://docs.example.com/openclaude' },
],
},
],
},
],
},
'OpenClaude GitHub 2026',
0.05,
)
expect(output.results).toEqual([
'Web search failed: partial outage',
'Partial results below.',
{
tool_use_id: 'codex-web-search',
content: [
{ title: 'Docs', url: 'https://docs.example.com/openclaude' },
],
},
])
})
  test('translates Codex SSE text stream into Anthropic events', async () => {
    const responseText = [
      'event: response.output_item.added',
@@ -799,7 +527,7 @@ describe('Codex request translation', () => {
    ])
  })
-  test('strips <think> tag block from Codex SSE text stream', async () => {
+  test('strips leaked reasoning preamble from Codex SSE text stream', async () => {
    const responseText = [
      'event: response.output_item.added',
      'data: {"type":"response.output_item.added","item":{"id":"msg_1","type":"message","status":"in_progress","content":[],"role":"assistant"},"output_index":0,"sequence_number":0}',
@@ -808,13 +536,13 @@ describe('Codex request translation', () => {
'data: {"type":"response.content_part.added","content_index":0,"item_id":"msg_1","output_index":0,"part":{"type":"output_text","text":""},"sequence_number":1}', 'data: {"type":"response.content_part.added","content_index":0,"item_id":"msg_1","output_index":0,"part":{"type":"output_text","text":""},"sequence_number":1}',
'', '',
'event: response.output_text.delta', 'event: response.output_text.delta',
'data: {"type":"response.output_text.delta","content_index":0,"delta":"<think>user wants a greeting, respond briefly</think>Hey! How can I help you today?","item_id":"msg_1","output_index":0,"sequence_number":2}', 'data: {"type":"response.output_text.delta","content_index":0,"delta":"The user just said \\"hey\\" - a simple greeting. I should respond briefly and friendly.\\n\\nHey! How can I help you today?","item_id":"msg_1","output_index":0,"sequence_number":2}',
'', '',
'event: response.output_item.done', 'event: response.output_item.done',
'data: {"type":"response.output_item.done","item":{"id":"msg_1","type":"message","status":"completed","content":[{"type":"output_text","text":"<think>user wants a greeting, respond briefly</think>Hey! How can I help you today?"}],"role":"assistant"},"output_index":0,"sequence_number":3}', 'data: {"type":"response.output_item.done","item":{"id":"msg_1","type":"message","status":"completed","content":[{"type":"output_text","text":"The user just said \\"hey\\" - a simple greeting. I should respond briefly and friendly.\\n\\nHey! How can I help you today?"}],"role":"assistant"},"output_index":0,"sequence_number":3}',
'', '',
'event: response.completed', 'event: response.completed',
'data: {"type":"response.completed","response":{"id":"resp_1","status":"completed","model":"gpt-5.4","output":[{"type":"message","role":"assistant","content":[{"type":"output_text","text":"<think>user wants a greeting, respond briefly</think>Hey! How can I help you today?"}]}],"usage":{"input_tokens":2,"output_tokens":1}},"sequence_number":4}', 'data: {"type":"response.completed","response":{"id":"resp_1","status":"completed","model":"gpt-5.4","output":[{"type":"message","role":"assistant","content":[{"type":"output_text","text":"The user just said \\"hey\\" - a simple greeting. I should respond briefly and friendly.\\n\\nHey! How can I help you today?"}]}],"usage":{"input_tokens":2,"output_tokens":1}},"sequence_number":4}',
'', '',
].join('\n') ].join('\n')
@@ -836,50 +564,6 @@ describe('Codex request translation', () => {
      }
    }
-    expect(textDeltas.join('')).toBe('Hey! How can I help you today?')
+    expect(textDeltas).toEqual(['Hey! How can I help you today?'])
  })
test('preserves prose without tags (no phrase-based false positive)', async () => {
// Regression test: older phrase-based sanitizer would incorrectly strip text
// starting with "I should" or "The user". The tag-based approach leaves it alone.
const responseText = [
'event: response.output_item.added',
'data: {"type":"response.output_item.added","item":{"id":"msg_1","type":"message","status":"in_progress","content":[],"role":"assistant"},"output_index":0,"sequence_number":0}',
'',
'event: response.content_part.added',
'data: {"type":"response.content_part.added","content_index":0,"item_id":"msg_1","output_index":0,"part":{"type":"output_text","text":""},"sequence_number":1}',
'',
'event: response.output_text.delta',
'data: {"type":"response.output_text.delta","content_index":0,"delta":"I should note that the user role requires a briefly concise friendly response format.","item_id":"msg_1","output_index":0,"sequence_number":2}',
'',
'event: response.output_item.done',
'data: {"type":"response.output_item.done","item":{"id":"msg_1","type":"message","status":"completed","content":[{"type":"output_text","text":"I should note that the user role requires a briefly concise friendly response format."}],"role":"assistant"},"output_index":0,"sequence_number":3}',
'',
'event: response.completed',
'data: {"type":"response.completed","response":{"id":"resp_1","status":"completed","model":"gpt-5.4","output":[{"type":"message","role":"assistant","content":[{"type":"output_text","text":"I should note that the user role requires a briefly concise friendly response format."}]}],"usage":{"input_tokens":2,"output_tokens":1}},"sequence_number":4}',
'',
].join('\n')
const stream = new ReadableStream({
start(controller) {
controller.enqueue(new TextEncoder().encode(responseText))
controller.close()
},
})
const textDeltas: string[] = []
for await (const event of codexStreamToAnthropic(
new Response(stream),
'gpt-5.4',
)) {
const delta = (event as { delta?: { type?: string; text?: string } }).delta
if (delta?.type === 'text_delta' && typeof delta.text === 'string') {
textDeltas.push(delta.text)
}
}
expect(textDeltas.join('')).toBe(
'I should note that the user role requires a briefly concise friendly response format.',
)
  })
})

View File

@@ -1,15 +1,14 @@
import { APIError } from '@anthropic-ai/sdk'
-import { compressToolHistory } from './compressToolHistory.js'
-import { fetchWithProxyRetry } from './fetchWithProxyRetry.js'
import type {
  ResolvedCodexCredentials,
  ResolvedProviderRequest,
} from './providerConfig.js'
import { sanitizeSchemaForOpenAICompat } from './openaiSchemaSanitizer.js'
import {
-  createThinkTagFilter,
-  stripThinkTags,
-} from './thinkTagSanitizer.js'
+  looksLikeLeakedReasoningPrefix,
+  shouldBufferPotentialReasoningPrefix,
+  stripLeakedReasoningPreamble,
+} from './reasoningLeakSanitizer.js'
export interface AnthropicUsage {
  input_tokens: number
@@ -485,15 +484,13 @@ export async function performCodexRequest(options: {
  defaultHeaders: Record<string, string>
  signal?: AbortSignal
}): Promise<Response> {
-  const compressedMessages = compressToolHistory(
+  const input = convertAnthropicMessagesToResponsesInput(
    options.params.messages as Array<{
      role?: string
      message?: { role?: string; content?: unknown }
      content?: unknown
    }>,
-    options.request.resolvedModel,
  )
-  const input = convertAnthropicMessagesToResponsesInput(compressedMessages)
  const body: Record<string, unknown> = {
    model: options.request.resolvedModel,
    input: input.length > 0
@@ -562,15 +559,12 @@ export async function performCodexRequest(options: {
  }
  headers.originator ??= 'openclaude'
-  const response = await fetchWithProxyRetry(
-    `${options.request.baseUrl}/responses`,
-    {
+  const response = await fetch(`${options.request.baseUrl}/responses`, {
    method: 'POST',
    headers,
    body: JSON.stringify(body),
    signal: options.signal,
-    },
-  )
+  })
  if (!response.ok) {
    const errorBody = await response.text().catch(() => 'unknown error')
@@ -586,55 +580,15 @@ export async function performCodexRequest(options: {
  return response
}
-async function* readSseEvents(response: Response, signal?: AbortSignal): AsyncGenerator<CodexSseEvent> {
+async function* readSseEvents(response: Response): AsyncGenerator<CodexSseEvent> {
  const reader = response.body?.getReader()
  if (!reader) return
  const decoder = new TextDecoder()
  let buffer = ''
const STREAM_IDLE_TIMEOUT_MS = 120_000 // 2 minutes without data
let lastDataTime = Date.now()
/**
* Read from the stream with an idle timeout. Respects the caller's
* AbortSignal — clears the idle timer on abort so the AbortError
* surfaces cleanly instead of a spurious idle timeout.
*/
async function readWithTimeout(): Promise<ReadableStreamReadResult<Uint8Array>> {
return new Promise((resolve, reject) => {
const timeoutId = setTimeout(() => {
const elapsed = Math.round((Date.now() - lastDataTime) / 1000)
reject(new Error(
`Codex SSE stream idle for ${elapsed}s (limit: ${STREAM_IDLE_TIMEOUT_MS / 1000}s). Connection likely dropped.`,
))
}, STREAM_IDLE_TIMEOUT_MS)
let abortCleanup: (() => void) | undefined
if (signal) {
abortCleanup = () => {
clearTimeout(timeoutId)
}
signal.addEventListener('abort', abortCleanup, { once: true })
}
reader.read().then(
result => {
clearTimeout(timeoutId)
if (signal && abortCleanup) signal.removeEventListener('abort', abortCleanup)
if (result.value) lastDataTime = Date.now()
resolve(result)
},
err => {
clearTimeout(timeoutId)
if (signal && abortCleanup) signal.removeEventListener('abort', abortCleanup)
reject(err)
},
)
})
}
  while (true) {
-    const { done, value } = await readWithTimeout()
+    const { done, value } = await reader.read()
    if (done) break
    buffer += decoder.decode(value, { stream: true })
@@ -695,11 +649,10 @@ function determineStopReason(
export async function collectCodexCompletedResponse(
  response: Response,
-  signal?: AbortSignal,
): Promise<Record<string, any>> {
  let completedResponse: Record<string, any> | undefined
-  for await (const event of readSseEvents(response, signal)) {
+  for await (const event of readSseEvents(response)) {
    if (event.event === 'response.failed') {
      const msg = event.data?.response?.error?.message ??
        event.data?.error?.message ?? 'Codex response failed'
@@ -728,7 +681,6 @@ export async function collectCodexCompletedResponse(
export async function* codexStreamToAnthropic(
  response: Response,
  model: string,
-  signal?: AbortSignal,
): AsyncGenerator<AnthropicStreamEvent> {
  const messageId = makeMessageId()
  const toolBlocksByItemId = new Map<
@@ -736,29 +688,34 @@ export async function* codexStreamToAnthropic(
    { index: number; toolUseId: string }
  >()
  let activeTextBlockIndex: number | null = null
-  const thinkFilter = createThinkTagFilter()
+  let activeTextBuffer = ''
+  let textBufferMode: 'none' | 'pending' | 'strip' = 'none'
  let nextContentBlockIndex = 0
  let sawToolUse = false
  let finalResponse: Record<string, any> | undefined
  const closeActiveTextBlock = async function* () {
    if (activeTextBlockIndex === null) return
-    const tail = thinkFilter.flush()
-    if (tail) {
+    if (textBufferMode !== 'none') {
+      const sanitized = stripLeakedReasoningPreamble(activeTextBuffer)
+      if (sanitized) {
        yield {
          type: 'content_block_delta',
          index: activeTextBlockIndex,
          delta: {
            type: 'text_delta',
-            text: tail,
+            text: sanitized,
          },
        }
      }
+    }
    yield {
      type: 'content_block_stop',
      index: activeTextBlockIndex,
    }
    activeTextBlockIndex = null
+    activeTextBuffer = ''
+    textBufferMode = 'none'
  }
  const startTextBlockIfNeeded = async function* () {
@@ -785,7 +742,7 @@ export async function* codexStreamToAnthropic(
    },
  }
-  for await (const event of readSseEvents(response, signal)) {
+  for await (const event of readSseEvents(response)) {
    const payload = event.data
    if (event.event === 'response.output_item.added') {
@@ -834,17 +791,43 @@ export async function* codexStreamToAnthropic(
    if (event.event === 'response.output_text.delta') {
      yield* startTextBlockIfNeeded()
+      activeTextBuffer += payload.delta ?? ''
      if (activeTextBlockIndex !== null) {
-        const visible = thinkFilter.feed(payload.delta ?? '')
-        if (visible) {
+        if (
+          textBufferMode === 'strip' ||
+          looksLikeLeakedReasoningPrefix(activeTextBuffer)
+        ) {
+          textBufferMode = 'strip'
+          continue
+        }
+        if (textBufferMode === 'pending') {
+          if (shouldBufferPotentialReasoningPrefix(activeTextBuffer)) {
+            continue
+          }
          yield {
            type: 'content_block_delta',
            index: activeTextBlockIndex,
            delta: {
              type: 'text_delta',
-              text: visible,
+              text: activeTextBuffer,
            },
          }
+          textBufferMode = 'none'
+          continue
+        }
+        if (shouldBufferPotentialReasoningPrefix(activeTextBuffer)) {
+          textBufferMode = 'pending'
+          continue
+        }
+        yield {
+          type: 'content_block_delta',
+          index: activeTextBlockIndex,
+          delta: {
+            type: 'text_delta',
+            text: payload.delta ?? '',
+          },
+        }
      }
      continue
@@ -940,7 +923,7 @@ export function convertCodexResponseToAnthropicMessage(
      if (part?.type === 'output_text') {
        content.push({
          type: 'text',
-          text: stripThinkTags(part.text ?? ''),
+          text: stripLeakedReasoningPreamble(part.text ?? ''),
        })
      }
    }

View File

@@ -1,13 +1,7 @@
-import {
-  readCodexCredentialsAsync,
-  refreshCodexAccessTokenIfNeeded,
-} from '../../utils/codexCredentials.js'
-import { logForDebugging } from '../../utils/debug.js'
-import { isBareMode } from '../../utils/envUtils.js'
import {
  DEFAULT_CODEX_BASE_URL,
  isCodexBaseUrl,
-  resolveRuntimeCodexCredentials,
+  resolveCodexApiCredentials,
  resolveProviderRequest,
} from './providerConfig.js'
@@ -397,18 +391,6 @@ export function getCodexUsageUrl(baseUrl = DEFAULT_CODEX_BASE_URL): string {
}
export async function fetchCodexUsage(): Promise<CodexUsageData> {
-  const refreshResult = await refreshCodexAccessTokenIfNeeded().catch(
-    async error => {
-      logForDebugging(
-        `[codex] access token refresh failed before usage fetch: ${error instanceof Error ? error.message : String(error)}`,
-        { level: 'warn' },
-      )
-      return {
-        refreshed: false,
-        credentials: await readCodexCredentialsAsync(),
-      }
-    },
-  )
  const request = resolveProviderRequest({
    model: process.env.OPENAI_MODEL,
    baseUrl: process.env.OPENAI_BASE_URL,
@@ -419,19 +401,16 @@ export async function fetchCodexUsage(): Promise<CodexUsageData> {
    )
  }
-  const credentials = resolveRuntimeCodexCredentials({
-    storedCredentials: refreshResult.credentials,
-  })
+  const credentials = resolveCodexApiCredentials()
  if (!credentials.apiKey) {
-    const oauthHint = isBareMode() ? '' : ', choose Codex OAuth in /provider'
    const authHint = credentials.authPath
-      ? `${oauthHint} or place a Codex auth.json at ${credentials.authPath}`
-      : oauthHint
+      ? ` or place a Codex auth.json at ${credentials.authPath}`
+      : ''
    throw new Error(`Codex auth is required. Set CODEX_API_KEY${authHint}.`)
  }
  if (!credentials.accountId) {
    throw new Error(
-      'Codex auth is missing chatgpt_account_id. Re-login with Codex OAuth, the Codex CLI, or set CHATGPT_ACCOUNT_ID/CODEX_ACCOUNT_ID.',
+      'Codex auth is missing chatgpt_account_id. Re-login with the Codex CLI or set CHATGPT_ACCOUNT_ID/CODEX_ACCOUNT_ID.',
    )
  }

View File

@@ -1,572 +0,0 @@
import { afterEach, beforeEach, expect, mock, test } from 'bun:test'
import { compressToolHistory, getTiers } from './compressToolHistory.js'
// Mock the two dependencies so tests are deterministic and don't read disk config.
const mockState = {
enabled: true,
effectiveWindow: 100_000,
}
mock.module('../../utils/config.js', () => ({
getGlobalConfig: () => ({
toolHistoryCompressionEnabled: mockState.enabled,
}),
}))
mock.module('../compact/autoCompact.js', () => ({
getEffectiveContextWindowSize: () => mockState.effectiveWindow,
}))
beforeEach(() => {
mockState.enabled = true
mockState.effectiveWindow = 100_000
})
afterEach(() => {
mockState.enabled = true
mockState.effectiveWindow = 100_000
})
type Block = Record<string, unknown>
type Msg = { role: string; content: Block[] | string }
function bigText(n: number): string {
return 'x'.repeat(n)
}
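// One synthetic exchange: an assistant tool_use immediately answered by a user
// tool_result of the requested size, the pairing compressToolHistory keys on.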
function buildToolExchange(id: number, resultLength: number): Msg[] {
return [
{
role: 'assistant',
content: [
{
type: 'tool_use',
id: `toolu_${id}`,
name: 'Read',
input: { file_path: `/path/to/file${id}.ts` },
},
],
},
{
role: 'user',
content: [
{
type: 'tool_result',
tool_use_id: `toolu_${id}`,
content: bigText(resultLength),
},
],
},
]
}
function buildConversation(numToolExchanges: number, resultLength = 5_000): Msg[] {
const out: Msg[] = [{ role: 'user', content: 'Initial request' }]
for (let i = 0; i < numToolExchanges; i++) {
out.push(...buildToolExchange(i, resultLength))
}
return out
}
function getResultMessages(messages: Msg[]): Msg[] {
return messages.filter(
m => Array.isArray(m.content) && m.content.some((b: any) => b.type === 'tool_result'),
)
}
function getResultBlock(msg: Msg): Block {
return (msg.content as Block[]).find((b: any) => b.type === 'tool_result') as Block
}
function getResultText(msg: Msg): string {
const block = getResultBlock(msg)
const c = block.content
if (typeof c === 'string') return c
if (Array.isArray(c)) {
return c
.filter((b: any) => b.type === 'text')
.map((b: any) => b.text)
.join('\n')
}
return ''
}
// ---------- getTiers ----------
test('getTiers: < 16k window → recent=2, mid=3', () => {
expect(getTiers(8_000)).toEqual({ recent: 2, mid: 3 })
})
test('getTiers: 16k–32k → recent=3, mid=5', () => {
expect(getTiers(20_000)).toEqual({ recent: 3, mid: 5 })
})
test('getTiers: 32k–64k → recent=4, mid=8', () => {
expect(getTiers(48_000)).toEqual({ recent: 4, mid: 8 })
})
test('getTiers: 64k–128k (Copilot gpt-4o) → recent=5, mid=10', () => {
expect(getTiers(100_000)).toEqual({ recent: 5, mid: 10 })
})
test('getTiers: 128k–256k (Copilot Claude) → recent=8, mid=15', () => {
expect(getTiers(200_000)).toEqual({ recent: 8, mid: 15 })
})
test('getTiers: 256k–500k → recent=12, mid=25', () => {
expect(getTiers(400_000)).toEqual({ recent: 12, mid: 25 })
})
test('getTiers: ≥ 500k (gpt-4.1 1M) → recent=25, mid=50', () => {
expect(getTiers(1_000_000)).toEqual({ recent: 25, mid: 50 })
})
// ---------- master switch ----------
test('pass-through when toolHistoryCompressionEnabled is false', () => {
mockState.enabled = false
const messages = buildConversation(20)
const result = compressToolHistory(messages, 'gpt-4o')
expect(result).toBe(messages) // same reference (no transformation)
})
test('pass-through when total tool_results <= recent tier', () => {
// 100k effective → recent=5; only 4 exchanges → no compression
const messages = buildConversation(4)
const result = compressToolHistory(messages, 'gpt-4o')
expect(result).toBe(messages)
})
// ---------- per-tier behavior ----------
test('recent tier: tool_result content untouched', () => {
// 100k effective → recent=5, mid=10. With 6 exchanges, only the oldest is touched.
const messages = buildConversation(6, 5_000)
const result = compressToolHistory(messages, 'gpt-4o')
const resultMsgs = getResultMessages(result)
// Last 5 should be untouched (full 5000 chars)
for (let i = resultMsgs.length - 5; i < resultMsgs.length; i++) {
expect(getResultText(resultMsgs[i]).length).toBe(5_000)
}
})
test('mid tier: long content truncated to MID_MAX_CHARS with marker', () => {
// 100k → recent=5, mid=10. 10 exchanges: 5 recent + 5 mid (none old).
const messages = buildConversation(10, 5_000)
const result = compressToolHistory(messages, 'gpt-4o')
const resultMsgs = getResultMessages(result)
// First 5 are mid tier — should be truncated to ~2000 chars + marker
for (let i = 0; i < 5; i++) {
const text = getResultText(resultMsgs[i])
expect(text).toContain('[…truncated')
expect(text).toContain('chars from tool history]')
// Should be roughly 2000 chars + marker (under 2200)
expect(text.length).toBeLessThan(2_200)
expect(text.length).toBeGreaterThan(2_000)
}
})
test('mid tier: short content (< MID_MAX_CHARS) untouched', () => {
const messages = buildConversation(10, 500) // 500 < MID_MAX_CHARS
const result = compressToolHistory(messages, 'gpt-4o')
const resultMsgs = getResultMessages(result)
for (let i = 0; i < 5; i++) {
expect(getResultText(resultMsgs[i])).toBe(bigText(500))
}
})
test('old tier: content replaced with stub [name args={...} → N chars omitted]', () => {
// 100k → recent=5, mid=10, old=rest. 20 exchanges → 5 old + 10 mid + 5 recent.
const messages = buildConversation(20, 5_000)
const result = compressToolHistory(messages, 'gpt-4o')
const resultMsgs = getResultMessages(result)
// First 5 are old tier — should be stubs
for (let i = 0; i < 5; i++) {
const text = getResultText(resultMsgs[i])
expect(text).toMatch(/^\[Read args=\{.*\} → 5000 chars omitted\]$/)
}
})
test('old tier: stub args truncated to 200 chars', () => {
const longArg = bigText(500)
const messages: Msg[] = [
{ role: 'user', content: 'start' },
{
role: 'assistant',
content: [
{
type: 'tool_use',
id: 'toolu_x',
name: 'Bash',
input: { command: longArg },
},
],
},
{
role: 'user',
content: [
{ type: 'tool_result', tool_use_id: 'toolu_x', content: 'output' },
],
},
// Pad with enough recent exchanges to push the above into old tier
...buildConversation(20, 100).slice(1),
]
const result = compressToolHistory(messages, 'gpt-4o')
const resultMsgs = getResultMessages(result)
const text = getResultText(resultMsgs[0])
// Stub format: [Bash args=<json≤200chars> → N chars omitted]
// The args portion (between args= and →) must be ≤ 200 chars.
const argsMatch = text.match(/args=(.*?) →/)
expect(argsMatch).not.toBeNull()
expect(argsMatch![1].length).toBeLessThanOrEqual(200)
})
test('old tier: orphan tool_result (no matching tool_use) falls back to "tool"', () => {
const messages: Msg[] = [
{ role: 'user', content: 'start' },
// Orphan: tool_result without matching tool_use in history
{
role: 'user',
content: [
{ type: 'tool_result', tool_use_id: 'orphan_id', content: 'data' },
],
},
...buildConversation(20, 100).slice(1),
]
const result = compressToolHistory(messages, 'gpt-4o')
const resultMsgs = getResultMessages(result)
const text = getResultText(resultMsgs[0])
expect(text).toMatch(/^\[tool args=\{\} → 4 chars omitted\]$/)
})
// ---------- structural preservation ----------
test('tool_use blocks always preserved', () => {
const messages = buildConversation(20, 5_000)
const result = compressToolHistory(messages, 'gpt-4o')
const useCount = (msgs: Msg[]) =>
msgs.reduce((sum, m) => {
if (!Array.isArray(m.content)) return sum
return sum + m.content.filter((b: any) => b.type === 'tool_use').length
}, 0)
expect(useCount(result as Msg[])).toBe(useCount(messages))
})
test('text blocks always preserved', () => {
const messages: Msg[] = [
{ role: 'user', content: 'first' },
{
role: 'assistant',
content: [
{ type: 'text', text: 'reasoning before tool' },
{ type: 'tool_use', id: 'toolu_1', name: 'Read', input: {} },
],
},
{
role: 'user',
content: [{ type: 'tool_result', tool_use_id: 'toolu_1', content: bigText(5000) }],
},
...buildConversation(20, 5_000).slice(1),
]
const result = compressToolHistory(messages, 'gpt-4o')
const assistantMsg = (result as Msg[])[1]
const textBlock = (assistantMsg.content as Block[]).find((b: any) => b.type === 'text')
expect(textBlock).toEqual({ type: 'text', text: 'reasoning before tool' })
})
test('thinking blocks always preserved', () => {
const messages: Msg[] = [
{ role: 'user', content: 'first' },
{
role: 'assistant',
content: [
{ type: 'thinking', thinking: 'internal reasoning', signature: 'sig' },
{ type: 'tool_use', id: 'toolu_1', name: 'Read', input: {} },
],
},
{
role: 'user',
content: [{ type: 'tool_result', tool_use_id: 'toolu_1', content: bigText(5000) }],
},
...buildConversation(20, 5_000).slice(1),
]
const result = compressToolHistory(messages, 'gpt-4o')
const assistantMsg = (result as Msg[])[1]
const thinking = (assistantMsg.content as Block[]).find((b: any) => b.type === 'thinking')
expect(thinking).toEqual({
type: 'thinking',
thinking: 'internal reasoning',
signature: 'sig',
})
})
test('non-array content (string) handled gracefully', () => {
const messages: Msg[] = [
{ role: 'user', content: 'plain string content' },
...buildConversation(20, 100).slice(1),
]
const result = compressToolHistory(messages, 'gpt-4o')
expect((result as Msg[])[0].content).toBe('plain string content')
})
test('empty content array handled gracefully', () => {
const messages: Msg[] = [
{ role: 'user', content: [] },
...buildConversation(20, 100).slice(1),
]
expect(() => compressToolHistory(messages, 'gpt-4o')).not.toThrow()
})
// ---------- message shape compatibility ----------
test('wrapped shape ({ message: { role, content } }) handled', () => {
type WrappedMsg = { message: { role: string; content: Block[] | string } }
const wrap = (m: Msg): WrappedMsg => ({ message: { role: m.role, content: m.content } })
const messages = buildConversation(20, 5_000).map(wrap)
const result = compressToolHistory(messages as any, 'gpt-4o')
// First wrapped tool-result message should have stub content (old tier)
const firstResultMsg = (result as WrappedMsg[]).find(
m =>
Array.isArray(m.message.content) &&
m.message.content.some((b: any) => b.type === 'tool_result'),
)
const block = (firstResultMsg!.message.content as Block[]).find(
(b: any) => b.type === 'tool_result',
) as Block
const text = ((block.content as Block[])[0] as any).text
expect(text).toMatch(/^\[Read args=.*→ 5000 chars omitted\]$/)
})
test('flat shape ({ role, content }) handled', () => {
const messages = buildConversation(20, 5_000)
const result = compressToolHistory(messages, 'gpt-4o')
const resultMsgs = getResultMessages(result)
expect(getResultText(resultMsgs[0])).toMatch(/^\[Read args=.*→ 5000 chars omitted\]$/)
})
// ---------- tier boundary correctness ----------
test('tier boundaries: 6 exchanges → 1 mid + 5 recent (recent=5)', () => {
const messages = buildConversation(6, 5_000)
const result = compressToolHistory(messages, 'gpt-4o')
const resultMsgs = getResultMessages(result)
// Oldest: mid (truncated)
expect(getResultText(resultMsgs[0])).toContain('[…truncated')
// Last 5: untouched
for (let i = 1; i < 6; i++) {
expect(getResultText(resultMsgs[i]).length).toBe(5_000)
}
})
test('tier boundaries: 16 exchanges → 1 old + 10 mid + 5 recent', () => {
const messages = buildConversation(16, 5_000)
const result = compressToolHistory(messages, 'gpt-4o')
const resultMsgs = getResultMessages(result)
// Oldest 1: stub (old tier)
expect(getResultText(resultMsgs[0])).toMatch(/^\[Read .*chars omitted\]$/)
// Next 10: mid (truncated)
for (let i = 1; i < 11; i++) {
expect(getResultText(resultMsgs[i])).toContain('[…truncated')
}
// Last 5: untouched
for (let i = 11; i < 16; i++) {
expect(getResultText(resultMsgs[i]).length).toBe(5_000)
}
})
test('large window (1M) with 30 exchanges: all untouched (recent=25 ≥ 30 - 5)', () => {
// ≥500k → recent=25, mid=50. 30 exchanges → 5 mid + 25 recent. None old.
mockState.effectiveWindow = 1_000_000
const messages = buildConversation(30, 5_000)
const result = compressToolHistory(messages, 'gpt-4.1')
const resultMsgs = getResultMessages(result)
// Last 25: untouched
for (let i = 5; i < 30; i++) {
expect(getResultText(resultMsgs[i]).length).toBe(5_000)
}
})
// ---------- attribute preservation ----------
test('is_error flag preserved in mid tier', () => {
const messages: Msg[] = [
{ role: 'user', content: 'start' },
{
role: 'assistant',
content: [{ type: 'tool_use', id: 'toolu_err', name: 'Bash', input: {} }],
},
{
role: 'user',
content: [
{
type: 'tool_result',
tool_use_id: 'toolu_err',
is_error: true,
content: bigText(5_000),
},
],
},
// Pad with enough recent exchanges to push the above into MID tier
...buildConversation(10, 100).slice(1),
]
const result = compressToolHistory(messages, 'gpt-4o')
const resultMsgs = getResultMessages(result)
const block = getResultBlock(resultMsgs[0]) as { is_error?: boolean; content: unknown }
expect(block.is_error).toBe(true)
expect(getResultText(resultMsgs[0])).toContain('[…truncated')
})
test('is_error flag preserved in old tier (stub)', () => {
const messages: Msg[] = [
{ role: 'user', content: 'start' },
{
role: 'assistant',
content: [{ type: 'tool_use', id: 'toolu_err', name: 'Bash', input: {} }],
},
{
role: 'user',
content: [
{
type: 'tool_result',
tool_use_id: 'toolu_err',
is_error: true,
content: bigText(5_000),
},
],
},
...buildConversation(20, 100).slice(1),
]
const result = compressToolHistory(messages, 'gpt-4o')
const resultMsgs = getResultMessages(result)
const block = getResultBlock(resultMsgs[0]) as { is_error?: boolean; content: unknown }
expect(block.is_error).toBe(true)
expect(getResultText(resultMsgs[0])).toMatch(/^\[Bash .*chars omitted\]$/)
})
// ---------- COMPACTABLE_TOOLS filter ----------
test('non-compactable tool (e.g. Task/Agent) is NEVER compressed', () => {
// Build conversation where the OLDEST exchange uses a non-compactable tool name
const messages: Msg[] = [
{ role: 'user', content: 'start' },
{
role: 'assistant',
content: [
{ type: 'tool_use', id: 'task_1', name: 'Task', input: { goal: 'plan' } },
],
},
{
role: 'user',
content: [
{ type: 'tool_result', tool_use_id: 'task_1', content: bigText(5_000) },
],
},
// Pad with 20 compactable exchanges to push Task into old tier
...buildConversation(20, 100).slice(1),
]
const result = compressToolHistory(messages, 'gpt-4o')
const resultMsgs = getResultMessages(result)
// First tool_result is for Task (non-compactable) → must remain full
expect(getResultText(resultMsgs[0]).length).toBe(5_000)
expect(getResultText(resultMsgs[0])).not.toContain('chars omitted')
expect(getResultText(resultMsgs[0])).not.toContain('[…truncated')
})
test('mcp__ prefixed tools ARE compactable (matches microCompact behavior)', () => {
const messages: Msg[] = [
{ role: 'user', content: 'start' },
{
role: 'assistant',
content: [
{ type: 'tool_use', id: 'mcp_1', name: 'mcp__github__get_issue', input: {} },
],
},
{
role: 'user',
content: [
{ type: 'tool_result', tool_use_id: 'mcp_1', content: bigText(5_000) },
],
},
...buildConversation(20, 100).slice(1),
]
const result = compressToolHistory(messages, 'gpt-4o')
const resultMsgs = getResultMessages(result)
// MCP tool result is compressed (gets stub since it's in old tier)
expect(getResultText(resultMsgs[0])).toMatch(/^\[mcp__github__get_issue .*chars omitted\]$/)
})
// ---------- skip already-cleared blocks ----------
test('blocks already cleared by microCompact are NOT re-compressed', () => {
const messages: Msg[] = [
{ role: 'user', content: 'start' },
{
role: 'assistant',
content: [{ type: 'tool_use', id: 'cleared_1', name: 'Read', input: {} }],
},
{
role: 'user',
content: [
{
type: 'tool_result',
tool_use_id: 'cleared_1',
content: '[Old tool result content cleared]', // microCompact's marker
},
],
},
...buildConversation(20, 100).slice(1),
]
const result = compressToolHistory(messages, 'gpt-4o')
const resultMsgs = getResultMessages(result)
// Already-cleared marker survives untouched (no double processing)
expect(getResultText(resultMsgs[0])).toBe('[Old tool result content cleared]')
})
test('extra block attributes (e.g. cache_control) preserved across rewrites', () => {
const cacheControl = { type: 'ephemeral' }
const messages: Msg[] = [
{ role: 'user', content: 'start' },
{
role: 'assistant',
content: [{ type: 'tool_use', id: 'toolu_cc', name: 'Read', input: {} }],
},
{
role: 'user',
content: [
{
type: 'tool_result',
tool_use_id: 'toolu_cc',
cache_control: cacheControl,
content: bigText(5_000),
},
],
},
...buildConversation(20, 100).slice(1),
]
const result = compressToolHistory(messages, 'gpt-4o')
const resultMsgs = getResultMessages(result)
const block = getResultBlock(resultMsgs[0]) as { cache_control?: unknown }
// The custom attribute survived the stub rewrite via ...block spread
expect(block.cache_control).toEqual(cacheControl)
})

View File

@@ -1,255 +0,0 @@
/**
* Compresses old tool_result content for stateless OpenAI-compatible providers
* (Copilot, Mistral, Ollama). Preserves all conversation structure — tool_use,
* tool_result pairing, text, thinking, and is_error all survive intact. Only
* the BULK text of older tool_results is shrunk to delay context saturation.
*
* Tier sizes scale with the model's effective context window via
* getEffectiveContextWindowSize() — same calculation used by auto-compact, so
* the two systems stay aligned.
*
* Complements (does not replace) microCompact.ts:
* - microCompact: time/cache-based, runs from query.ts, binary clear/keep,
* limited to Claude (cache editing) or idle gaps (time-based).
* - compressToolHistory: size-based, runs at the shim layer, tiered
* compression, covers the gap for active sessions on non-Claude providers.
*
* Reuses isCompactableTool from microCompact to avoid touching tools the
* project already classifies as unsafe to compress (e.g. Task, Agent).
* Skips blocks already cleared by microCompact (TOOL_RESULT_CLEARED_MESSAGE).
*
* Anthropic native bypasses both shims, so it is unaffected by this module.
*/
import { getEffectiveContextWindowSize } from '../compact/autoCompact.js'
import { isCompactableTool } from '../compact/microCompact.js'
import { TOOL_RESULT_CLEARED_MESSAGE } from '../../utils/toolResultStorage.js'
import { getGlobalConfig } from '../../utils/config.js'
// Mid-tier truncation budget. 2k chars ≈ 500 tokens, enough to preserve the
// shape of most tool outputs (file headers, command stderr, top grep hits)
// without ballooning context. Bump too high and the tier loses its purpose.
const MID_MAX_CHARS = 2_000
// Stub args budget. JSON.stringify of a typical tool input fits in 200 chars
// (file paths, short commands, small queries). Long inputs are rare and clamping
// here keeps the stub size bounded even when callers pass oversized arguments.
const STUB_ARGS_MAX_CHARS = 200
type AnyMessage = {
role?: string
message?: { role?: string; content?: unknown }
content?: unknown
}
type ToolResultBlock = {
type: 'tool_result'
tool_use_id?: string
is_error?: boolean
content?: unknown
}
type ToolUseBlock = {
type: 'tool_use'
id?: string
name?: string
input?: unknown
}
type Tiers = { recent: number; mid: number }
// Tier sizes scale with effective window. Targets roughly:
// - recent tier stays under ~25% of available window (full fidelity kept)
// - recent + mid tier stays under ~50% of available window (bounded bulk)
// - everything older collapses to ~15-token stubs
// Values assume ~5KB avg tool_result, which matches the Copilot default case
// (parallel_tool_calls=true means multiple Read/Bash outputs per turn). For
// ≥ 500k models the tiers are so generous that compression is effectively
// inert for any realistic session — see compressToolHistory.test.ts.
export function getTiers(effectiveWindow: number): Tiers {
if (effectiveWindow < 16_000) return { recent: 2, mid: 3 }
if (effectiveWindow < 32_000) return { recent: 3, mid: 5 }
if (effectiveWindow < 64_000) return { recent: 4, mid: 8 }
if (effectiveWindow < 128_000) return { recent: 5, mid: 10 }
if (effectiveWindow < 256_000) return { recent: 8, mid: 15 }
if (effectiveWindow < 500_000) return { recent: 12, mid: 25 }
return { recent: 25, mid: 50 }
}
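// Worked example: a 100k effective window maps to { recent: 5, mid: 10 }, so a
// session with 20 tool_results keeps the newest 5 verbatim, truncates the next
// 10 to MID_MAX_CHARS, and collapses the oldest 5 into stubs.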
function extractText(content: unknown): string {
if (typeof content === 'string') return content
if (Array.isArray(content)) {
return content
.filter(
(b: { type?: string; text?: string }) =>
b?.type === 'text' && typeof b.text === 'string',
)
.map((b: { text?: string }) => b.text ?? '')
.join('\n')
}
return ''
}
// Old-tier compression strategy. Replaces content entirely with a one-line
// metadata marker ~10× more token-efficient than a 500-char truncation AND
// unambiguous — partial truncations can look authoritative to the model. The
// stub format encodes tool name + args so the model can re-invoke the same
// tool if it needs the omitted output back.
function buildStub(
block: ToolResultBlock,
toolUsesById: Map<string, ToolUseBlock>,
): ToolResultBlock {
const original = extractText(block.content)
const toolUse = toolUsesById.get(block.tool_use_id ?? '')
const name = toolUse?.name ?? 'tool'
const args = toolUse?.input
? JSON.stringify(toolUse.input).slice(0, STUB_ARGS_MAX_CHARS)
: '{}'
return {
...block,
content: [
{
type: 'text',
        text: `[${name} args=${args} → ${original.length} chars omitted]`,
},
],
}
}
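// e.g. a 5000-char Read result becomes:
//   [Read args={"file_path":"/path/to/file7.ts"} → 5000 chars omitted]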
// Mid-tier compression. The trailing marker is load-bearing: without it, the
// model can't distinguish "tool returned 2000 chars" from "tool returned 20k
// chars that we cut to 2000". Distinguishing those matters for the model's
// decision to re-invoke the tool.
function truncateBlock(
block: ToolResultBlock,
maxChars: number,
): ToolResultBlock {
const text = extractText(block.content)
if (text.length <= maxChars) return block
const omitted = text.length - maxChars
return {
...block,
content: [
{
type: 'text',
text: `${text.slice(0, maxChars)}\n[…truncated ${omitted} chars from tool history]`,
},
],
}
}
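// e.g. a 20000-char grep dump keeps its first 2000 chars followed by
// "[…truncated 18000 chars from tool history]".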
function getInner(msg: AnyMessage): { role?: string; content?: unknown } {
return (msg.message ?? msg) as { role?: string; content?: unknown }
}
function indexToolUses(messages: AnyMessage[]): Map<string, ToolUseBlock> {
const map = new Map<string, ToolUseBlock>()
for (const msg of messages) {
const content = getInner(msg).content
if (!Array.isArray(content)) continue
for (const b of content as Array<{ type?: string; id?: string }>) {
if (b?.type === 'tool_use' && b.id) {
map.set(b.id, b as ToolUseBlock)
}
}
}
return map
}
function indexToolResultMessages(messages: AnyMessage[]): number[] {
const indices: number[] = []
for (let i = 0; i < messages.length; i++) {
const inner = getInner(messages[i])
const role = inner.role ?? messages[i].role
const content = inner.content
if (
role === 'user' &&
Array.isArray(content) &&
content.some((b: { type?: string }) => b?.type === 'tool_result')
) {
indices.push(i)
}
}
return indices
}
function rewriteMessage<T extends AnyMessage>(
msg: T,
newContent: unknown[],
): T {
if (msg.message) {
return { ...msg, message: { ...msg.message, content: newContent } }
}
return { ...msg, content: newContent }
}
// microCompact.maybeTimeBasedMicrocompact may have already replaced old
// tool_result content with TOOL_RESULT_CLEARED_MESSAGE before we see it.
// Re-compressing produces a stub over a marker (e.g. `[Read args={} → 40
// chars omitted]`), wasteful and less informative than the canonical marker.
function isAlreadyCleared(block: ToolResultBlock): boolean {
const text = extractText(block.content)
return text === TOOL_RESULT_CLEARED_MESSAGE
}
function shouldCompressBlock(
block: ToolResultBlock,
toolUsesById: Map<string, ToolUseBlock>,
): boolean {
if (isAlreadyCleared(block)) return false
const toolUse = toolUsesById.get(block.tool_use_id ?? '')
// Unknown tool name (orphan tool_result with no matching tool_use) falls
// through to compression with a generic "tool" stub. Safer default: the
// original tool_use vanished so there's no downstream use for the output.
if (!toolUse?.name) return true
// Respect microCompact's curated safe-to-compress set (Read/Bash/Grep/…/
// mcp__*) so user-facing flow tools (Task, Agent, custom) stay intact.
return isCompactableTool(toolUse.name)
}
export function compressToolHistory<T extends AnyMessage>(
messages: T[],
model: string,
): T[] {
// Master kill-switch. Returns the original reference so callers skip a
// defensive copy when the feature is disabled.
if (!getGlobalConfig().toolHistoryCompressionEnabled) return messages
const tiers = getTiers(getEffectiveContextWindowSize(model))
const toolResultIndices = indexToolResultMessages(messages)
const total = toolResultIndices.length
// If every tool-result fits in the recent tier, nothing crosses a tier
// boundary; return the same reference for the same copy-elision reason.
if (total <= tiers.recent) return messages
// O(1) lookup: messageIndex → tool-result position (0 = oldest). Replaces
// the naive Array.indexOf(i) that was O(n²) across the .map below.
const positionByIndex = new Map<number, number>()
for (let pos = 0; pos < toolResultIndices.length; pos++) {
positionByIndex.set(toolResultIndices[pos], pos)
}
const toolUsesById = indexToolUses(messages)
return messages.map((msg, i) => {
const pos = positionByIndex.get(i)
if (pos === undefined) return msg
const fromEnd = total - 1 - pos
if (fromEnd < tiers.recent) return msg
const inMidWindow = fromEnd < tiers.recent + tiers.mid
const content = getInner(msg).content as unknown[]
const newContent = content.map(block => {
const b = block as { type?: string }
if (b?.type !== 'tool_result') return block
const tr = block as ToolResultBlock
if (!shouldCompressBlock(tr, toolUsesById)) return block
return inMidWindow
? truncateBlock(tr, MID_MAX_CHARS)
: buildStub(tr, toolUsesById)
})
return rewriteMessage(msg, newContent)
})
}
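// Worked example (tier math only; tiers.recent = 5, tiers.mid = 10, as in
// the 100k-window tests below): with 30 tool-results, positions 25–29 have
// fromEnd < 5 and stay full, positions 15–24 have fromEnd < 15 and are
// truncated to MID_MAX_CHARS, and positions 0–14 become stubs.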

View File

@@ -1,44 +0,0 @@
import { APIError } from '@anthropic-ai/sdk'
import { expect, test } from 'bun:test'
import { getAssistantMessageFromError } from './errors.js'
function getFirstText(message: ReturnType<typeof getAssistantMessageFromError>): string {
const first = message.message.content[0]
if (!first || typeof first !== 'object' || !('text' in first)) {
return ''
}
return typeof first.text === 'string' ? first.text : ''
}
test('maps endpoint_not_found category markers to actionable setup guidance', () => {
const error = APIError.generate(
404,
undefined,
'OpenAI API error 404: Not Found [openai_category=endpoint_not_found] Hint: Confirm OPENAI_BASE_URL includes /v1.',
new Headers(),
)
const message = getAssistantMessageFromError(error, 'qwen2.5-coder:7b')
const text = getFirstText(message)
expect(message.isApiErrorMessage).toBe(true)
expect(text).toContain('Provider endpoint was not found')
expect(text).toContain('OPENAI_BASE_URL')
expect(text).toContain('/v1')
})
test('maps tool_call_incompatible category markers to model/tool guidance', () => {
const error = APIError.generate(
400,
undefined,
'OpenAI API error 400: tool_calls are not supported [openai_category=tool_call_incompatible]',
new Headers(),
)
const message = getAssistantMessageFromError(error, 'qwen2.5-coder:7b')
const text = getFirstText(message)
expect(text).toContain('rejected tool-calling payloads')
expect(text).toContain('/model')
})

View File

@@ -50,110 +50,9 @@ import {
} from '../claudeAiLimits.js'
import { shouldProcessRateLimits } from '../rateLimitMocking.js' // Used for /mock-limits command
import { extractConnectionErrorDetails, formatAPIError } from './errorUtils.js'
import {
extractOpenAICategoryMarker,
type OpenAICompatibilityFailureCategory,
} from './openaiErrorClassification.js'
export const API_ERROR_MESSAGE_PREFIX = 'API Error'
function stripOpenAICompatibilityMetadata(message: string): string {
return message
.replace(/\s*\[openai_category=[a-z_]+\]\s*/g, ' ')
.replace(/\s{2,}/g, ' ')
.trim()
}
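// Example: 'Not Found [openai_category=endpoint_not_found] Hint: check /v1.'
// becomes 'Not Found Hint: check /v1.'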
function mapOpenAICompatibilityFailureToAssistantMessage(options: {
category: OpenAICompatibilityFailureCategory
model: string
rawMessage: string
}): AssistantMessage {
const switchCmd = getIsNonInteractiveSession() ? '--model' : '/model'
const compactHint = getIsNonInteractiveSession()
? 'Reduce prompt size or start a new session.'
: 'Run /compact or start a new session with /new.'
switch (options.category) {
case 'localhost_resolution_failed':
case 'connection_refused':
return createAssistantAPIErrorMessage({
content:
'Could not connect to the local OpenAI-compatible provider. Ensure the local server is running, then use OPENAI_BASE_URL=http://127.0.0.1:11434/v1 for Ollama.',
error: 'unknown',
})
case 'endpoint_not_found':
return createAssistantAPIErrorMessage({
content:
'Provider endpoint was not found. Confirm OPENAI_BASE_URL targets an OpenAI-compatible /v1 endpoint (for Ollama: http://127.0.0.1:11434/v1).',
error: 'invalid_request',
})
case 'model_not_found':
return createAssistantAPIErrorMessage({
content: `The selected model (${options.model}) is not available on this provider. Run ${switchCmd} to choose another model, or verify installed local models (for Ollama: ollama list).`,
error: 'invalid_request',
})
case 'auth_invalid':
return createAssistantAPIErrorMessage({
content: `${API_ERROR_MESSAGE_PREFIX}: Authentication failed for your OpenAI-compatible provider. Verify OPENAI_API_KEY and endpoint-specific auth requirements.`,
error: 'authentication_failed',
})
case 'rate_limited':
return createAssistantAPIErrorMessage({
content: `${API_ERROR_MESSAGE_PREFIX}: Provider rate limit reached. Retry in a few seconds.`,
error: 'rate_limit',
})
case 'request_timeout':
return createAssistantAPIErrorMessage({
content: `${API_ERROR_MESSAGE_PREFIX}: Provider request timed out. Local models may be loading or overloaded; retry shortly or increase API_TIMEOUT_MS.`,
error: 'unknown',
})
case 'context_overflow':
return createAssistantAPIErrorMessage({
content: `The conversation exceeded the provider context limit. ${compactHint}`,
error: 'invalid_request',
})
case 'tool_call_incompatible':
return createAssistantAPIErrorMessage({
content: `The selected provider/model rejected tool-calling payloads. Try ${switchCmd} to pick a tool-capable model or continue without tools.`,
error: 'invalid_request',
})
case 'malformed_provider_response':
return createAssistantAPIErrorMessage({
content: `${API_ERROR_MESSAGE_PREFIX}: Provider returned a malformed response. Confirm endpoint compatibility and check local proxy/network middleware.`,
error: 'unknown',
errorDetails: stripOpenAICompatibilityMetadata(options.rawMessage),
})
case 'provider_unavailable':
return createAssistantAPIErrorMessage({
content: `${API_ERROR_MESSAGE_PREFIX}: Provider is temporarily unavailable. Retry in a moment.`,
error: 'unknown',
})
case 'network_error':
case 'unknown':
return createAssistantAPIErrorMessage({
content: `${API_ERROR_MESSAGE_PREFIX}: ${stripOpenAICompatibilityMetadata(options.rawMessage)}`,
error: 'unknown',
})
default:
return createAssistantAPIErrorMessage({
content: `${API_ERROR_MESSAGE_PREFIX}: ${stripOpenAICompatibilityMetadata(options.rawMessage)}`,
error: 'unknown',
})
}
}
export function startsWithApiErrorPrefix(text: string): boolean {
return (
text.startsWith(API_ERROR_MESSAGE_PREFIX) ||
@@ -558,19 +457,6 @@ export function getAssistantMessageFromError(
})
}
// OpenAI-compatible transport and HTTP failures include structured category
// markers from openaiShim.ts for actionable end-user remediation.
if (error instanceof APIError) {
const openaiCategory = extractOpenAICategoryMarker(error.message)
if (openaiCategory) {
return mapOpenAICompatibilityFailureToAssistantMessage({
category: openaiCategory,
model,
rawMessage: error.message,
})
}
}
// Check for emergency capacity off switch for Opus PAYG users
if (
error instanceof Error &&
@@ -1038,30 +924,6 @@ export function getAssistantMessageFromError(
})
}
// 500 errors caused by context overflow — the API returns 500 instead of 400
// when the request body (including conversation context) exceeds limits.
// This happens when auto-compact fails or the token estimation undercounts.
// Detect by checking for context-related keywords in 500 responses.
if (
error instanceof APIError &&
error.status >= 500 &&
(error.message.toLowerCase().includes('too many tokens') ||
error.message.toLowerCase().includes('request too large') ||
error.message.toLowerCase().includes('context length') ||
error.message.toLowerCase().includes('maximum context') ||
error.message.toLowerCase().includes('input length') ||
error.message.toLowerCase().includes('payload too large'))
) {
const rewindInstruction = getIsNonInteractiveSession()
? ''
: ' Press esc twice to go up a few messages, or run /compact to reduce context.'
return createAssistantAPIErrorMessage({
content: `The conversation has grown too large for the API to process.${rewindInstruction} Alternatively, start a new session with /new.`,
error: 'invalid_request',
errorDetails: `Context overflow (500): ${error.message}`,
})
}
// Connection errors (non-timeout) — use formatAPIError for detailed messages
if (error instanceof APIConnectionError) {
return createAssistantAPIErrorMessage({

View File

@@ -1,86 +0,0 @@
import { afterEach, beforeEach, expect, test } from 'bun:test'
import { _resetKeepAliveForTesting } from '../../utils/proxy.js'
import {
fetchWithProxyRetry,
isRetryableFetchError,
} from './fetchWithProxyRetry.js'
type FetchType = typeof globalThis.fetch
const originalFetch = globalThis.fetch
const originalEnv = {
HTTP_PROXY: process.env.HTTP_PROXY,
HTTPS_PROXY: process.env.HTTPS_PROXY,
}
function restoreEnv(key: 'HTTP_PROXY' | 'HTTPS_PROXY', value: string | undefined): void {
if (value === undefined) {
delete process.env[key]
} else {
process.env[key] = value
}
}
beforeEach(() => {
process.env.HTTP_PROXY = 'http://127.0.0.1:15236'
delete process.env.HTTPS_PROXY
_resetKeepAliveForTesting()
})
afterEach(() => {
globalThis.fetch = originalFetch
restoreEnv('HTTP_PROXY', originalEnv.HTTP_PROXY)
restoreEnv('HTTPS_PROXY', originalEnv.HTTPS_PROXY)
_resetKeepAliveForTesting()
})
test('isRetryableFetchError matches Bun socket-closed failures', () => {
expect(
isRetryableFetchError(
new Error(
'The socket connection was closed unexpectedly. For more information, pass `verbose: true` in the second argument to fetch()',
),
),
).toBe(true)
})
test('fetchWithProxyRetry retries once with keepalive disabled after socket closure', async () => {
const calls: Array<RequestInit | undefined> = []
globalThis.fetch = (async (_input, init) => {
calls.push(init)
if (calls.length === 1) {
throw new Error(
'The socket connection was closed unexpectedly. For more information, pass `verbose: true` in the second argument to fetch()',
)
}
return new Response('ok')
}) as FetchType
const response = await fetchWithProxyRetry('https://example.com/search', {
method: 'POST',
})
expect(await response.text()).toBe('ok')
expect(calls).toHaveLength(2)
expect((calls[0] as RequestInit & { proxy?: string }).proxy).toBe(
'http://127.0.0.1:15236',
)
expect((calls[0] as RequestInit).keepalive).toBeUndefined()
expect((calls[1] as RequestInit).keepalive).toBe(false)
})
test('fetchWithProxyRetry does not retry non-network errors', async () => {
let attempts = 0
globalThis.fetch = (async () => {
attempts += 1
throw new Error('400 bad request')
}) as FetchType
await expect(fetchWithProxyRetry('https://example.com')).rejects.toThrow(
'400 bad request',
)
expect(attempts).toBe(1)
})

View File

@@ -1,44 +0,0 @@
import { disableKeepAlive, getProxyFetchOptions } from '../../utils/proxy.js'
const RETRYABLE_FETCH_ERROR_PATTERN =
/socket connection was closed unexpectedly|ECONNRESET|EPIPE|socket hang up|Connection reset by peer|fetch failed/i
export function isRetryableFetchError(error: unknown): boolean {
if (!(error instanceof Error)) {
return false
}
if (error.name === 'AbortError') {
return false
}
return RETRYABLE_FETCH_ERROR_PATTERN.test(error.message)
}
export async function fetchWithProxyRetry(
input: string | URL | Request,
init?: RequestInit,
options?: { forAnthropicAPI?: boolean; maxAttempts?: number },
): Promise<Response> {
const maxAttempts = Math.max(1, options?.maxAttempts ?? 2)
let lastError: unknown
for (let attempt = 1; attempt <= maxAttempts; attempt++) {
try {
return await fetch(input, {
...init,
...getProxyFetchOptions({
forAnthropicAPI: options?.forAnthropicAPI,
}),
})
} catch (error) {
lastError = error
if (attempt >= maxAttempts || !isRetryableFetchError(error)) {
throw error
}
disableKeepAlive()
}
}
throw lastError instanceof Error
? lastError
: new Error('Fetch failed without an error object')
}
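// Example (illustrative caller; `payload` is hypothetical):
//   const res = await fetchWithProxyRetry(
//     'https://api.example.com/v1/chat/completions',
//     { method: 'POST', body: JSON.stringify(payload) },
//     { maxAttempts: 2 },
//   )
// Each attempt re-reads getProxyFetchOptions(); after a retryable socket
// failure, disableKeepAlive() turns keep-alive off for the retried request.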

View File

@@ -1,97 +0,0 @@
import { expect, test } from 'bun:test'
import {
buildOpenAICompatibilityErrorMessage,
classifyOpenAIHttpFailure,
classifyOpenAINetworkFailure,
extractOpenAICategoryMarker,
formatOpenAICategoryMarker,
} from './openaiErrorClassification.js'
test('classifies localhost ECONNREFUSED as connection_refused', () => {
const error = Object.assign(new TypeError('fetch failed'), {
code: 'ECONNREFUSED',
})
const failure = classifyOpenAINetworkFailure(error, {
url: 'http://localhost:11434/v1/chat/completions',
})
expect(failure.category).toBe('connection_refused')
expect(failure.retryable).toBe(true)
expect(failure.code).toBe('ECONNREFUSED')
expect(failure.hint).toContain('local server is running')
})
test('classifies localhost ENOTFOUND as localhost_resolution_failed', () => {
const error = Object.assign(new TypeError('getaddrinfo ENOTFOUND localhost'), {
code: 'ENOTFOUND',
})
const failure = classifyOpenAINetworkFailure(error, {
url: 'http://localhost:11434/v1/chat/completions',
})
expect(failure.category).toBe('localhost_resolution_failed')
expect(failure.retryable).toBe(true)
expect(failure.code).toBe('ENOTFOUND')
expect(failure.hint).toContain('127.0.0.1')
})
test('classifies model-not-found 404 responses', () => {
const failure = classifyOpenAIHttpFailure({
status: 404,
body: 'The model qwen2.5-coder:7b was not found',
})
expect(failure.category).toBe('model_not_found')
expect(failure.retryable).toBe(false)
})
test('classifies generic 404 responses as endpoint_not_found', () => {
const failure = classifyOpenAIHttpFailure({
status: 404,
body: 'Not Found',
})
expect(failure.category).toBe('endpoint_not_found')
expect(failure.hint).toContain('/v1')
})
test('classifies context-overflow responses', () => {
const failure = classifyOpenAIHttpFailure({
status: 500,
body: 'request too large: maximum context length exceeded',
})
expect(failure.category).toBe('context_overflow')
expect(failure.retryable).toBe(false)
})
test('classifies tool compatibility failures', () => {
const failure = classifyOpenAIHttpFailure({
status: 400,
body: 'tool_calls are not supported by this model',
})
expect(failure.category).toBe('tool_call_incompatible')
})
test('embeds and extracts category markers in formatted messages', () => {
const marker = formatOpenAICategoryMarker('endpoint_not_found')
expect(marker).toBe('[openai_category=endpoint_not_found]')
const formatted = buildOpenAICompatibilityErrorMessage('OpenAI API error 404: Not Found', {
category: 'endpoint_not_found',
hint: 'Confirm OPENAI_BASE_URL includes /v1.',
})
expect(formatted).toContain('[openai_category=endpoint_not_found]')
expect(formatted).toContain('Hint: Confirm OPENAI_BASE_URL includes /v1.')
expect(extractOpenAICategoryMarker(formatted)).toBe('endpoint_not_found')
})
test('ignores unknown category markers during extraction', () => {
const malformed = 'OpenAI API error 500 [openai_category=totally_fake_category]'
expect(extractOpenAICategoryMarker(malformed)).toBeUndefined()
})

View File

@@ -1,352 +0,0 @@
export type OpenAICompatibilityFailureCategory =
| 'connection_refused'
| 'localhost_resolution_failed'
| 'request_timeout'
| 'network_error'
| 'auth_invalid'
| 'rate_limited'
| 'model_not_found'
| 'endpoint_not_found'
| 'context_overflow'
| 'tool_call_incompatible'
| 'malformed_provider_response'
| 'provider_unavailable'
| 'unknown'
export type OpenAICompatibilityFailure = {
source: 'network' | 'http'
category: OpenAICompatibilityFailureCategory
retryable: boolean
message: string
hint?: string
code?: string
status?: number
}
const OPENAI_CATEGORY_MARKER_PREFIX = '[openai_category='
const LOCALHOST_HOSTNAMES = new Set(['localhost', '127.0.0.1', '::1'])
const OPENAI_COMPATIBILITY_FAILURE_CATEGORIES: ReadonlySet<OpenAICompatibilityFailureCategory> =
new Set<OpenAICompatibilityFailureCategory>([
'connection_refused',
'localhost_resolution_failed',
'request_timeout',
'network_error',
'auth_invalid',
'rate_limited',
'model_not_found',
'endpoint_not_found',
'context_overflow',
'tool_call_incompatible',
'malformed_provider_response',
'provider_unavailable',
'unknown',
])
function isOpenAICompatibilityFailureCategory(
value: string,
): value is OpenAICompatibilityFailureCategory {
return OPENAI_COMPATIBILITY_FAILURE_CATEGORIES.has(
value as OpenAICompatibilityFailureCategory,
)
}
function getErrorCode(error: unknown): string | undefined {
let current: unknown = error
const maxDepth = 5
for (let depth = 0; depth < maxDepth; depth++) {
if (
current &&
typeof current === 'object' &&
'code' in current &&
typeof (current as { code?: unknown }).code === 'string'
) {
return (current as { code: string }).code
}
if (
current &&
typeof current === 'object' &&
'cause' in current &&
(current as { cause?: unknown }).cause !== current
) {
current = (current as { cause?: unknown }).cause
continue
}
break
}
return undefined
}
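// Example: Bun/undici often nest the socket code one `cause` level down:
//   getErrorCode(Object.assign(new TypeError('fetch failed'), {
//     cause: Object.assign(new Error('connect ECONNREFUSED'), { code: 'ECONNREFUSED' }),
//   })) // → 'ECONNREFUSED'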
function getHostname(url: string): string | null {
try {
return new URL(url).hostname.toLowerCase()
} catch {
return null
}
}
function isLocalhostLikeHostname(hostname: string | null): boolean {
if (!hostname) return false
if (LOCALHOST_HOSTNAMES.has(hostname)) return true
return /^127\./.test(hostname)
}
function isContextOverflowMessage(body: string): boolean {
const lower = body.toLowerCase()
return (
lower.includes('too many tokens') ||
lower.includes('request too large') ||
lower.includes('context length') ||
lower.includes('maximum context') ||
lower.includes('input length') ||
lower.includes('payload too large') ||
lower.includes('prompt is too long')
)
}
function isToolCompatibilityMessage(body: string): boolean {
const lower = body.toLowerCase()
return (
lower.includes('tool_calls') ||
lower.includes('tool_call') ||
lower.includes('tool_use') ||
lower.includes('tool_result') ||
lower.includes('function calling') ||
lower.includes('function call')
)
}
function isMalformedProviderResponse(body: string): boolean {
const lower = body.toLowerCase()
return (
lower.includes('<!doctype html') ||
lower.includes('<html') ||
lower.includes('invalid json') ||
lower.includes('malformed') ||
lower.includes('unexpected token') ||
lower.includes('cannot parse') ||
lower.includes('not valid json')
)
}
function isModelNotFoundMessage(body: string): boolean {
const lower = body.toLowerCase()
return (
lower.includes('model') &&
(
lower.includes('not found') ||
lower.includes('does not exist') ||
lower.includes('unknown model') ||
lower.includes('unavailable model')
)
)
}
export function formatOpenAICategoryMarker(
category: OpenAICompatibilityFailureCategory,
): string {
return `${OPENAI_CATEGORY_MARKER_PREFIX}${category}]`
}
export function extractOpenAICategoryMarker(
message: string,
): OpenAICompatibilityFailureCategory | undefined {
const match = message.match(/\[openai_category=([a-z_]+)]/)
const category = match?.[1]
if (!category || !isOpenAICompatibilityFailureCategory(category)) {
return undefined
}
return category
}
export function buildOpenAICompatibilityErrorMessage(
baseMessage: string,
failure: Pick<OpenAICompatibilityFailure, 'category' | 'hint'>,
): string {
const marker = formatOpenAICategoryMarker(failure.category)
const hint = failure.hint ? ` Hint: ${failure.hint}` : ''
return `${baseMessage} ${marker}${hint}`
}
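// Round-trip example (mirrored by the classification tests above):
//   buildOpenAICompatibilityErrorMessage('OpenAI API error 404: Not Found', {
//     category: 'endpoint_not_found',
//     hint: 'Confirm OPENAI_BASE_URL includes /v1.',
//   })
// yields 'OpenAI API error 404: Not Found [openai_category=endpoint_not_found]
// Hint: Confirm OPENAI_BASE_URL includes /v1.', and extractOpenAICategoryMarker
// recovers 'endpoint_not_found' from it.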
export function classifyOpenAINetworkFailure(
error: unknown,
options: { url: string },
): OpenAICompatibilityFailure {
const message = error instanceof Error ? error.message : String(error)
const lowerMessage = message.toLowerCase()
const code = getErrorCode(error)
const hostname = getHostname(options.url)
const isLocalHost = isLocalhostLikeHostname(hostname)
if (
code === 'ETIMEDOUT' ||
code === 'UND_ERR_CONNECT_TIMEOUT' ||
lowerMessage.includes('timeout') ||
lowerMessage.includes('timed out') ||
lowerMessage.includes('aborterror')
) {
return {
source: 'network',
category: 'request_timeout',
retryable: true,
message,
code,
hint: 'The provider took too long to respond. Check local model load time or increase API timeout.',
}
}
if (
isLocalHost &&
(
code === 'ENOTFOUND' ||
code === 'EAI_AGAIN' ||
lowerMessage.includes('getaddrinfo') ||
(code === undefined && lowerMessage.includes('fetch failed'))
)
) {
return {
source: 'network',
category: 'localhost_resolution_failed',
retryable: true,
message,
code,
hint: 'Localhost failed for this request. Retry with 127.0.0.1 and confirm Ollama is serving on the configured port.',
}
}
if (code === 'ECONNREFUSED') {
return {
source: 'network',
category: 'connection_refused',
retryable: true,
message,
code,
hint: isLocalHost
? 'Connection to the local provider was refused. Ensure the local server is running and listening on the configured port.'
: 'Connection was refused by the provider endpoint. Ensure the server is running and the port is correct.',
}
}
return {
source: 'network',
category: 'network_error',
retryable: true,
message,
code,
hint: 'Network transport failed before a provider response was received.',
}
}
export function classifyOpenAIHttpFailure(options: {
status: number
body: string
}): OpenAICompatibilityFailure {
const body = options.body ?? ''
if (options.status === 401 || options.status === 403) {
return {
source: 'http',
category: 'auth_invalid',
retryable: false,
status: options.status,
message: body,
hint: 'Authentication failed. Verify API key, token source, and endpoint-specific auth headers.',
}
}
if (options.status === 429) {
return {
source: 'http',
category: 'rate_limited',
retryable: true,
status: options.status,
message: body,
hint: 'Provider rate-limited the request. Retry after backoff.',
}
}
if (options.status === 404 && isModelNotFoundMessage(body)) {
return {
source: 'http',
category: 'model_not_found',
retryable: false,
status: options.status,
message: body,
hint: 'The selected model is not installed or not available on this endpoint.',
}
}
if (options.status === 404) {
return {
source: 'http',
category: 'endpoint_not_found',
retryable: false,
status: options.status,
message: body,
hint: 'Endpoint was not found. Confirm OPENAI_BASE_URL includes /v1 for OpenAI-compatible local providers.',
}
}
if (
options.status === 413 ||
((options.status === 400 || options.status >= 500) &&
isContextOverflowMessage(body))
) {
return {
source: 'http',
category: 'context_overflow',
retryable: false,
status: options.status,
message: body,
hint: 'Prompt context exceeded model/server limits. Reduce context or increase provider context length.',
}
}
if (options.status === 400 && isToolCompatibilityMessage(body)) {
return {
source: 'http',
category: 'tool_call_incompatible',
retryable: false,
status: options.status,
message: body,
hint: 'Provider/model rejected tool-calling payload. Retry without tools or use a tool-capable model.',
}
}
if (options.status >= 400 && isMalformedProviderResponse(body)) {
return {
source: 'http',
category: 'malformed_provider_response',
retryable: false,
status: options.status,
message: body,
hint: 'Provider returned malformed or non-JSON response where JSON was expected.',
}
}
if (options.status >= 500) {
return {
source: 'http',
category: 'provider_unavailable',
retryable: true,
status: options.status,
message: body,
hint: 'Provider reported a server-side failure. Retry after a short delay.',
}
}
return {
source: 'http',
category: 'unknown',
retryable: false,
status: options.status,
message: body,
}
}

View File

@@ -1,317 +0,0 @@
import { afterEach, beforeEach, expect, mock, test } from 'bun:test'
import { createOpenAIShimClient } from './openaiShim.js'
type FetchType = typeof globalThis.fetch
const originalFetch = globalThis.fetch
const originalEnv = {
OPENAI_BASE_URL: process.env.OPENAI_BASE_URL,
OPENAI_API_KEY: process.env.OPENAI_API_KEY,
OPENAI_MODEL: process.env.OPENAI_MODEL,
}
// Mock config + autoCompact so the shim sees deterministic state.
const mockState = {
enabled: true,
effectiveWindow: 100_000, // Copilot gpt-4o tier
}
mock.module('../../utils/config.js', () => ({
getGlobalConfig: () => ({
toolHistoryCompressionEnabled: mockState.enabled,
autoCompactEnabled: false,
}),
}))
mock.module('../compact/autoCompact.js', () => ({
getEffectiveContextWindowSize: () => mockState.effectiveWindow,
}))
type OpenAIShimClient = {
beta: {
messages: {
create: (
params: Record<string, unknown>,
options?: Record<string, unknown>,
) => Promise<unknown>
}
}
}
function bigText(n: number): string {
return 'A'.repeat(n)
}
function buildToolExchange(id: number, resultLength: number) {
return [
{
role: 'assistant',
content: [
{
type: 'tool_use',
id: `toolu_${id}`,
name: 'Read',
input: { file_path: `/path/to/file${id}.ts` },
},
],
},
{
role: 'user',
content: [
{
type: 'tool_result',
tool_use_id: `toolu_${id}`,
content: bigText(resultLength),
},
],
},
]
}
function buildLongConversation(numExchanges: number, resultLength = 5_000) {
const out: Array<{ role: string; content: unknown }> = [
{ role: 'user', content: 'start the work' },
]
for (let i = 0; i < numExchanges; i++) {
out.push(...buildToolExchange(i, resultLength))
}
return out
}
function makeFakeResponse(): Response {
return new Response(
JSON.stringify({
id: 'chatcmpl-1',
model: 'gpt-4o',
choices: [
{
message: { role: 'assistant', content: 'done' },
finish_reason: 'stop',
},
],
usage: { prompt_tokens: 8, completion_tokens: 2, total_tokens: 10 },
}),
{ headers: { 'Content-Type': 'application/json' } },
)
}
beforeEach(() => {
process.env.OPENAI_BASE_URL = 'http://example.test/v1'
process.env.OPENAI_API_KEY = 'test-key'
delete process.env.OPENAI_MODEL
mockState.enabled = true
mockState.effectiveWindow = 100_000
})
afterEach(() => {
if (originalEnv.OPENAI_BASE_URL === undefined) delete process.env.OPENAI_BASE_URL
else process.env.OPENAI_BASE_URL = originalEnv.OPENAI_BASE_URL
if (originalEnv.OPENAI_API_KEY === undefined) delete process.env.OPENAI_API_KEY
else process.env.OPENAI_API_KEY = originalEnv.OPENAI_API_KEY
if (originalEnv.OPENAI_MODEL === undefined) delete process.env.OPENAI_MODEL
else process.env.OPENAI_MODEL = originalEnv.OPENAI_MODEL
globalThis.fetch = originalFetch
})
async function captureRequestBody(
messages: Array<{ role: string; content: unknown }>,
model: string,
): Promise<Record<string, unknown>> {
let captured: Record<string, unknown> | undefined
globalThis.fetch = (async (_input, init) => {
captured = JSON.parse(String(init?.body))
return makeFakeResponse()
}) as FetchType
const client = createOpenAIShimClient({}) as OpenAIShimClient
await client.beta.messages.create({
model,
system: 'system prompt',
messages,
})
if (!captured) throw new Error('request not captured')
return captured
}
function getToolMessages(body: Record<string, unknown>): Array<{ content: string }> {
const messages = body.messages as Array<{ role: string; content: string }>
return messages.filter(m => m.role === 'tool')
}
function getAssistantToolCalls(body: Record<string, unknown>): unknown[] {
const messages = body.messages as Array<{
role: string
tool_calls?: unknown[]
}>
return messages
.filter(m => m.role === 'assistant' && Array.isArray(m.tool_calls))
.flatMap(m => m.tool_calls ?? [])
}
// ============================================================================
// BUG REPRO: without compression, full tool history is resent every turn
// ============================================================================
test('BUG REPRO: without compression, all 30 tool results are sent at full size', async () => {
mockState.enabled = false
const messages = buildLongConversation(30, 5_000)
const body = await captureRequestBody(messages, 'gpt-4o')
const toolMessages = getToolMessages(body)
const payloadSize = JSON.stringify(body).length
// All 30 tool results present, none truncated
expect(toolMessages.length).toBe(30)
for (const m of toolMessages) {
expect(m.content.length).toBeGreaterThanOrEqual(5_000)
expect(m.content).not.toContain('[…truncated')
expect(m.content).not.toContain('chars omitted')
}
// Total payload is large (~150KB raw) — this is the cost being paid every turn
expect(payloadSize).toBeGreaterThan(150_000)
})
// ============================================================================
// FIX: with compression, recent kept full, mid truncated, old stubbed
// ============================================================================
test('FIX: with compression on Copilot gpt-4o (tier 5/10/rest), 30 turns shrinks dramatically', async () => {
mockState.enabled = true
mockState.effectiveWindow = 100_000 // 64–128k → recent=5, mid=10
const messages = buildLongConversation(30, 5_000)
const body = await captureRequestBody(messages, 'gpt-4o')
const toolMessages = getToolMessages(body)
const payloadSize = JSON.stringify(body).length
// Structure preserved: still 30 tool messages, no orphan tool_calls
expect(toolMessages.length).toBe(30)
expect(getAssistantToolCalls(body).length).toBe(30)
// Tier breakdown (oldest → newest):
// indices 0..14 → old tier (stubs)
// indices 15..24 → mid tier (truncated)
// indices 25..29 → recent (full)
for (let i = 0; i <= 14; i++) {
expect(toolMessages[i].content).toMatch(/^\[Read args=.*chars omitted\]$/)
}
for (let i = 15; i <= 24; i++) {
expect(toolMessages[i].content).toContain('[…truncated')
}
for (let i = 25; i <= 29; i++) {
expect(toolMessages[i].content.length).toBe(5_000)
expect(toolMessages[i].content).not.toContain('[…truncated')
expect(toolMessages[i].content).not.toContain('chars omitted')
}
// Significant reduction: from ~150KB to <60KB (10 mid×2KB + structure overhead)
expect(payloadSize).toBeLessThan(60_000)
})
// ============================================================================
// FIX: large-context model gets generous tiers — compression effectively inert
// ============================================================================
test('FIX: gpt-4.1 (1M context) with 25 exchanges keeps all full (recent tier=25)', async () => {
mockState.enabled = true
mockState.effectiveWindow = 1_000_000 // ≥500k → recent=25, mid=50
const messages = buildLongConversation(25, 5_000)
const body = await captureRequestBody(messages, 'gpt-4.1')
const toolMessages = getToolMessages(body)
expect(toolMessages.length).toBe(25)
for (const m of toolMessages) {
expect(m.content.length).toBe(5_000)
expect(m.content).not.toContain('[…truncated')
expect(m.content).not.toContain('chars omitted')
}
})
test('FIX: gpt-4.1 (1M context) with 30 exchanges → only first 5 mid-truncated', async () => {
mockState.enabled = true
mockState.effectiveWindow = 1_000_000 // recent=25, mid=50
const messages = buildLongConversation(30, 5_000)
const body = await captureRequestBody(messages, 'gpt-4.1')
const toolMessages = getToolMessages(body)
// 30 total: indices 0..4 mid, indices 5..29 recent
for (let i = 0; i < 5; i++) {
expect(toolMessages[i].content).toContain('[…truncated')
}
for (let i = 5; i < 30; i++) {
expect(toolMessages[i].content.length).toBe(5_000)
}
})
// ============================================================================
// FIX: stub preserves tool name and args — model can re-invoke if needed
// ============================================================================
test('FIX: stub format includes original tool name and arguments', async () => {
mockState.enabled = true
mockState.effectiveWindow = 100_000
const messages = buildLongConversation(30, 5_000)
const body = await captureRequestBody(messages, 'gpt-4o')
const toolMessages = getToolMessages(body)
const oldestStub = toolMessages[0].content
// Format: [<tool_name> args=<json> → <N> chars omitted]
expect(oldestStub).toMatch(/^\[Read /)
expect(oldestStub).toMatch(/file_path/)
expect(oldestStub).toMatch(/→ 5000 chars omitted\]$/)
})
// ============================================================================
// FIX: tool_use blocks (assistant tool_calls) are never modified
// ============================================================================
test('FIX: every tool_call retains its full id, name, and arguments', async () => {
mockState.enabled = true
mockState.effectiveWindow = 100_000
const messages = buildLongConversation(30, 5_000)
const body = await captureRequestBody(messages, 'gpt-4o')
const toolCalls = getAssistantToolCalls(body) as Array<{
id: string
function: { name: string; arguments: string }
}>
expect(toolCalls.length).toBe(30)
for (let i = 0; i < toolCalls.length; i++) {
expect(toolCalls[i].id).toBe(`toolu_${i}`)
expect(toolCalls[i].function.name).toBe('Read')
expect(JSON.parse(toolCalls[i].function.arguments)).toEqual({
file_path: `/path/to/file${i}.ts`,
})
}
})
// ============================================================================
// FIX: small-context provider (Mistral 32k) gets aggressive compression
// ============================================================================
test('FIX: 32k window (Mistral tier) → recent=3 keeps last 3 only', async () => {
mockState.enabled = true
mockState.effectiveWindow = 24_000 // 16–32k → recent=3, mid=5
const messages = buildLongConversation(15, 3_000)
const body = await captureRequestBody(messages, 'mistral-large-latest')
const toolMessages = getToolMessages(body)
// 15 total: indices 0..6 old, 7..11 mid, 12..14 recent
for (let i = 0; i <= 6; i++) {
expect(toolMessages[i].content).toContain('chars omitted')
}
for (let i = 7; i <= 11; i++) {
expect(toolMessages[i].content).toContain('[…truncated')
}
for (let i = 12; i <= 14; i++) {
expect(toolMessages[i].content.length).toBe(3_000)
}
})

View File

@@ -1,286 +0,0 @@
import { afterEach, expect, mock, test } from 'bun:test'
const originalFetch = globalThis.fetch
const originalEnv = {
OPENAI_BASE_URL: process.env.OPENAI_BASE_URL,
OPENAI_API_KEY: process.env.OPENAI_API_KEY,
OPENAI_MODEL: process.env.OPENAI_MODEL,
}
function restoreEnv(key: string, value: string | undefined): void {
if (value === undefined) {
delete process.env[key]
} else {
process.env[key] = value
}
}
afterEach(() => {
globalThis.fetch = originalFetch
restoreEnv('OPENAI_BASE_URL', originalEnv.OPENAI_BASE_URL)
restoreEnv('OPENAI_API_KEY', originalEnv.OPENAI_API_KEY)
restoreEnv('OPENAI_MODEL', originalEnv.OPENAI_MODEL)
mock.restore()
})
test('logs classified transport diagnostics with category and code', async () => {
const debugSpy = mock(() => {})
mock.module('../../utils/debug.js', () => ({
logForDebugging: debugSpy,
}))
const nonce = `${Date.now()}-${Math.random()}`
const { createOpenAIShimClient } = await import(`./openaiShim.ts?ts=${nonce}`)
process.env.OPENAI_BASE_URL = 'http://localhost:11434/v1'
process.env.OPENAI_API_KEY = 'ollama'
const transportError = Object.assign(new TypeError('fetch failed'), {
code: 'ECONNREFUSED',
})
globalThis.fetch = mock(async () => {
throw transportError
}) as typeof globalThis.fetch
const client = createOpenAIShimClient({}) as {
beta: {
messages: {
create: (params: Record<string, unknown>) => Promise<unknown>
}
}
}
await expect(
client.beta.messages.create({
model: 'qwen2.5-coder:7b',
messages: [{ role: 'user', content: 'hello' }],
max_tokens: 64,
stream: false,
}),
).rejects.toThrow('openai_category=connection_refused')
const transportLog = debugSpy.mock.calls.find(call =>
typeof call?.[0] === 'string' && call[0].includes('transport failure'),
)
expect(transportLog).toBeDefined()
expect(String(transportLog?.[0])).toContain('category=connection_refused')
expect(String(transportLog?.[0])).toContain('code=ECONNREFUSED')
expect(transportLog?.[1]).toEqual({ level: 'warn' })
})
test('redacts credentials in transport diagnostic URL logs', async () => {
const debugSpy = mock(() => {})
mock.module('../../utils/debug.js', () => ({
logForDebugging: debugSpy,
}))
const nonce = `${Date.now()}-${Math.random()}`
const { createOpenAIShimClient } = await import(`./openaiShim.ts?ts=${nonce}`)
process.env.OPENAI_BASE_URL = 'http://user:supersecret@localhost:11434/v1'
process.env.OPENAI_API_KEY = 'supersecret'
const transportError = Object.assign(new TypeError('fetch failed'), {
code: 'ECONNREFUSED',
})
globalThis.fetch = mock(async () => {
throw transportError
}) as typeof globalThis.fetch
const client = createOpenAIShimClient({}) as {
beta: {
messages: {
create: (params: Record<string, unknown>) => Promise<unknown>
}
}
}
await expect(
client.beta.messages.create({
model: 'qwen2.5-coder:7b',
messages: [{ role: 'user', content: 'hello' }],
max_tokens: 64,
stream: false,
}),
).rejects.toThrow('openai_category=connection_refused')
const transportLog = debugSpy.mock.calls.find(call =>
typeof call?.[0] === 'string' && call[0].includes('transport failure'),
)
expect(transportLog).toBeDefined()
const logLine = String(transportLog?.[0])
expect(logLine).toContain('url=http://redacted:redacted@localhost:11434/v1/chat/completions')
expect(logLine).not.toContain('user:supersecret')
expect(logLine).not.toContain('supersecret@')
})
test('logs self-heal localhost fallback with redacted from/to URLs', async () => {
const debugSpy = mock(() => {})
mock.module('../../utils/debug.js', () => ({
logForDebugging: debugSpy,
}))
const nonce = `${Date.now()}-${Math.random()}`
const { createOpenAIShimClient } = await import(`./openaiShim.ts?ts=${nonce}`)
process.env.OPENAI_BASE_URL = 'http://user:supersecret@localhost:11434/v1'
process.env.OPENAI_API_KEY = 'supersecret'
globalThis.fetch = mock(async (input: string | Request) => {
const url = typeof input === 'string' ? input : input.url
if (url.includes('localhost')) {
throw Object.assign(new TypeError('fetch failed'), {
code: 'ENOTFOUND',
})
}
return new Response(
JSON.stringify({
id: 'chatcmpl-1',
model: 'qwen2.5-coder:7b',
choices: [
{
message: {
role: 'assistant',
content: 'ok',
},
finish_reason: 'stop',
},
],
usage: {
prompt_tokens: 5,
completion_tokens: 2,
total_tokens: 7,
},
}),
{
status: 200,
headers: {
'Content-Type': 'application/json',
},
},
)
}) as typeof globalThis.fetch
const client = createOpenAIShimClient({}) as {
beta: {
messages: {
create: (params: Record<string, unknown>) => Promise<unknown>
}
}
}
await expect(
client.beta.messages.create({
model: 'qwen2.5-coder:7b',
messages: [{ role: 'user', content: 'hello' }],
max_tokens: 64,
stream: false,
}),
).resolves.toBeDefined()
const fallbackLog = debugSpy.mock.calls.find(call =>
typeof call?.[0] === 'string' &&
call[0].includes('self-heal retry reason=localhost_resolution_failed'),
)
expect(fallbackLog).toBeDefined()
const logLine = String(fallbackLog?.[0])
expect(logLine).toContain('from=http://redacted:redacted@localhost:11434/v1/chat/completions')
expect(logLine).toContain('to=http://redacted:redacted@127.0.0.1:11434/v1/chat/completions')
expect(logLine).not.toContain('supersecret')
})
test('logs self-heal toolless retry for local tool-call incompatibility', async () => {
const debugSpy = mock(() => {})
mock.module('../../utils/debug.js', () => ({
logForDebugging: debugSpy,
}))
const nonce = `${Date.now()}-${Math.random()}`
const { createOpenAIShimClient } = await import(`./openaiShim.ts?ts=${nonce}`)
process.env.OPENAI_BASE_URL = 'http://localhost:11434/v1'
process.env.OPENAI_API_KEY = 'ollama'
let callCount = 0
globalThis.fetch = mock(async () => {
callCount += 1
if (callCount === 1) {
return new Response('tool_calls are not supported', {
status: 400,
headers: {
'Content-Type': 'text/plain',
},
})
}
return new Response(
JSON.stringify({
id: 'chatcmpl-1',
model: 'qwen2.5-coder:7b',
choices: [
{
message: {
role: 'assistant',
content: 'ok',
},
finish_reason: 'stop',
},
],
usage: {
prompt_tokens: 7,
completion_tokens: 3,
total_tokens: 10,
},
}),
{
status: 200,
headers: {
'Content-Type': 'application/json',
},
},
)
}) as typeof globalThis.fetch
const client = createOpenAIShimClient({}) as {
beta: {
messages: {
create: (params: Record<string, unknown>) => Promise<unknown>
}
}
}
await expect(
client.beta.messages.create({
model: 'qwen2.5-coder:7b',
messages: [{ role: 'user', content: 'hello' }],
tools: [
{
name: 'Read',
description: 'Read file',
input_schema: {
type: 'object',
properties: {
filePath: { type: 'string' },
},
required: ['filePath'],
},
},
],
max_tokens: 64,
stream: false,
}),
).resolves.toBeDefined()
const fallbackLog = debugSpy.mock.calls.find(call =>
typeof call?.[0] === 'string' &&
call[0].includes('self-heal retry reason=tool_call_incompatible mode=toolless'),
)
expect(fallbackLog).toBeDefined()
expect(fallbackLog?.[1]).toEqual({ level: 'warn' })
})

File diff suppressed because it is too large

Some files were not shown because too many files have changed in this diff