Compare commits
21 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
131b31bf0e | ||
|
|
c1beea9867 | ||
|
|
658d076909 | ||
|
|
a07e5ef990 | ||
|
|
25ce2ca7bf | ||
|
|
1741f32cb7 | ||
|
|
fc7dc9ca0d | ||
|
|
252808bbd0 | ||
|
|
0e48884f56 | ||
|
|
b818dd5958 | ||
|
|
24d485f42f | ||
|
|
99a17144ee | ||
|
|
df2b9f2b7b | ||
|
|
adbe391e63 | ||
|
|
03e0b06e07 | ||
|
|
31be66d764 | ||
|
|
7c8bdcc3e2 | ||
|
|
64298a663f | ||
|
|
30c866d31a | ||
|
|
f6a4455ecf | ||
|
|
aeaa658f77 |
16
.dockerignore
Normal file
16
.dockerignore
Normal file
@@ -0,0 +1,16 @@
|
||||
node_modules
|
||||
dist
|
||||
.git
|
||||
.gitignore
|
||||
.env
|
||||
.env.*
|
||||
!.env.example
|
||||
coverage
|
||||
reports
|
||||
vscode-extension
|
||||
python
|
||||
docs
|
||||
*.md
|
||||
!README.md
|
||||
.github
|
||||
.tsbuildinfo
|
||||
55
.github/workflows/release.yml
vendored
55
.github/workflows/release.yml
vendored
@@ -86,3 +86,58 @@ jobs:
|
||||
echo "- npm: https://www.npmjs.com/package/@gitlawb/openclaude"
|
||||
echo "- GitHub: https://github.com/Gitlawb/openclaude/releases/tag/${{ needs.release-please.outputs.tag_name }}"
|
||||
} >> "$GITHUB_STEP_SUMMARY"
|
||||
|
||||
docker:
|
||||
name: Build & Push Docker Image
|
||||
needs: release-please
|
||||
if: ${{ needs.release-please.outputs.release_created == 'true' }}
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
steps:
|
||||
- name: Checkout release tag
|
||||
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
ref: ${{ needs.release-please.outputs.tag_name }}
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3.10.0
|
||||
|
||||
- name: Log in to GitHub Container Registry
|
||||
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Extract metadata
|
||||
id: meta
|
||||
uses: docker/metadata-action@902fa8ec7d6ecbf8d84d538b9b233a880e428804 # v5.7.0
|
||||
with:
|
||||
images: ghcr.io/${{ github.repository }}
|
||||
tags: |
|
||||
type=semver,pattern={{version}},value=${{ needs.release-please.outputs.version }}
|
||||
type=semver,pattern={{major}}.{{minor}},value=${{ needs.release-please.outputs.version }}
|
||||
type=raw,value=latest
|
||||
|
||||
- name: Build and load locally
|
||||
uses: docker/build-push-action@14487ce63c7a62a4a324b0bfb37086795e31c6c1 # v6.16.0
|
||||
with:
|
||||
context: .
|
||||
load: true
|
||||
tags: openclaude:smoke
|
||||
cache-from: type=gha
|
||||
|
||||
- name: Smoke test
|
||||
run: docker run --rm openclaude:smoke --version
|
||||
|
||||
- name: Build and push
|
||||
uses: docker/build-push-action@14487ce63c7a62a4a324b0bfb37086795e31c6c1 # v6.16.0
|
||||
with:
|
||||
context: .
|
||||
push: true
|
||||
tags: ${{ steps.meta.outputs.tags }}
|
||||
labels: ${{ steps.meta.outputs.labels }}
|
||||
cache-from: type=gha
|
||||
cache-to: type=gha,mode=max
|
||||
|
||||
@@ -1,3 +1,3 @@
|
||||
{
|
||||
".": "0.2.2"
|
||||
".": "0.3.0"
|
||||
}
|
||||
|
||||
33
CHANGELOG.md
33
CHANGELOG.md
@@ -1,5 +1,38 @@
|
||||
# Changelog
|
||||
|
||||
## [0.3.0](https://github.com/Gitlawb/openclaude/compare/v0.2.3...v0.3.0) (2026-04-14)
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
* activate coordinator mode in open build ([#647](https://github.com/Gitlawb/openclaude/issues/647)) ([99a1714](https://github.com/Gitlawb/openclaude/commit/99a17144ee285b892a0801acb6abcc9af68879af))
|
||||
* activate local-only team memory in open build ([#648](https://github.com/Gitlawb/openclaude/issues/648)) ([24d485f](https://github.com/Gitlawb/openclaude/commit/24d485f42f5b1405d2fab13f2f497d5edd3b5300))
|
||||
* activate message actions in open build ([#632](https://github.com/Gitlawb/openclaude/issues/632)) ([252808b](https://github.com/Gitlawb/openclaude/commit/252808bbd0a12a6ccf97e2cb09752a0212ea3acd))
|
||||
* add allowBypassPermissionsMode setting ([#658](https://github.com/Gitlawb/openclaude/issues/658)) ([31be66d](https://github.com/Gitlawb/openclaude/commit/31be66d7645ea3473334c9ce89ea1a5095b8df6e))
|
||||
* add Docker image build and push to GHCR on release ([#656](https://github.com/Gitlawb/openclaude/issues/656)) ([658d076](https://github.com/Gitlawb/openclaude/commit/658d076909e14eb0459bcb98aee9aa0472118265))
|
||||
* implement /loop command with fixed and dynamic scheduling ([#621](https://github.com/Gitlawb/openclaude/issues/621)) ([64298a6](https://github.com/Gitlawb/openclaude/commit/64298a663f1391b16aa1f5a49e8a877e1d3742f2))
|
||||
* implement Monitor tool for streaming shell output ([#649](https://github.com/Gitlawb/openclaude/issues/649)) ([b818dd5](https://github.com/Gitlawb/openclaude/commit/b818dd5958f4e8428566ce25a1a6be5fd4fe66f8))
|
||||
* local feature flag overrides via ~/.claude/feature-flags.json ([#639](https://github.com/Gitlawb/openclaude/issues/639)) ([0e48884](https://github.com/Gitlawb/openclaude/commit/0e48884f56c6c008f047a7926d3b2cb924170625))
|
||||
* open useful USER_TYPE-gated features to all users ([#644](https://github.com/Gitlawb/openclaude/issues/644)) ([c1beea9](https://github.com/Gitlawb/openclaude/commit/c1beea98676a413c54152a45a6b9fbe7fb9ed028))
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* bump axios 1.14.0 → 1.15.0 (Dependabot [#4](https://github.com/Gitlawb/openclaude/issues/4), [#5](https://github.com/Gitlawb/openclaude/issues/5)) ([#670](https://github.com/Gitlawb/openclaude/issues/670)) ([a07e5ef](https://github.com/Gitlawb/openclaude/commit/a07e5ef990a5ed01a72e83fdbd1fcab36f515a08))
|
||||
* extend provider guard to protect anthropic profiles from cross-terminal override ([#641](https://github.com/Gitlawb/openclaude/issues/641)) ([03e0b06](https://github.com/Gitlawb/openclaude/commit/03e0b06e0784e4ea46945b3950840b10b6e3ca49))
|
||||
* improve fetch diagnostics for bootstrap and session requests ([#646](https://github.com/Gitlawb/openclaude/issues/646)) ([df2b9f2](https://github.com/Gitlawb/openclaude/commit/df2b9f2b7b4c661ee3d9ed5dc58b3064de0599d1))
|
||||
* **openai-shim:** preserve tool result images and local token caps ([#659](https://github.com/Gitlawb/openclaude/issues/659)) ([30c866d](https://github.com/Gitlawb/openclaude/commit/30c866d31ad8538496460667d86ed5efbd4a8547))
|
||||
* replace broken bun:bundle shim with source pre-processing ([#657](https://github.com/Gitlawb/openclaude/issues/657)) ([adbe391](https://github.com/Gitlawb/openclaude/commit/adbe391e63721918b5d147f4f845111c1a3143db))
|
||||
* resolve 12 bugs across API, MCP, agent tools, web search, and context overflow ([#674](https://github.com/Gitlawb/openclaude/issues/674)) ([25ce2ca](https://github.com/Gitlawb/openclaude/commit/25ce2ca7bff8937b0b79ad7f85c6dc1c68432069))
|
||||
* route OpenAI Codex shortcuts to correct endpoint ([#566](https://github.com/Gitlawb/openclaude/issues/566)) ([7c8bdcc](https://github.com/Gitlawb/openclaude/commit/7c8bdcc3e2ac1ecb98286c705c85671044be3d6b))
|
||||
|
||||
## [0.2.3](https://github.com/Gitlawb/openclaude/compare/v0.2.2...v0.2.3) (2026-04-12)
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* prevent infinite auto-compact loop for unknown 3P models ([#635](https://github.com/Gitlawb/openclaude/issues/635)) ([#636](https://github.com/Gitlawb/openclaude/issues/636)) ([aeaa658](https://github.com/Gitlawb/openclaude/commit/aeaa658f776fb8df95721e8b8962385f8b00f66a))
|
||||
|
||||
## [0.2.2](https://github.com/Gitlawb/openclaude/compare/v0.2.1...v0.2.2) (2026-04-12)
|
||||
|
||||
|
||||
|
||||
49
Dockerfile
Normal file
49
Dockerfile
Normal file
@@ -0,0 +1,49 @@
|
||||
# ---- build stage ----
|
||||
FROM node:22-slim AS build
|
||||
|
||||
# Install Bun
|
||||
RUN npm install -g bun@1.3.11
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
# Copy dependency manifests first for better layer caching
|
||||
COPY package.json bun.lock ./
|
||||
|
||||
# Install all dependencies (including devDependencies for build)
|
||||
RUN bun install --frozen-lockfile
|
||||
|
||||
# Copy source code
|
||||
COPY src/ src/
|
||||
COPY scripts/ scripts/
|
||||
COPY bin/ bin/
|
||||
COPY tsconfig.json ./
|
||||
|
||||
# Build the CLI bundle
|
||||
RUN bun run build
|
||||
|
||||
# Prune devDependencies
|
||||
RUN rm -rf node_modules && bun install --frozen-lockfile --production
|
||||
|
||||
# ---- runtime stage ----
|
||||
FROM node:22-slim
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
# Copy only what's needed to run
|
||||
COPY --from=build /app/dist/cli.mjs dist/cli.mjs
|
||||
COPY --from=build /app/bin/ bin/
|
||||
COPY --from=build /app/node_modules/ node_modules/
|
||||
COPY --from=build /app/package.json package.json
|
||||
COPY README.md ./
|
||||
|
||||
# Install git — many CLI tool operations depend on it
|
||||
RUN apt-get update && apt-get install -y --no-install-recommends git \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Run as non-root user
|
||||
RUN groupadd --gid 1000 appuser && useradd --uid 1000 --gid appuser --shell /bin/bash --create-home appuser
|
||||
USER appuser
|
||||
WORKDIR /home/appuser
|
||||
ENV HOME=/home/appuser
|
||||
|
||||
ENTRYPOINT ["node", "/app/dist/cli.mjs"]
|
||||
10
README.md
10
README.md
@@ -2,7 +2,7 @@
|
||||
|
||||
OpenClaude is an open-source coding-agent CLI for cloud and local model providers.
|
||||
|
||||
Use OpenAI-compatible APIs, Gemini, GitHub Models, Codex, Ollama, Atomic Chat, and other supported backends while keeping one terminal-first workflow: prompts, tools, agents, MCP, slash commands, and streaming output.
|
||||
Use OpenAI-compatible APIs, Gemini, GitHub Models, Codex OAuth, Codex, Ollama, Atomic Chat, and other supported backends while keeping one terminal-first workflow: prompts, tools, agents, MCP, slash commands, and streaming output.
|
||||
|
||||
[](https://github.com/Gitlawb/openclaude/actions/workflows/pr-checks.yml)
|
||||
[](https://github.com/Gitlawb/openclaude/tags)
|
||||
@@ -10,13 +10,16 @@ Use OpenAI-compatible APIs, Gemini, GitHub Models, Codex, Ollama, Atomic Chat, a
|
||||
[](SECURITY.md)
|
||||
[](LICENSE)
|
||||
|
||||
OpenClaude is also mirrored to GitLawb:
|
||||
[gitlawb.com/node/repos/z6MkqDnb/openclaude](https://gitlawb.com/node/repos/z6MkqDnb/openclaude)
|
||||
|
||||
[Quick Start](#quick-start) | [Setup Guides](#setup-guides) | [Providers](#supported-providers) | [Source Build](#source-build-and-local-development) | [VS Code Extension](#vs-code-extension) | [Community](#community)
|
||||
|
||||
## Why OpenClaude
|
||||
|
||||
- Use one CLI across cloud APIs and local model backends
|
||||
- Save provider profiles inside the app with `/provider`
|
||||
- Run with OpenAI-compatible services, Gemini, GitHub Models, Codex, Ollama, Atomic Chat, and other supported providers
|
||||
- Run with OpenAI-compatible services, Gemini, GitHub Models, Codex OAuth, Codex, Ollama, Atomic Chat, and other supported providers
|
||||
- Keep coding-agent workflows in one place: bash, file tools, grep, glob, agents, tasks, MCP, and web tools
|
||||
- Use the bundled VS Code extension for launch integration and theme support
|
||||
|
||||
@@ -105,7 +108,8 @@ Advanced and source-build guides:
|
||||
| OpenAI-compatible | `/provider` or env vars | Works with OpenAI, OpenRouter, DeepSeek, Groq, Mistral, LM Studio, and other compatible `/v1` servers |
|
||||
| Gemini | `/provider` or env vars | Supports API key, access token, or local ADC workflow on current `main` |
|
||||
| GitHub Models | `/onboard-github` | Interactive onboarding with saved credentials |
|
||||
| Codex | `/provider` | Uses existing Codex credentials when available |
|
||||
| Codex OAuth | `/provider` | Opens ChatGPT sign-in in your browser and stores Codex credentials securely |
|
||||
| Codex | `/provider` | Uses existing Codex CLI auth, OpenClaude secure storage, or env credentials |
|
||||
| Ollama | `/provider` or env vars | Local inference with no API key |
|
||||
| Atomic Chat | advanced setup | Local Apple Silicon backend |
|
||||
| Bedrock / Vertex / Foundry | env vars | Additional provider integrations for supported environments |
|
||||
|
||||
8
bun.lock
8
bun.lock
@@ -30,7 +30,7 @@
|
||||
"@opentelemetry/semantic-conventions": "1.40.0",
|
||||
"ajv": "8.18.0",
|
||||
"auto-bind": "5.0.1",
|
||||
"axios": "1.14.0",
|
||||
"axios": "1.15.0",
|
||||
"bidi-js": "1.0.3",
|
||||
"chalk": "5.6.2",
|
||||
"chokidar": "4.0.3",
|
||||
@@ -479,7 +479,7 @@
|
||||
|
||||
"auto-bind": ["auto-bind@5.0.1", "", {}, "sha512-ooviqdwwgfIfNmDwo94wlshcdzfO64XV0Cg6oDsDYBJfITDz1EngD2z7DkbvCWn+XIMsIqW27sEVF6qcpJrRcg=="],
|
||||
|
||||
"axios": ["axios@1.14.0", "", { "dependencies": { "follow-redirects": "^1.15.11", "form-data": "^4.0.5", "proxy-from-env": "^2.1.0" } }, "sha512-3Y8yrqLSwjuzpXuZ0oIYZ/XGgLwUIBU3uLvbcpb0pidD9ctpShJd43KSlEEkVQg6DS0G9NKyzOvBfUtDKEyHvQ=="],
|
||||
"axios": ["axios@1.15.0", "", { "dependencies": { "follow-redirects": "^1.15.11", "form-data": "^4.0.5", "proxy-from-env": "^2.1.0" } }, "sha512-wWyJDlAatxk30ZJer+GeCWS209sA42X+N5jU2jy6oHTp7ufw8uzUTVFBX9+wTfAlhiJXGS0Bq7X6efruWjuK9Q=="],
|
||||
|
||||
"base64-js": ["base64-js@1.5.1", "", {}, "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA=="],
|
||||
|
||||
@@ -1151,6 +1151,8 @@
|
||||
|
||||
"@emnapi/runtime/tslib": ["tslib@2.8.1", "", {}, "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w=="],
|
||||
|
||||
"@mendable/firecrawl-js/axios": ["axios@1.14.0", "", { "dependencies": { "follow-redirects": "^1.15.11", "form-data": "^4.0.5", "proxy-from-env": "^2.1.0" } }, "sha512-3Y8yrqLSwjuzpXuZ0oIYZ/XGgLwUIBU3uLvbcpb0pidD9ctpShJd43KSlEEkVQg6DS0G9NKyzOvBfUtDKEyHvQ=="],
|
||||
|
||||
"@opentelemetry/exporter-trace-otlp-grpc/@opentelemetry/core": ["@opentelemetry/core@1.30.1", "", { "dependencies": { "@opentelemetry/semantic-conventions": "1.28.0" }, "peerDependencies": { "@opentelemetry/api": ">=1.0.0 <1.10.0" } }, "sha512-OOCM2C/QIURhJMuKaekP3TRBxBKxG/TWWA0TL2J6nXUtDnuCtccy49LUJF8xPFXMX+0LMcxFpCo8M9cGY1W6rQ=="],
|
||||
|
||||
"@opentelemetry/exporter-trace-otlp-grpc/@opentelemetry/otlp-exporter-base": ["@opentelemetry/otlp-exporter-base@0.57.2", "", { "dependencies": { "@opentelemetry/core": "1.30.1", "@opentelemetry/otlp-transformer": "0.57.2" }, "peerDependencies": { "@opentelemetry/api": "^1.3.0" } }, "sha512-XdxEzL23Urhidyebg5E6jZoaiW5ygP/mRjxLHixogbqwDy2Faduzb5N0o/Oi+XTIJu+iyxXdVORjXax+Qgfxag=="],
|
||||
@@ -1377,6 +1379,8 @@
|
||||
|
||||
"cliui/wrap-ansi": ["wrap-ansi@7.0.0", "", { "dependencies": { "ansi-styles": "^4.0.0", "string-width": "^4.1.0", "strip-ansi": "^6.0.0" } }, "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q=="],
|
||||
|
||||
"firecrawl/axios": ["axios@1.14.0", "", { "dependencies": { "follow-redirects": "^1.15.11", "form-data": "^4.0.5", "proxy-from-env": "^2.1.0" } }, "sha512-3Y8yrqLSwjuzpXuZ0oIYZ/XGgLwUIBU3uLvbcpb0pidD9ctpShJd43KSlEEkVQg6DS0G9NKyzOvBfUtDKEyHvQ=="],
|
||||
|
||||
"form-data/mime-types": ["mime-types@2.1.35", "", { "dependencies": { "mime-db": "1.52.0" } }, "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw=="],
|
||||
|
||||
"gaxios/is-stream": ["is-stream@2.0.1", "", {}, "sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg=="],
|
||||
|
||||
@@ -48,6 +48,8 @@ export OPENAI_MODEL=gpt-4o
|
||||
`codexplan` maps to GPT-5.4 on the Codex backend with high reasoning.
|
||||
`codexspark` maps to GPT-5.3 Codex Spark for faster loops.
|
||||
|
||||
If you use the in-app provider wizard, choose `Codex OAuth` to open ChatGPT sign-in in your browser and let OpenClaude store Codex credentials securely.
|
||||
|
||||
If you already use the Codex CLI, OpenClaude reads `~/.codex/auth.json` automatically. You can also point it elsewhere with `CODEX_AUTH_JSON_PATH` or override the token directly with `CODEX_API_KEY`.
|
||||
|
||||
```bash
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "@gitlawb/openclaude",
|
||||
"version": "0.2.2",
|
||||
"version": "0.3.0",
|
||||
"description": "Claude Code opened to any LLM — OpenAI, Gemini, DeepSeek, Ollama, and 200+ models",
|
||||
"type": "module",
|
||||
"bin": {
|
||||
@@ -76,7 +76,7 @@
|
||||
"@opentelemetry/semantic-conventions": "1.40.0",
|
||||
"ajv": "8.18.0",
|
||||
"auto-bind": "5.0.1",
|
||||
"axios": "1.14.0",
|
||||
"axios": "1.15.0",
|
||||
"bidi-js": "1.0.3",
|
||||
"chalk": "5.6.2",
|
||||
"chokidar": "4.0.3",
|
||||
|
||||
135
scripts/build.ts
135
scripts/build.ts
@@ -8,7 +8,8 @@
|
||||
* - src/ path aliases
|
||||
*/
|
||||
|
||||
import { readFileSync } from 'fs'
|
||||
import { readFileSync, readdirSync, writeFileSync } from 'fs'
|
||||
import { join } from 'path'
|
||||
import { noTelemetryPlugin } from './no-telemetry-plugin'
|
||||
|
||||
const pkg = JSON.parse(readFileSync('./package.json', 'utf-8'))
|
||||
@@ -24,25 +25,84 @@ const featureFlags: Record<string, boolean> = {
|
||||
BRIDGE_MODE: false,
|
||||
DAEMON: false,
|
||||
AGENT_TRIGGERS: false,
|
||||
MONITOR_TOOL: false,
|
||||
MONITOR_TOOL: true,
|
||||
ABLATION_BASELINE: false,
|
||||
DUMP_SYSTEM_PROMPT: false,
|
||||
CACHED_MICROCOMPACT: false,
|
||||
COORDINATOR_MODE: false,
|
||||
COORDINATOR_MODE: true,
|
||||
BUILTIN_EXPLORE_PLAN_AGENTS: true,
|
||||
CONTEXT_COLLAPSE: false,
|
||||
COMMIT_ATTRIBUTION: false,
|
||||
TEAMMEM: false,
|
||||
TEAMMEM: true,
|
||||
UDS_INBOX: false,
|
||||
BG_SESSIONS: false,
|
||||
AWAY_SUMMARY: false,
|
||||
TRANSCRIPT_CLASSIFIER: false,
|
||||
WEB_BROWSER_TOOL: false,
|
||||
MESSAGE_ACTIONS: false,
|
||||
MESSAGE_ACTIONS: true,
|
||||
BUDDY: true,
|
||||
CHICAGO_MCP: false,
|
||||
COWORKER_TYPE_TELEMETRY: false,
|
||||
}
|
||||
|
||||
// ── Pre-process: replace feature() calls with boolean literals ──────
|
||||
// Bun v1.3.9+ resolves `import { feature } from 'bun:bundle'` natively
|
||||
// before plugins can intercept it via onResolve. The bun: namespace is
|
||||
// handled by Bun's C++ resolver which runs before the JS plugin phase,
|
||||
// so the previous onResolve/onLoad shim was silently ineffective — ALL
|
||||
// feature() calls evaluated to false regardless of the featureFlags map.
|
||||
//
|
||||
// Fix: pre-process source files to strip the bun:bundle import and
|
||||
// replace feature('FLAG') calls with their boolean literal. Files are
|
||||
// modified in-place before Bun.build() and restored in a finally block.
|
||||
|
||||
// Match feature('FLAG') calls, including multi-line: feature(\n 'FLAG',\n)
|
||||
const featureCallRe = /\bfeature\(\s*['"](\w+)['"][,\s]*\)/gs
|
||||
const featureImportRe = /import\s*\{[^}]*\bfeature\b[^}]*\}\s*from\s*['"]bun:bundle['"];?\s*\n?/g
|
||||
const modifiedFiles = new Map<string, string>() // path → original content
|
||||
|
||||
function preProcessFeatureFlags(dir: string) {
|
||||
for (const ent of readdirSync(dir, { withFileTypes: true })) {
|
||||
const full = join(dir, ent.name)
|
||||
if (ent.isDirectory()) { preProcessFeatureFlags(full); continue }
|
||||
if (!/\.(ts|tsx)$/.test(ent.name)) continue
|
||||
|
||||
const raw = readFileSync(full, 'utf-8')
|
||||
if (!raw.includes('feature(')) continue
|
||||
|
||||
let contents = raw
|
||||
contents = contents.replace(featureImportRe, '')
|
||||
contents = contents.replace(featureCallRe, (_match, name) =>
|
||||
String((featureFlags as Record<string, boolean>)[name] ?? false),
|
||||
)
|
||||
|
||||
if (contents !== raw) {
|
||||
modifiedFiles.set(full, raw)
|
||||
writeFileSync(full, contents)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
function restoreModifiedFiles() {
|
||||
for (const [path, original] of modifiedFiles) {
|
||||
writeFileSync(path, original)
|
||||
}
|
||||
modifiedFiles.clear()
|
||||
}
|
||||
|
||||
preProcessFeatureFlags(join(import.meta.dir, '..', 'src'))
|
||||
const numModified = modifiedFiles.size
|
||||
|
||||
// Restore source files on abrupt termination (Ctrl+C, kill, etc.)
|
||||
for (const signal of ['SIGINT', 'SIGTERM'] as const) {
|
||||
process.on(signal, () => {
|
||||
restoreModifiedFiles()
|
||||
process.exit(signal === 'SIGINT' ? 130 : 143)
|
||||
})
|
||||
}
|
||||
|
||||
try {
|
||||
|
||||
const result = await Bun.build({
|
||||
entrypoints: ['./src/entrypoints/cli.tsx'],
|
||||
outdir: './dist',
|
||||
@@ -103,18 +163,11 @@ export async function handleBgFlag() { throw new Error("Background sessions are
|
||||
],
|
||||
] as const)
|
||||
|
||||
// Resolve `import { feature } from 'bun:bundle'` to a shim
|
||||
build.onResolve({ filter: /^bun:bundle$/ }, () => ({
|
||||
path: 'bun:bundle',
|
||||
namespace: 'bun-bundle-shim',
|
||||
}))
|
||||
build.onLoad(
|
||||
{ filter: /.*/, namespace: 'bun-bundle-shim' },
|
||||
() => ({
|
||||
contents: `const featureFlags = ${JSON.stringify(featureFlags)};\nexport function feature(name) { return featureFlags[name] ?? false; }`,
|
||||
loader: 'js',
|
||||
}),
|
||||
)
|
||||
// bun:bundle feature() replacement is handled by the source
|
||||
// pre-processing step above (see preProcessFeatureFlags).
|
||||
// The previous onResolve/onLoad shim was ineffective in Bun
|
||||
// v1.3.9+ because the bun: namespace is resolved natively
|
||||
// before the JS plugin phase runs.
|
||||
|
||||
build.onResolve(
|
||||
{ filter: /^\.\.\/(daemon\/workerRegistry|daemon\/main|cli\/bg|cli\/handlers\/templateJobs|environment-runner\/main|self-hosted-runner\/main)\.js$/ },
|
||||
@@ -274,16 +327,7 @@ export const SeverityNumber = {};
|
||||
|
||||
// Scan source to find imports that can't resolve
|
||||
function scanForMissingImports() {
|
||||
function walk(dir: string) {
|
||||
for (const ent of fs.readdirSync(dir, { withFileTypes: true })) {
|
||||
const full = pathMod.join(dir, ent.name)
|
||||
if (ent.isDirectory()) { walk(full); continue }
|
||||
if (!/\.(ts|tsx)$/.test(ent.name)) continue
|
||||
const code: string = fs.readFileSync(full, 'utf-8')
|
||||
// Collect all imports
|
||||
for (const m of code.matchAll(/import\s+(?:\{([^}]*)\}|(\w+))?\s*(?:,\s*\{([^}]*)\})?\s*from\s+['"](.*?)['"]/g)) {
|
||||
const specifier = m[4]
|
||||
const namedPart = m[1] || m[3] || ''
|
||||
function checkAndRegister(specifier: string, fileDir: string, namedPart: string) {
|
||||
const names = namedPart.split(',')
|
||||
.map((s: string) => s.trim().replace(/^type\s+/, ''))
|
||||
.filter((s: string) => s && !s.startsWith('type '))
|
||||
@@ -303,8 +347,7 @@ export const SeverityNumber = {};
|
||||
}
|
||||
// Check relative .js imports
|
||||
else if (specifier.endsWith('.js') && (specifier.startsWith('./') || specifier.startsWith('../'))) {
|
||||
const dir2 = pathMod.dirname(full)
|
||||
const resolved = pathMod.resolve(dir2, specifier)
|
||||
const resolved = pathMod.resolve(fileDir, specifier)
|
||||
const tsVariant = resolved.replace(/\.js$/, '.ts')
|
||||
const tsxVariant = resolved.replace(/\.js$/, '.tsx')
|
||||
if (!fs.existsSync(resolved) && !fs.existsSync(tsVariant) && !fs.existsSync(tsxVariant)) {
|
||||
@@ -317,6 +360,30 @@ export const SeverityNumber = {};
|
||||
if (!missingModuleExports.has(specifier)) missingModuleExports.set(specifier, new Set())
|
||||
for (const n of names) missingModuleExports.get(specifier)!.add(n)
|
||||
}
|
||||
}
|
||||
|
||||
function walk(dir: string) {
|
||||
for (const ent of fs.readdirSync(dir, { withFileTypes: true })) {
|
||||
const full = pathMod.join(dir, ent.name)
|
||||
if (ent.isDirectory()) { walk(full); continue }
|
||||
if (!/\.(ts|tsx)$/.test(ent.name)) continue
|
||||
const code: string = fs.readFileSync(full, 'utf-8')
|
||||
const fileDir = pathMod.dirname(full)
|
||||
|
||||
// Collect static imports: import { X } from '...'
|
||||
for (const m of code.matchAll(/import\s+(?:\{([^}]*)\}|(\w+))?\s*(?:,\s*\{([^}]*)\})?\s*from\s+['"](.*?)['"]/g)) {
|
||||
checkAndRegister(m[4], fileDir, m[1] || m[3] || '')
|
||||
}
|
||||
|
||||
// Collect dynamic requires: require('...') — these are used
|
||||
// behind feature() gates and become live when flags are enabled.
|
||||
for (const m of code.matchAll(/require\(\s*['"](\.\.?\/[^'"]+)['"]\s*\)/g)) {
|
||||
checkAndRegister(m[1], fileDir, '')
|
||||
}
|
||||
|
||||
// Collect dynamic imports: import('...')
|
||||
for (const m of code.matchAll(/import\(\s*['"](\.\.?\/[^'"]+)['"]\s*\)/g)) {
|
||||
checkAndRegister(m[1], fileDir, '')
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -389,7 +456,13 @@ if (!result.success) {
|
||||
for (const log of result.logs) {
|
||||
console.error(log)
|
||||
}
|
||||
process.exit(1)
|
||||
process.exitCode = 1
|
||||
} else {
|
||||
console.log(`✓ Built openclaude v${version} → dist/cli.mjs`)
|
||||
}
|
||||
|
||||
console.log(`✓ Built openclaude v${version} → dist/cli.mjs`)
|
||||
} finally {
|
||||
// Always restore source files, even if Bun.build() throws
|
||||
restoreModifiedFiles()
|
||||
console.log(` 🔄 feature-flags: pre-processed ${numModified} files (restored)`)
|
||||
}
|
||||
|
||||
146
scripts/no-telemetry-growthbook-stub.test.ts
Normal file
146
scripts/no-telemetry-growthbook-stub.test.ts
Normal file
@@ -0,0 +1,146 @@
|
||||
import { afterAll, beforeEach, describe, expect, test } from 'bun:test'
|
||||
import { mkdirSync, readFileSync, rmSync, unlinkSync, writeFileSync } from 'node:fs'
|
||||
import { join } from 'node:path'
|
||||
import { tmpdir } from 'node:os'
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Setup: extract the growthbook stub from no-telemetry-plugin.ts, write it to
|
||||
// a temp .mjs file, and dynamically import it so we can test the real code
|
||||
// that gets bundled.
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
const pluginSource = readFileSync(join(__dirname, 'no-telemetry-plugin.ts'), 'utf-8')
|
||||
const stubMatch = pluginSource.match(/'services\/analytics\/growthbook': `([\s\S]*?)`/)
|
||||
if (!stubMatch) throw new Error('Could not extract growthbook stub from no-telemetry-plugin.ts')
|
||||
|
||||
const testDir = join(tmpdir(), `growthbook-stub-test-${process.pid}`)
|
||||
const stubFile = join(testDir, 'growthbook-stub.mjs')
|
||||
const flagsFile = join(testDir, 'test-flags.json')
|
||||
|
||||
mkdirSync(testDir, { recursive: true })
|
||||
writeFileSync(stubFile, stubMatch[1])
|
||||
|
||||
// Point the stub at our test flags file (checked by _loadFlags on first access)
|
||||
process.env.CLAUDE_FEATURE_FLAGS_FILE = flagsFile
|
||||
|
||||
const stub = await import(stubFile)
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Tests
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
describe('growthbook stub — local feature flag overrides', () => {
|
||||
beforeEach(() => {
|
||||
stub.resetGrowthBook()
|
||||
try { unlinkSync(flagsFile) } catch { /* may not exist */ }
|
||||
})
|
||||
|
||||
afterAll(() => {
|
||||
rmSync(testDir, { recursive: true, force: true })
|
||||
delete process.env.CLAUDE_FEATURE_FLAGS_FILE
|
||||
})
|
||||
|
||||
// ── File absent ──────────────────────────────────────────────────
|
||||
|
||||
test('returns defaultValue when flags file is absent', () => {
|
||||
expect(stub.getFeatureValue_CACHED_MAY_BE_STALE('tengu_foo', 42)).toBe(42)
|
||||
})
|
||||
|
||||
test('getAllGrowthBookFeatures returns {} when file is absent', () => {
|
||||
expect(stub.getAllGrowthBookFeatures()).toEqual({})
|
||||
})
|
||||
|
||||
// ── Valid JSON object ────────────────────────────────────────────
|
||||
|
||||
test('loads and returns values from a valid JSON file', () => {
|
||||
writeFileSync(flagsFile, JSON.stringify({ tengu_foo: true, tengu_bar: 'hello' }))
|
||||
|
||||
expect(stub.getFeatureValue_CACHED_MAY_BE_STALE('tengu_foo', false)).toBe(true)
|
||||
expect(stub.getFeatureValue_CACHED_MAY_BE_STALE('tengu_bar', 'default')).toBe('hello')
|
||||
})
|
||||
|
||||
test('returns defaultValue for keys not present in the file', () => {
|
||||
writeFileSync(flagsFile, JSON.stringify({ tengu_foo: true }))
|
||||
|
||||
expect(stub.getFeatureValue_CACHED_MAY_BE_STALE('tengu_missing', 99)).toBe(99)
|
||||
})
|
||||
|
||||
test('getAllGrowthBookFeatures returns the full flags object', () => {
|
||||
const flags = { tengu_a: true, tengu_b: false, tengu_c: 42 }
|
||||
writeFileSync(flagsFile, JSON.stringify(flags))
|
||||
|
||||
expect(stub.getAllGrowthBookFeatures()).toEqual(flags)
|
||||
})
|
||||
|
||||
// ── Malformed / non-object JSON ──────────────────────────────────
|
||||
|
||||
test('falls back to defaults on malformed JSON', () => {
|
||||
writeFileSync(flagsFile, '{not valid json!!!')
|
||||
|
||||
expect(stub.getFeatureValue_CACHED_MAY_BE_STALE('tengu_foo', 'fallback')).toBe('fallback')
|
||||
})
|
||||
|
||||
test('falls back to defaults when JSON is a primitive (true)', () => {
|
||||
writeFileSync(flagsFile, 'true')
|
||||
|
||||
expect(stub.getFeatureValue_CACHED_MAY_BE_STALE('tengu_foo', 'fallback')).toBe('fallback')
|
||||
})
|
||||
|
||||
test('falls back to defaults when JSON is an array', () => {
|
||||
writeFileSync(flagsFile, '["a", "b"]')
|
||||
|
||||
expect(stub.getFeatureValue_CACHED_MAY_BE_STALE('tengu_foo', 'fallback')).toBe('fallback')
|
||||
})
|
||||
|
||||
// ── Cache invalidation ───────────────────────────────────────────
|
||||
|
||||
test('resetGrowthBook clears cache so the file is re-read', () => {
|
||||
writeFileSync(flagsFile, JSON.stringify({ tengu_foo: 'first' }))
|
||||
expect(stub.getFeatureValue_CACHED_MAY_BE_STALE('tengu_foo', 'x')).toBe('first')
|
||||
|
||||
// Update the file — cached value is still 'first'
|
||||
writeFileSync(flagsFile, JSON.stringify({ tengu_foo: 'second' }))
|
||||
expect(stub.getFeatureValue_CACHED_MAY_BE_STALE('tengu_foo', 'x')).toBe('first')
|
||||
|
||||
// After reset, the new value is picked up
|
||||
stub.resetGrowthBook()
|
||||
expect(stub.getFeatureValue_CACHED_MAY_BE_STALE('tengu_foo', 'x')).toBe('second')
|
||||
})
|
||||
|
||||
test('refreshGrowthBookFeatures clears cache', async () => {
|
||||
writeFileSync(flagsFile, JSON.stringify({ tengu_foo: 'v1' }))
|
||||
expect(stub.getFeatureValue_CACHED_MAY_BE_STALE('tengu_foo', 'x')).toBe('v1')
|
||||
|
||||
writeFileSync(flagsFile, JSON.stringify({ tengu_foo: 'v2' }))
|
||||
await stub.refreshGrowthBookFeatures()
|
||||
expect(stub.getFeatureValue_CACHED_MAY_BE_STALE('tengu_foo', 'x')).toBe('v2')
|
||||
})
|
||||
|
||||
// ── Multiple getter variants ─────────────────────────────────────
|
||||
|
||||
test('all getter functions read from local flags', async () => {
|
||||
writeFileSync(flagsFile, JSON.stringify({ tengu_gate: true, tengu_config: { a: 1 } }))
|
||||
|
||||
expect(await stub.getFeatureValue_DEPRECATED('tengu_gate', false)).toBe(true)
|
||||
stub.resetGrowthBook()
|
||||
expect(stub.getFeatureValue_CACHED_WITH_REFRESH('tengu_gate', false)).toBe(true)
|
||||
stub.resetGrowthBook()
|
||||
expect(stub.checkStatsigFeatureGate_CACHED_MAY_BE_STALE('tengu_gate')).toBe(true)
|
||||
stub.resetGrowthBook()
|
||||
expect(await stub.checkGate_CACHED_OR_BLOCKING('tengu_gate')).toBe(true)
|
||||
stub.resetGrowthBook()
|
||||
expect(await stub.getDynamicConfig_BLOCKS_ON_INIT('tengu_config', {})).toEqual({ a: 1 })
|
||||
stub.resetGrowthBook()
|
||||
expect(stub.getDynamicConfig_CACHED_MAY_BE_STALE('tengu_config', {})).toEqual({ a: 1 })
|
||||
})
|
||||
|
||||
// ── Security gate ────────────────────────────────────────────────
|
||||
|
||||
test('checkSecurityRestrictionGate always returns false regardless of flags', async () => {
|
||||
writeFileSync(flagsFile, JSON.stringify({
|
||||
tengu_disable_bypass_permissions_mode: true,
|
||||
}))
|
||||
|
||||
expect(await stub.checkSecurityRestrictionGate()).toBe(false)
|
||||
})
|
||||
})
|
||||
@@ -34,28 +34,55 @@ export function _resetForTesting() {}
|
||||
`,
|
||||
|
||||
'services/analytics/growthbook': `
|
||||
import _fs from 'node:fs';
|
||||
import _path from 'node:path';
|
||||
import _os from 'node:os';
|
||||
|
||||
let _flags = undefined;
|
||||
|
||||
function _loadFlags() {
|
||||
if (_flags !== undefined) return;
|
||||
try {
|
||||
const flagsPath = process.env.CLAUDE_FEATURE_FLAGS_FILE
|
||||
|| _path.join(_os.homedir(), '.claude', 'feature-flags.json');
|
||||
const parsed = JSON.parse(_fs.readFileSync(flagsPath, 'utf-8'));
|
||||
_flags = (parsed && typeof parsed === 'object' && !Array.isArray(parsed)) ? parsed : null;
|
||||
} catch {
|
||||
_flags = null;
|
||||
}
|
||||
}
|
||||
|
||||
function _getFlagValue(key, defaultValue) {
|
||||
_loadFlags();
|
||||
if (_flags != null && Object.hasOwn(_flags, key)) return _flags[key];
|
||||
return defaultValue;
|
||||
}
|
||||
|
||||
const noop = () => {};
|
||||
export function onGrowthBookRefresh() { return noop; }
|
||||
export function hasGrowthBookEnvOverride() { return false; }
|
||||
export function getAllGrowthBookFeatures() { return {}; }
|
||||
export function getAllGrowthBookFeatures() { _loadFlags(); return _flags || {}; }
|
||||
export function getGrowthBookConfigOverrides() { return {}; }
|
||||
export function setGrowthBookConfigOverride() {}
|
||||
export function clearGrowthBookConfigOverrides() {}
|
||||
export function getApiBaseUrlHost() { return undefined; }
|
||||
export const initializeGrowthBook = async () => null;
|
||||
export async function getFeatureValue_DEPRECATED(feature, defaultValue) { return defaultValue; }
|
||||
export function getFeatureValue_CACHED_MAY_BE_STALE(feature, defaultValue) { return defaultValue; }
|
||||
export function getFeatureValue_CACHED_WITH_REFRESH(feature, defaultValue) { return defaultValue; }
|
||||
export function checkStatsigFeatureGate_CACHED_MAY_BE_STALE() { return false; }
|
||||
export async function checkSecurityRestrictionGate() { return false; }
|
||||
export async function checkGate_CACHED_OR_BLOCKING() { return false; }
|
||||
export async function getFeatureValue_DEPRECATED(feature, defaultValue) { return _getFlagValue(feature, defaultValue); }
|
||||
export function getFeatureValue_CACHED_MAY_BE_STALE(feature, defaultValue) { return _getFlagValue(feature, defaultValue); }
|
||||
export function getFeatureValue_CACHED_WITH_REFRESH(feature, defaultValue) { return _getFlagValue(feature, defaultValue); }
|
||||
export function checkStatsigFeatureGate_CACHED_MAY_BE_STALE(gate) { return Boolean(_getFlagValue(gate, false)); }
|
||||
// Security killswitch — always false in the open build. Anthropic uses this
|
||||
// gate to remotely disable bypassPermissions mode; exposing it via local flags
|
||||
// would let users accidentally lock themselves out of --dangerously-skip-permissions.
|
||||
export async function checkSecurityRestrictionGate(gate) { return false; }
|
||||
export async function checkGate_CACHED_OR_BLOCKING(gate) { return Boolean(_getFlagValue(gate, false)); }
|
||||
export function refreshGrowthBookAfterAuthChange() {}
|
||||
export function resetGrowthBook() {}
|
||||
export async function refreshGrowthBookFeatures() {}
|
||||
export function resetGrowthBook() { _flags = undefined; }
|
||||
export async function refreshGrowthBookFeatures() { _flags = undefined; }
|
||||
export function setupPeriodicGrowthBookRefresh() {}
|
||||
export function stopPeriodicGrowthBookRefresh() {}
|
||||
export async function getDynamicConfig_BLOCKS_ON_INIT(configName, defaultValue) { return defaultValue; }
|
||||
export function getDynamicConfig_CACHED_MAY_BE_STALE(configName, defaultValue) { return defaultValue; }
|
||||
export async function getDynamicConfig_BLOCKS_ON_INIT(configName, defaultValue) { return _getFlagValue(configName, defaultValue); }
|
||||
export function getDynamicConfig_CACHED_MAY_BE_STALE(configName, defaultValue) { return _getFlagValue(configName, defaultValue); }
|
||||
`,
|
||||
|
||||
'services/analytics/sink': `
|
||||
|
||||
282
src/__tests__/bugfixes.test.ts
Normal file
282
src/__tests__/bugfixes.test.ts
Normal file
@@ -0,0 +1,282 @@
|
||||
/**
|
||||
* Tests for Bug Fixes applied to openclaude.
|
||||
*
|
||||
* Covers:
|
||||
* 1. Gemini `store: false` rejection fix
|
||||
* 2. Session timeout / 500 error fix (stream idle timeout)
|
||||
* 3. Agent loop continuation nudge
|
||||
* 4. Web search result count improvements
|
||||
*/
|
||||
|
||||
import { describe, test, expect } from 'bun:test'
|
||||
import { resolve } from 'path'
|
||||
|
||||
const SRC = resolve(import.meta.dir, '..')
|
||||
const file = (relative: string) => Bun.file(resolve(SRC, relative))
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Fix 1: Gemini `store: false` rejection
|
||||
// ---------------------------------------------------------------------------
|
||||
describe('Gemini store field fix', () => {
|
||||
test('isGeminiMode is imported and used in openaiShim', async () => {
|
||||
const content = await file('services/api/openaiShim.ts').text()
|
||||
|
||||
// Verify the fix: store deletion should check for Gemini mode
|
||||
expect(content).toContain('isGeminiMode()')
|
||||
expect(content).toContain("mistral and gemini don't recognize body.store")
|
||||
// Ensure the delete body.store is guarded for both Mistral and Gemini
|
||||
expect(content).toMatch(/isMistral\s*\|\|\s*isGeminiMode\(\)/)
|
||||
})
|
||||
|
||||
test('store: false is still set by default (OpenAI needs it)', async () => {
|
||||
const content = await file('services/api/openaiShim.ts').text()
|
||||
|
||||
// The body should still have store: false by default
|
||||
expect(content).toMatch(/store:\s*false/)
|
||||
// But it should be deleted for non-OpenAI providers
|
||||
expect(content).toMatch(/delete body\.store/)
|
||||
})
|
||||
})
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Fix 2: Session timeout — stream idle timeout
|
||||
// ---------------------------------------------------------------------------
|
||||
describe('Session timeout fix', () => {
|
||||
test('openaiShim has idle timeout for SSE streams', async () => {
|
||||
const content = await file('services/api/openaiShim.ts').text()
|
||||
|
||||
expect(content).toContain('STREAM_IDLE_TIMEOUT_MS')
|
||||
expect(content).toContain('readWithTimeout')
|
||||
expect(content).toMatch(/readWithTimeout\(\)/)
|
||||
})
|
||||
|
||||
test('codexShim has idle timeout for SSE streams', async () => {
|
||||
const content = await file('services/api/codexShim.ts').text()
|
||||
|
||||
expect(content).toContain('STREAM_IDLE_TIMEOUT_MS')
|
||||
expect(content).toContain('readWithTimeout')
|
||||
expect(content).toMatch(/readWithTimeout\(\)/)
|
||||
})
|
||||
|
||||
test('idle timeout is set to a reasonable value (>= 60s)', async () => {
|
||||
const content = await file('services/api/openaiShim.ts').text()
|
||||
|
||||
// Extract the timeout value (supports numeric separators like 120_000)
|
||||
const match = content.match(/STREAM_IDLE_TIMEOUT_MS\s*=\s*([\d_]+)/)
|
||||
expect(match).not.toBeNull()
|
||||
const timeoutMs = parseInt(match![1].replace(/_/g, ''), 10)
|
||||
expect(timeoutMs).toBeGreaterThanOrEqual(60_000)
|
||||
})
|
||||
})
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Fix 3: Agent loop continuation nudge
|
||||
// ---------------------------------------------------------------------------
|
||||
describe('Agent loop continuation nudge', () => {
|
||||
test('query.ts has continuation signal detection', async () => {
|
||||
const content = await file('query.ts').text()
|
||||
|
||||
expect(content).toContain('continuationSignals')
|
||||
expect(content).toContain('Continuation nudge triggered')
|
||||
expect(content).toContain('continuation_nudge')
|
||||
})
|
||||
|
||||
test('continuation signals include tightened patterns', async () => {
|
||||
const content = await file('query.ts').text()
|
||||
|
||||
// Should detect tightened patterns requiring explicit action verbs
|
||||
expect(content).toMatch(/so now \(i\|let me\|we\)/)
|
||||
expect(content).toContain('completionMarkers')
|
||||
expect(content).toContain('MAX_CONTINUATION_NUDGES')
|
||||
// Verify the nudge counter guard exists
|
||||
expect(content).toMatch(/continuationNudgeCount\s*<\s*MAX_CONTINUATION_NUDGES/)
|
||||
})
|
||||
|
||||
test('nudge creates a meta user message to continue', async () => {
|
||||
const content = await file('query.ts').text()
|
||||
|
||||
expect(content).toContain(
|
||||
'Continue with the task. Use the appropriate tools to proceed.',
|
||||
)
|
||||
})
|
||||
})
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Fix 4: Web search result count improvements
|
||||
// ---------------------------------------------------------------------------
|
||||
describe('Web search result count improvements', () => {
|
||||
test('Bing provider requests at least 15 results', async () => {
|
||||
const content = await file(
|
||||
'tools/WebSearchTool/providers/bing.ts',
|
||||
).text()
|
||||
|
||||
expect(content).toMatch(/count.*['"]15['"]/)
|
||||
})
|
||||
|
||||
test('Tavily provider requests at least 15 results', async () => {
|
||||
const content = await file(
|
||||
'tools/WebSearchTool/providers/tavily.ts',
|
||||
).text()
|
||||
|
||||
expect(content).toMatch(/max_results:\s*15/)
|
||||
})
|
||||
|
||||
test('Exa provider requests at least 15 results', async () => {
|
||||
const content = await file(
|
||||
'tools/WebSearchTool/providers/exa.ts',
|
||||
).text()
|
||||
|
||||
expect(content).toMatch(/numResults:\s*15/)
|
||||
})
|
||||
|
||||
test('Firecrawl provider requests at least 15 results', async () => {
|
||||
const content = await file(
|
||||
'tools/WebSearchTool/providers/firecrawl.ts',
|
||||
).text()
|
||||
|
||||
expect(content).toMatch(/limit:\s*15/)
|
||||
})
|
||||
|
||||
test('Mojeek provider requests at least 10 results', async () => {
|
||||
const content = await file(
|
||||
'tools/WebSearchTool/providers/mojeek.ts',
|
||||
).text()
|
||||
|
||||
// Mojeek uses 't' param for result count — verify it's set to 10
|
||||
expect(content).toMatch(/searchParams\.set\('t',\s*'10'\)/)
|
||||
})
|
||||
|
||||
test('You.com provider requests at least 10 results', async () => {
|
||||
const content = await file(
|
||||
'tools/WebSearchTool/providers/you.ts',
|
||||
).text()
|
||||
|
||||
expect(content).toMatch(/num_web_results.*['"]10['"]/)
|
||||
})
|
||||
|
||||
test('Jina provider requests at least 10 results', async () => {
|
||||
const content = await file(
|
||||
'tools/WebSearchTool/providers/jina.ts',
|
||||
).text()
|
||||
|
||||
expect(content).toMatch(/count.*['"]10['"]/)
|
||||
})
|
||||
|
||||
test('Native Anthropic web search max_uses increased to 15', async () => {
|
||||
const content = await file(
|
||||
'tools/WebSearchTool/WebSearchTool.ts',
|
||||
).text()
|
||||
|
||||
expect(content).toMatch(/max_uses:\s*15/)
|
||||
})
|
||||
})
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Fix 5: MCP tool timeout fix
|
||||
// ---------------------------------------------------------------------------
|
||||
describe('MCP tool timeout fix', () => {
|
||||
test('default MCP tool timeout is reasonable (not 27 hours)', async () => {
|
||||
const content = await file('services/mcp/client.ts').text()
|
||||
|
||||
// Should NOT have the old ~27.8 hour default
|
||||
expect(content).not.toContain('100_000_000')
|
||||
// Should have a reasonable timeout (5 minutes = 300_000ms)
|
||||
expect(content).toMatch(/DEFAULT_MCP_TOOL_TIMEOUT_MS\s*=\s*300_000/)
|
||||
})
|
||||
|
||||
test('MCP tools/list has retry logic', async () => {
|
||||
const content = await file('services/mcp/client.ts').text()
|
||||
|
||||
expect(content).toContain('tools/list failed (attempt')
|
||||
expect(content).toContain('Retrying...')
|
||||
})
|
||||
|
||||
test('MCP URL elicitation checks abort signal', async () => {
|
||||
const content = await file('services/mcp/client.ts').text()
|
||||
|
||||
expect(content).toContain('signal.aborted')
|
||||
expect(content).toContain('Tool call aborted during URL elicitation')
|
||||
})
|
||||
|
||||
test('MCP tool error messages include server and tool name in telemetry', async () => {
|
||||
const content = await file('services/mcp/client.ts').text()
|
||||
|
||||
// Telemetry message should include context like "MCP tool [serverName] toolName: error"
|
||||
// The human-readable message stays unchanged to avoid breaking error consumers
|
||||
expect(content).toContain('MCP tool [${name}] ${tool}:')
|
||||
})
|
||||
})
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Cross-cutting: verify no regressions
|
||||
// ---------------------------------------------------------------------------
|
||||
describe('Regression checks', () => {
|
||||
test('store field is still set for OpenAI (not deleted unconditionally)', async () => {
|
||||
const content = await file('services/api/openaiShim.ts').text()
|
||||
|
||||
// store: false should exist in body construction
|
||||
expect(content).toMatch(/store:\s*false/)
|
||||
// But delete body.store should be conditional (guarded by if)
|
||||
const deleteLines = content.split('\n').filter(l => l.includes('delete body.store'))
|
||||
expect(deleteLines.length).toBeGreaterThan(0)
|
||||
// Verify the delete is inside a conditional block by checking surrounding context
|
||||
for (const line of deleteLines) {
|
||||
const trimmed = line.trim()
|
||||
// Should be either inside an if block (indented delete) or a comment
|
||||
expect(
|
||||
trimmed.startsWith('delete') && !trimmed.includes('// unconditional'),
|
||||
).toBe(true)
|
||||
}
|
||||
})
|
||||
})
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Fix 6: SendMessageTool race condition guard
|
||||
// ---------------------------------------------------------------------------
|
||||
describe('SendMessageTool race condition fix', () => {
|
||||
test('SendMessageTool has double-check for concurrent resume', async () => {
|
||||
const content = await file('tools/SendMessageTool/SendMessageTool.ts').text()
|
||||
|
||||
// Should have a second status check before resuming to prevent race
|
||||
expect(content).toContain('was concurrently resumed')
|
||||
// The freshTask check should re-read from getAppState
|
||||
expect(content).toMatch(/const freshTask = context\.getAppState\(\)\.tasks\[agentId\]/)
|
||||
})
|
||||
})
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Fix 7: AgentTool dump state cleanup
|
||||
// ---------------------------------------------------------------------------
|
||||
describe('AgentTool cleanup fix', () => {
|
||||
test('backgrounded agent always cleans up dump state', async () => {
|
||||
const content = await file('tools/AgentTool/AgentTool.tsx').text()
|
||||
|
||||
// The backgrounded agent's finally block should clean up regardless
|
||||
// of whether the agent crashed or completed normally
|
||||
expect(content).toContain('Defensive cleanup: wrap each call so one failure')
|
||||
// Verify cleanup is wrapped in try/catch for defensive execution
|
||||
expect(content).toMatch(/try\s*\{\s*clearInvokedSkillsForAgent/)
|
||||
expect(content).toMatch(/try\s*\{\s*clearDumpState/)
|
||||
})
|
||||
})
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Fix 8: Context overflow 500 error handling
|
||||
// ---------------------------------------------------------------------------
|
||||
describe('Context overflow 500 fix', () => {
|
||||
test('errors.ts has handler for context overflow 500 errors', async () => {
|
||||
const content = await file('services/api/errors.ts').text()
|
||||
|
||||
expect(content).toContain('500 errors caused by context overflow')
|
||||
expect(content).toContain('too many tokens')
|
||||
expect(content).toContain('The conversation has grown too large')
|
||||
})
|
||||
|
||||
test('query.ts has circuit breaker safety net for oversized context', async () => {
|
||||
const content = await file('query.ts').text()
|
||||
|
||||
expect(content).toContain('Safety net: when auto-compact')
|
||||
expect(content).toContain('circuit breaker has tripped')
|
||||
expect(content).toContain('automatic compaction has failed')
|
||||
})
|
||||
})
|
||||
55
src/__tests__/providerCounts.test.ts
Normal file
55
src/__tests__/providerCounts.test.ts
Normal file
@@ -0,0 +1,55 @@
|
||||
/**
|
||||
* Tests for Web Search Provider result count configurations.
|
||||
*/
|
||||
|
||||
import { describe, test, expect } from 'bun:test'
|
||||
import { resolve } from 'path'
|
||||
|
||||
const SRC = resolve(import.meta.dir, '..', 'tools', 'WebSearchTool', 'providers')
|
||||
const file = (name: string) => Bun.file(resolve(SRC, name))
|
||||
|
||||
describe('Provider result counts', () => {
|
||||
const providers = [
|
||||
'bing.ts',
|
||||
'tavily.ts',
|
||||
'exa.ts',
|
||||
'firecrawl.ts',
|
||||
'mojeek.ts',
|
||||
'you.ts',
|
||||
'jina.ts',
|
||||
'duckduckgo.ts',
|
||||
// linkup.ts excluded — uses depth param, not a result count field
|
||||
]
|
||||
|
||||
for (const name of providers) {
|
||||
test(`${name} exists and is readable`, async () => {
|
||||
const f = file(name)
|
||||
expect(await f.exists()).toBe(true)
|
||||
const content = await f.text()
|
||||
expect(content.length).toBeGreaterThan(100)
|
||||
})
|
||||
}
|
||||
|
||||
test('No provider hardcodes a limit below 10', async () => {
|
||||
const suspiciousPatterns = [
|
||||
/count['":\s]*['"]([1-9])['"]/i,
|
||||
/limit['":\s]*([1-9])\b/,
|
||||
/max_results['":\s]*([1-9])\b/,
|
||||
/numResults['":\s]*([1-9])\b/,
|
||||
]
|
||||
|
||||
for (const name of providers) {
|
||||
const content = await file(name).text()
|
||||
for (const pattern of suspiciousPatterns) {
|
||||
const match = content.match(pattern)
|
||||
if (match) {
|
||||
const num = parseInt(match[1], 10)
|
||||
expect(num).toBeGreaterThanOrEqual(
|
||||
10,
|
||||
`${name} has suspiciously low result count: ${match[0]}`,
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
})
|
||||
})
|
||||
@@ -1562,29 +1562,8 @@ export function clearInvokedSkillsForAgent(agentId: string): void {
|
||||
}
|
||||
}
|
||||
|
||||
// Slow operations tracking for dev bar
|
||||
const MAX_SLOW_OPERATIONS = 10
|
||||
const SLOW_OPERATION_TTL_MS = 10000
|
||||
|
||||
export function addSlowOperation(operation: string, durationMs: number): void {
|
||||
if (process.env.USER_TYPE !== 'ant') return
|
||||
// Skip tracking for editor sessions (user editing a prompt file in $EDITOR)
|
||||
// These are intentionally slow since the user is drafting text
|
||||
if (operation.includes('exec') && operation.includes('claude-prompt-')) {
|
||||
return
|
||||
}
|
||||
const now = Date.now()
|
||||
// Remove stale operations
|
||||
STATE.slowOperations = STATE.slowOperations.filter(
|
||||
op => now - op.timestamp < SLOW_OPERATION_TTL_MS,
|
||||
)
|
||||
// Add new operation
|
||||
STATE.slowOperations.push({ operation, durationMs, timestamp: now })
|
||||
// Keep only the most recent operations
|
||||
if (STATE.slowOperations.length > MAX_SLOW_OPERATIONS) {
|
||||
STATE.slowOperations = STATE.slowOperations.slice(-MAX_SLOW_OPERATIONS)
|
||||
}
|
||||
}
|
||||
// Slow operations tracking removed (was internal-only).
|
||||
// Functions kept as no-ops to avoid breaking callers.
|
||||
|
||||
const EMPTY_SLOW_OPERATIONS: ReadonlyArray<{
|
||||
operation: string
|
||||
@@ -1592,32 +1571,17 @@ const EMPTY_SLOW_OPERATIONS: ReadonlyArray<{
|
||||
timestamp: number
|
||||
}> = []
|
||||
|
||||
export function addSlowOperation(
|
||||
_operation: string,
|
||||
_durationMs: number,
|
||||
): void {}
|
||||
|
||||
export function getSlowOperations(): ReadonlyArray<{
|
||||
operation: string
|
||||
durationMs: number
|
||||
timestamp: number
|
||||
}> {
|
||||
// Most common case: nothing tracked. Return a stable reference so the
|
||||
// caller's setState() can bail via Object.is instead of re-rendering at 2fps.
|
||||
if (STATE.slowOperations.length === 0) {
|
||||
return EMPTY_SLOW_OPERATIONS
|
||||
}
|
||||
const now = Date.now()
|
||||
// Only allocate a new array when something actually expired; otherwise keep
|
||||
// the reference stable across polls while ops are still fresh.
|
||||
if (
|
||||
STATE.slowOperations.some(op => now - op.timestamp >= SLOW_OPERATION_TTL_MS)
|
||||
) {
|
||||
STATE.slowOperations = STATE.slowOperations.filter(
|
||||
op => now - op.timestamp < SLOW_OPERATION_TTL_MS,
|
||||
)
|
||||
if (STATE.slowOperations.length === 0) {
|
||||
return EMPTY_SLOW_OPERATIONS
|
||||
}
|
||||
}
|
||||
// Safe to return directly: addSlowOperation() reassigns STATE.slowOperations
|
||||
// before pushing, so the array held in React state is never mutated.
|
||||
return STATE.slowOperations
|
||||
return EMPTY_SLOW_OPERATIONS
|
||||
}
|
||||
|
||||
export function getMainThreadAgentType(): string | undefined {
|
||||
|
||||
@@ -14,21 +14,14 @@
|
||||
import { getOauthConfig } from '../constants/oauth.js'
|
||||
import { getClaudeAIOAuthTokens } from '../utils/auth.js'
|
||||
|
||||
/** Ant-only dev override: CLAUDE_BRIDGE_OAUTH_TOKEN, else undefined. */
|
||||
/** Dev override: CLAUDE_BRIDGE_OAUTH_TOKEN, else undefined. */
|
||||
export function getBridgeTokenOverride(): string | undefined {
|
||||
return (
|
||||
(process.env.USER_TYPE === 'ant' &&
|
||||
process.env.CLAUDE_BRIDGE_OAUTH_TOKEN) ||
|
||||
undefined
|
||||
)
|
||||
return process.env.CLAUDE_BRIDGE_OAUTH_TOKEN || undefined
|
||||
}
|
||||
|
||||
/** Ant-only dev override: CLAUDE_BRIDGE_BASE_URL, else undefined. */
|
||||
/** Dev override: CLAUDE_BRIDGE_BASE_URL, else undefined. */
|
||||
export function getBridgeBaseUrlOverride(): string | undefined {
|
||||
return (
|
||||
(process.env.USER_TYPE === 'ant' && process.env.CLAUDE_BRIDGE_BASE_URL) ||
|
||||
undefined
|
||||
)
|
||||
return process.env.CLAUDE_BRIDGE_BASE_URL || undefined
|
||||
}
|
||||
|
||||
/**
|
||||
|
||||
@@ -2194,14 +2194,10 @@ export async function bridgeMain(args: string[]): Promise<void> {
|
||||
|
||||
// Session ingress URL for WebSocket connections. In production this is the
|
||||
// same as baseUrl (Envoy routes /v1/session_ingress/* to session-ingress).
|
||||
// Locally, session-ingress runs on a different port (9413) than the
|
||||
// contain-provide-api (8211), so CLAUDE_BRIDGE_SESSION_INGRESS_URL must be
|
||||
// set explicitly. Ant-only, matching CLAUDE_BRIDGE_BASE_URL.
|
||||
// Locally, session-ingress may run on a different port, so
|
||||
// CLAUDE_BRIDGE_SESSION_INGRESS_URL can override the default.
|
||||
const sessionIngressUrl =
|
||||
process.env.USER_TYPE === 'ant' &&
|
||||
process.env.CLAUDE_BRIDGE_SESSION_INGRESS_URL
|
||||
? process.env.CLAUDE_BRIDGE_SESSION_INGRESS_URL
|
||||
: baseUrl
|
||||
process.env.CLAUDE_BRIDGE_SESSION_INGRESS_URL || baseUrl
|
||||
|
||||
const { getBranch, getRemoteUrl, findGitRoot } = await import(
|
||||
'../utils/git.js'
|
||||
@@ -2851,10 +2847,7 @@ export async function runBridgeHeadless(
|
||||
)
|
||||
}
|
||||
const sessionIngressUrl =
|
||||
process.env.USER_TYPE === 'ant' &&
|
||||
process.env.CLAUDE_BRIDGE_SESSION_INGRESS_URL
|
||||
? process.env.CLAUDE_BRIDGE_SESSION_INGRESS_URL
|
||||
: baseUrl
|
||||
process.env.CLAUDE_BRIDGE_SESSION_INGRESS_URL || baseUrl
|
||||
|
||||
const { getBranch, getRemoteUrl, findGitRoot } = await import(
|
||||
'../utils/git.js'
|
||||
|
||||
@@ -217,25 +217,39 @@ export async function getBridgeSession(
|
||||
}
|
||||
|
||||
const url = `${opts?.baseUrl ?? getOauthConfig().BASE_API_URL}/v1/sessions/${sessionId}`
|
||||
const timeoutMs = 10_000
|
||||
logForDebugging(`[bridge] Fetching session ${sessionId}`)
|
||||
|
||||
let response
|
||||
try {
|
||||
response = await axios.get<{ environment_id?: string; title?: string }>(
|
||||
url,
|
||||
{ headers, timeout: 10_000, validateStatus: s => s < 500 },
|
||||
{ headers, timeout: timeoutMs, validateStatus: s => s < 500 },
|
||||
)
|
||||
} catch (err: unknown) {
|
||||
logForDebugging(
|
||||
`[bridge] Session fetch request failed: ${errorMessage(err)}`,
|
||||
)
|
||||
if (axios.isAxiosError(err)) {
|
||||
const status = err.response?.status ?? 'no-response'
|
||||
const code = err.code ?? 'unknown-code'
|
||||
const requestUrl = err.config?.url ?? url
|
||||
const method = err.config?.method?.toUpperCase() ?? 'GET'
|
||||
const message = err.message ?? errorMessage(err)
|
||||
const timeout = err.config?.timeout ?? timeoutMs
|
||||
|
||||
logForDebugging(
|
||||
`[bridge] Session fetch request failed: status=${status} code=${code} method=${method} url=${requestUrl} timeout=${timeout} message=${message}`,
|
||||
)
|
||||
} else {
|
||||
logForDebugging(
|
||||
`[bridge] Session fetch request failed: url=${url} timeout=${timeoutMs} message=${errorMessage(err)}`,
|
||||
)
|
||||
}
|
||||
return null
|
||||
}
|
||||
|
||||
if (response.status !== 200) {
|
||||
const detail = extractErrorDetail(response.data)
|
||||
logForDebugging(
|
||||
`[bridge] Session fetch failed with status ${response.status}${detail ? `: ${detail}` : ''}`,
|
||||
`[bridge] Session fetch failed with status ${response.status} url=${url}${detail ? `: ${detail}` : ''}`,
|
||||
)
|
||||
return null
|
||||
}
|
||||
|
||||
@@ -465,10 +465,7 @@ export async function initReplBridge(
|
||||
const branch = await getBranch()
|
||||
const gitRepoUrl = await getRemoteUrl()
|
||||
const sessionIngressUrl =
|
||||
process.env.USER_TYPE === 'ant' &&
|
||||
process.env.CLAUDE_BRIDGE_SESSION_INGRESS_URL
|
||||
? process.env.CLAUDE_BRIDGE_SESSION_INGRESS_URL
|
||||
: baseUrl
|
||||
process.env.CLAUDE_BRIDGE_SESSION_INGRESS_URL || baseUrl
|
||||
|
||||
// Assistant-mode sessions advertise a distinct worker_type so the web UI
|
||||
// can filter them into a dedicated picker. KAIROS guard keeps the
|
||||
|
||||
@@ -362,15 +362,9 @@ const proactiveModule =
|
||||
feature('PROACTIVE') || feature('KAIROS')
|
||||
? (require('../proactive/index.js') as typeof import('../proactive/index.js'))
|
||||
: null
|
||||
const cronSchedulerModule = feature('AGENT_TRIGGERS')
|
||||
? (require('../utils/cronScheduler.js') as typeof import('../utils/cronScheduler.js'))
|
||||
: null
|
||||
const cronJitterConfigModule = feature('AGENT_TRIGGERS')
|
||||
? (require('../utils/cronJitterConfig.js') as typeof import('../utils/cronJitterConfig.js'))
|
||||
: null
|
||||
const cronGate = feature('AGENT_TRIGGERS')
|
||||
? (require('../tools/ScheduleCronTool/prompt.js') as typeof import('../tools/ScheduleCronTool/prompt.js'))
|
||||
: null
|
||||
const cronSchedulerModule = require('../utils/cronScheduler.js') as typeof import('../utils/cronScheduler.js')
|
||||
const cronJitterConfigModule = require('../utils/cronJitterConfig.js') as typeof import('../utils/cronJitterConfig.js')
|
||||
const cronGate = require('../tools/ScheduleCronTool/prompt.js') as typeof import('../tools/ScheduleCronTool/prompt.js')
|
||||
const extractMemoriesModule = feature('EXTRACT_MEMORIES')
|
||||
? (require('../services/extractMemories/extractMemories.js') as typeof import('../services/extractMemories/extractMemories.js'))
|
||||
: null
|
||||
@@ -2701,11 +2695,7 @@ function runHeadlessStreaming(
|
||||
// the end of run() picks up the queued command.
|
||||
let cronScheduler: import('../utils/cronScheduler.js').CronScheduler | null =
|
||||
null
|
||||
if (
|
||||
feature('AGENT_TRIGGERS') &&
|
||||
cronSchedulerModule &&
|
||||
cronGate?.isKairosCronEnabled()
|
||||
) {
|
||||
if (cronGate.isKairosCronEnabled()) {
|
||||
cronScheduler = cronSchedulerModule.createCronScheduler({
|
||||
onFire: prompt => {
|
||||
if (inputClosed) return
|
||||
@@ -2727,8 +2717,8 @@ function runHeadlessStreaming(
|
||||
void run()
|
||||
},
|
||||
isLoading: () => running || inputClosed,
|
||||
getJitterConfig: cronJitterConfigModule?.getCronJitterConfig,
|
||||
isKilled: () => !cronGate?.isKairosCronEnabled(),
|
||||
getJitterConfig: cronJitterConfigModule.getCronJitterConfig,
|
||||
isKilled: () => !cronGate.isKairosCronEnabled(),
|
||||
})
|
||||
cronScheduler.start()
|
||||
}
|
||||
@@ -4592,7 +4582,7 @@ function handleSetPermissionMode(
|
||||
subtype: 'error',
|
||||
request_id: requestId,
|
||||
error:
|
||||
'Cannot set permission mode to bypassPermissions because the session was not launched with --dangerously-skip-permissions',
|
||||
'Cannot set permission mode to bypassPermissions. Enable it with --allow-dangerously-skip-permissions or set permissions.allowBypassPermissionsMode in settings.json',
|
||||
},
|
||||
})
|
||||
return toolPermissionContext
|
||||
|
||||
@@ -1,17 +1,12 @@
|
||||
import { execFileSync } from 'child_process'
|
||||
import { diffLines } from 'diff'
|
||||
import { constants as fsConstants } from 'fs'
|
||||
import {
|
||||
copyFile,
|
||||
mkdir,
|
||||
mkdtemp,
|
||||
readdir,
|
||||
readFile,
|
||||
rm,
|
||||
unlink,
|
||||
writeFile,
|
||||
} from 'fs/promises'
|
||||
import { tmpdir } from 'os'
|
||||
import { extname, join } from 'path'
|
||||
import type { Command } from '../commands.js'
|
||||
import { queryWithModel } from '../services/api/claude.js'
|
||||
@@ -22,7 +17,6 @@ import {
|
||||
import type { LogOption } from '../types/logs.js'
|
||||
import { getClaudeConfigHomeDir } from '../utils/envUtils.js'
|
||||
import { toError } from '../utils/errors.js'
|
||||
import { execFileNoThrow } from '../utils/execFileNoThrow.js'
|
||||
import { logError } from '../utils/log.js'
|
||||
import { extractTextContent } from '../utils/messages.js'
|
||||
import { getDefaultOpusModel } from '../utils/model/model.js'
|
||||
@@ -47,180 +41,6 @@ function getInsightsModel(): string {
|
||||
return getDefaultOpusModel()
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// Homespace Data Collection
|
||||
// ============================================================================
|
||||
|
||||
type RemoteHostInfo = {
|
||||
name: string
|
||||
sessionCount: number
|
||||
}
|
||||
|
||||
/* eslint-disable custom-rules/no-process-env-top-level */
|
||||
const getRunningRemoteHosts: () => Promise<string[]> =
|
||||
process.env.USER_TYPE === 'ant'
|
||||
? async () => {
|
||||
const { stdout, code } = await execFileNoThrow(
|
||||
'coder',
|
||||
['list', '-o', 'json'],
|
||||
{ timeout: 30000 },
|
||||
)
|
||||
if (code !== 0) return []
|
||||
try {
|
||||
const workspaces = jsonParse(stdout) as Array<{
|
||||
name: string
|
||||
latest_build?: { status?: string }
|
||||
}>
|
||||
return workspaces
|
||||
.filter(w => w.latest_build?.status === 'running')
|
||||
.map(w => w.name)
|
||||
} catch {
|
||||
return []
|
||||
}
|
||||
}
|
||||
: async () => []
|
||||
|
||||
const getRemoteHostSessionCount: (hs: string) => Promise<number> =
|
||||
process.env.USER_TYPE === 'ant'
|
||||
? async (homespace: string) => {
|
||||
const { stdout, code } = await execFileNoThrow(
|
||||
'ssh',
|
||||
[
|
||||
`${homespace}.coder`,
|
||||
'find /root/.claude/projects -name "*.jsonl" 2>/dev/null | wc -l',
|
||||
],
|
||||
{ timeout: 30000 },
|
||||
)
|
||||
if (code !== 0) return 0
|
||||
return parseInt(stdout.trim(), 10) || 0
|
||||
}
|
||||
: async () => 0
|
||||
|
||||
const collectFromRemoteHost: (
|
||||
hs: string,
|
||||
destDir: string,
|
||||
) => Promise<{ copied: number; skipped: number }> =
|
||||
process.env.USER_TYPE === 'ant'
|
||||
? async (homespace: string, destDir: string) => {
|
||||
const result = { copied: 0, skipped: 0 }
|
||||
|
||||
// Create temp directory
|
||||
const tempDir = await mkdtemp(join(tmpdir(), 'claude-hs-'))
|
||||
|
||||
try {
|
||||
// SCP the projects folder
|
||||
const scpResult = await execFileNoThrow(
|
||||
'scp',
|
||||
['-rq', `${homespace}.coder:/root/.claude/projects/`, tempDir],
|
||||
{ timeout: 300000 },
|
||||
)
|
||||
if (scpResult.code !== 0) {
|
||||
// SCP failed
|
||||
return result
|
||||
}
|
||||
|
||||
const projectsDir = join(tempDir, 'projects')
|
||||
let projectDirents: Awaited<ReturnType<typeof readdir>>
|
||||
try {
|
||||
projectDirents = await readdir(projectsDir, { withFileTypes: true })
|
||||
} catch {
|
||||
return result
|
||||
}
|
||||
|
||||
// Merge into destination (parallel per project directory)
|
||||
await Promise.all(
|
||||
projectDirents.map(async dirent => {
|
||||
const projectName = dirent.name
|
||||
const projectPath = join(projectsDir, projectName)
|
||||
|
||||
// Skip if not a directory
|
||||
if (!dirent.isDirectory()) return
|
||||
|
||||
const destProjectName = `${projectName}__${homespace}`
|
||||
const destProjectPath = join(destDir, destProjectName)
|
||||
|
||||
try {
|
||||
await mkdir(destProjectPath, { recursive: true })
|
||||
} catch {
|
||||
// Directory may already exist
|
||||
}
|
||||
|
||||
// Copy session files (skip existing)
|
||||
let files: Awaited<ReturnType<typeof readdir>>
|
||||
try {
|
||||
files = await readdir(projectPath, { withFileTypes: true })
|
||||
} catch {
|
||||
return
|
||||
}
|
||||
await Promise.all(
|
||||
files.map(async fileDirent => {
|
||||
const fileName = fileDirent.name
|
||||
if (!fileName.endsWith('.jsonl')) return
|
||||
|
||||
const srcFile = join(projectPath, fileName)
|
||||
const destFile = join(destProjectPath, fileName)
|
||||
|
||||
try {
|
||||
await copyFile(srcFile, destFile, fsConstants.COPYFILE_EXCL)
|
||||
result.copied++
|
||||
} catch {
|
||||
// EEXIST from COPYFILE_EXCL means dest already exists
|
||||
result.skipped++
|
||||
}
|
||||
}),
|
||||
)
|
||||
}),
|
||||
)
|
||||
} finally {
|
||||
try {
|
||||
await rm(tempDir, { recursive: true, force: true })
|
||||
} catch {
|
||||
// Ignore cleanup errors
|
||||
}
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
: async () => ({ copied: 0, skipped: 0 })
|
||||
|
||||
const collectAllRemoteHostData: (destDir: string) => Promise<{
|
||||
hosts: RemoteHostInfo[]
|
||||
totalCopied: number
|
||||
totalSkipped: number
|
||||
}> =
|
||||
process.env.USER_TYPE === 'ant'
|
||||
? async (destDir: string) => {
|
||||
const rHosts = await getRunningRemoteHosts()
|
||||
const result: RemoteHostInfo[] = []
|
||||
let totalCopied = 0
|
||||
let totalSkipped = 0
|
||||
|
||||
// Collect from all hosts in parallel (SCP per host can take seconds)
|
||||
const hostResults = await Promise.all(
|
||||
rHosts.map(async hs => {
|
||||
const sessionCount = await getRemoteHostSessionCount(hs)
|
||||
if (sessionCount > 0) {
|
||||
const { copied, skipped } = await collectFromRemoteHost(
|
||||
hs,
|
||||
destDir,
|
||||
)
|
||||
return { name: hs, sessionCount, copied, skipped }
|
||||
}
|
||||
return { name: hs, sessionCount, copied: 0, skipped: 0 }
|
||||
}),
|
||||
)
|
||||
|
||||
for (const hr of hostResults) {
|
||||
result.push({ name: hr.name, sessionCount: hr.sessionCount })
|
||||
totalCopied += hr.copied
|
||||
totalSkipped += hr.skipped
|
||||
}
|
||||
|
||||
return { hosts: result, totalCopied, totalSkipped }
|
||||
}
|
||||
: async () => ({ hosts: [], totalCopied: 0, totalSkipped: 0 })
|
||||
/* eslint-enable custom-rules/no-process-env-top-level */
|
||||
|
||||
// ============================================================================
|
||||
// Types
|
||||
// ============================================================================
|
||||
@@ -2659,7 +2479,6 @@ export type InsightsExport = {
|
||||
claude_code_version: string
|
||||
date_range: { start: string; end: string }
|
||||
session_count: number
|
||||
remote_hosts_collected?: string[]
|
||||
}
|
||||
aggregated_data: AggregatedData
|
||||
insights: InsightResults
|
||||
@@ -2680,14 +2499,9 @@ export function buildExportData(
|
||||
data: AggregatedData,
|
||||
insights: InsightResults,
|
||||
facets: Map<string, SessionFacets>,
|
||||
remoteStats?: { hosts: RemoteHostInfo[]; totalCopied: number },
|
||||
): InsightsExport {
|
||||
const version = typeof MACRO !== 'undefined' ? MACRO.VERSION : 'unknown'
|
||||
|
||||
const remote_hosts_collected = remoteStats?.hosts
|
||||
.filter(h => h.sessionCount > 0)
|
||||
.map(h => h.name)
|
||||
|
||||
const facets_summary = {
|
||||
total: facets.size,
|
||||
goal_categories: {} as Record<string, number>,
|
||||
@@ -2725,10 +2539,6 @@ export function buildExportData(
|
||||
claude_code_version: version,
|
||||
date_range: data.date_range,
|
||||
session_count: data.total_sessions,
|
||||
...(remote_hosts_collected &&
|
||||
remote_hosts_collected.length > 0 && {
|
||||
remote_hosts_collected,
|
||||
}),
|
||||
},
|
||||
aggregated_data: data,
|
||||
insights,
|
||||
@@ -2793,24 +2603,12 @@ async function scanAllSessions(): Promise<LiteSessionInfo[]> {
|
||||
// Main Function
|
||||
// ============================================================================
|
||||
|
||||
export async function generateUsageReport(options?: {
|
||||
collectRemote?: boolean
|
||||
}): Promise<{
|
||||
export async function generateUsageReport(): Promise<{
|
||||
insights: InsightResults
|
||||
htmlPath: string
|
||||
data: AggregatedData
|
||||
remoteStats?: { hosts: RemoteHostInfo[]; totalCopied: number }
|
||||
facets: Map<string, SessionFacets>
|
||||
}> {
|
||||
let remoteStats: { hosts: RemoteHostInfo[]; totalCopied: number } | undefined
|
||||
|
||||
// Optionally collect data from remote hosts first (internal-only)
|
||||
if (process.env.USER_TYPE === 'ant' && options?.collectRemote) {
|
||||
const destDir = join(getClaudeConfigHomeDir(), 'projects')
|
||||
const { hosts, totalCopied } = await collectAllRemoteHostData(destDir)
|
||||
remoteStats = { hosts, totalCopied }
|
||||
}
|
||||
|
||||
// Phase 1: Lite scan — filesystem metadata only (no JSONL parsing)
|
||||
const allScannedSessions = await scanAllSessions()
|
||||
const totalSessionsScanned = allScannedSessions.length
|
||||
@@ -3017,7 +2815,6 @@ export async function generateUsageReport(options?: {
|
||||
insights,
|
||||
htmlPath,
|
||||
data: aggregated,
|
||||
remoteStats,
|
||||
facets: substantiveFacets,
|
||||
}
|
||||
}
|
||||
@@ -3043,31 +2840,8 @@ const usageReport: Command = {
|
||||
contentLength: 0, // Dynamic content
|
||||
progressMessage: 'analyzing your sessions',
|
||||
source: 'builtin',
|
||||
async getPromptForCommand(args) {
|
||||
let collectRemote = false
|
||||
let remoteHosts: string[] = []
|
||||
let hasRemoteHosts = false
|
||||
|
||||
if (process.env.USER_TYPE === 'ant') {
|
||||
// Parse --homespaces flag
|
||||
collectRemote = args?.includes('--homespaces') ?? false
|
||||
|
||||
// Check for available remote hosts
|
||||
remoteHosts = await getRunningRemoteHosts()
|
||||
hasRemoteHosts = remoteHosts.length > 0
|
||||
|
||||
// Show collection message if collecting
|
||||
if (collectRemote && hasRemoteHosts) {
|
||||
// biome-ignore lint/suspicious/noConsole: intentional
|
||||
console.error(
|
||||
`Collecting sessions from ${remoteHosts.length} homespace(s): ${remoteHosts.join(', ')}...`,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
const { insights, htmlPath, data, remoteStats } = await generateUsageReport(
|
||||
{ collectRemote },
|
||||
)
|
||||
async getPromptForCommand(_args) {
|
||||
const { insights, htmlPath, data } = await generateUsageReport()
|
||||
|
||||
let reportUrl = `file://${htmlPath}`
|
||||
let uploadHint = ''
|
||||
@@ -3085,20 +2859,6 @@ const usageReport: Command = {
|
||||
`${data.git_commits} commits`,
|
||||
].join(' · ')
|
||||
|
||||
// Build remote host info (internal-only)
|
||||
let remoteInfo = ''
|
||||
if (process.env.USER_TYPE === 'ant') {
|
||||
if (remoteStats && remoteStats.totalCopied > 0) {
|
||||
const hsNames = remoteStats.hosts
|
||||
.filter(h => h.sessionCount > 0)
|
||||
.map(h => h.name)
|
||||
.join(', ')
|
||||
remoteInfo = `\n_Collected ${remoteStats.totalCopied} new sessions from: ${hsNames}_\n`
|
||||
} else if (!collectRemote && hasRemoteHosts) {
|
||||
// Suggest using --homespaces if they have remote hosts but didn't use the flag
|
||||
remoteInfo = `\n_Tip: Run \`/insights --homespaces\` to include sessions from your ${remoteHosts.length} running homespace(s)_\n`
|
||||
}
|
||||
}
|
||||
|
||||
// Build markdown summary from insights
|
||||
const atAGlance = insights.at_a_glance
|
||||
@@ -3118,7 +2878,6 @@ ${atAGlance.ambitious_workflows ? `**Ambitious workflows:** ${atAGlance.ambitiou
|
||||
|
||||
${stats}
|
||||
${data.date_range.start} to ${data.date_range.end}
|
||||
${remoteInfo}
|
||||
`
|
||||
|
||||
const userSummary = `${header}${summaryText}
|
||||
|
||||
@@ -1,20 +1,28 @@
|
||||
import { PassThrough } from 'node:stream'
|
||||
|
||||
import { expect, test } from 'bun:test'
|
||||
import { afterEach, expect, mock, test } from 'bun:test'
|
||||
import React from 'react'
|
||||
import stripAnsi from 'strip-ansi'
|
||||
|
||||
import { createRoot, render, useApp } from '../../ink.js'
|
||||
import { AppStateProvider } from '../../state/AppState.js'
|
||||
import {
|
||||
applySavedProfileToCurrentSession,
|
||||
buildCodexOAuthProfileEnv,
|
||||
buildCurrentProviderSummary,
|
||||
buildProfileSaveMessage,
|
||||
getProviderWizardDefaults,
|
||||
ProviderWizard,
|
||||
TextEntryDialog,
|
||||
} from './provider.js'
|
||||
import { createProfileFile } from '../../utils/providerProfile.js'
|
||||
|
||||
const SYNC_START = '\x1B[?2026h'
|
||||
const SYNC_END = '\x1B[?2026l'
|
||||
const ORIGINAL_SIMPLE_ENV = process.env.CLAUDE_CODE_SIMPLE
|
||||
const ORIGINAL_CODEX_API_KEY = process.env.CODEX_API_KEY
|
||||
const ORIGINAL_CHATGPT_ACCOUNT_ID = process.env.CHATGPT_ACCOUNT_ID
|
||||
const ORIGINAL_CODEX_ACCOUNT_ID = process.env.CODEX_ACCOUNT_ID
|
||||
|
||||
function extractLastFrame(output: string): string {
|
||||
let lastFrame: string | null = null
|
||||
@@ -60,6 +68,51 @@ async function renderFinalFrame(node: React.ReactNode): Promise<string> {
|
||||
return stripAnsi(extractLastFrame(getOutput()))
|
||||
}
|
||||
|
||||
async function waitForOutput(
|
||||
getOutput: () => string,
|
||||
predicate: (output: string) => boolean,
|
||||
timeoutMs = 2500,
|
||||
): Promise<string> {
|
||||
const startedAt = Date.now()
|
||||
|
||||
while (Date.now() - startedAt < timeoutMs) {
|
||||
const output = stripAnsi(extractLastFrame(getOutput()))
|
||||
if (predicate(output)) {
|
||||
return output
|
||||
}
|
||||
await Bun.sleep(10)
|
||||
}
|
||||
|
||||
throw new Error('Timed out waiting for ProviderWizard test output')
|
||||
}
|
||||
|
||||
async function renderProviderWizardFrame(): Promise<string> {
|
||||
const { stdout, stdin, getOutput } = createTestStreams()
|
||||
const root = await createRoot({
|
||||
stdout: stdout as unknown as NodeJS.WriteStream,
|
||||
stdin: stdin as unknown as NodeJS.ReadStream,
|
||||
patchConsole: false,
|
||||
})
|
||||
|
||||
root.render(
|
||||
<AppStateProvider>
|
||||
<ProviderWizard onDone={() => {}} />
|
||||
</AppStateProvider>,
|
||||
)
|
||||
|
||||
try {
|
||||
return await waitForOutput(
|
||||
getOutput,
|
||||
output => output.includes('Set up a provider profile'),
|
||||
)
|
||||
} finally {
|
||||
root.unmount()
|
||||
stdin.end()
|
||||
stdout.end()
|
||||
await Bun.sleep(0)
|
||||
}
|
||||
}
|
||||
|
||||
function createTestStreams(): {
|
||||
stdout: PassThrough
|
||||
stdin: PassThrough & {
|
||||
@@ -94,6 +147,34 @@ function createTestStreams(): {
|
||||
}
|
||||
}
|
||||
|
||||
afterEach(() => {
|
||||
mock.restore()
|
||||
|
||||
if (ORIGINAL_SIMPLE_ENV === undefined) {
|
||||
delete process.env.CLAUDE_CODE_SIMPLE
|
||||
} else {
|
||||
process.env.CLAUDE_CODE_SIMPLE = ORIGINAL_SIMPLE_ENV
|
||||
}
|
||||
|
||||
if (ORIGINAL_CODEX_API_KEY === undefined) {
|
||||
delete process.env.CODEX_API_KEY
|
||||
} else {
|
||||
process.env.CODEX_API_KEY = ORIGINAL_CODEX_API_KEY
|
||||
}
|
||||
|
||||
if (ORIGINAL_CHATGPT_ACCOUNT_ID === undefined) {
|
||||
delete process.env.CHATGPT_ACCOUNT_ID
|
||||
} else {
|
||||
process.env.CHATGPT_ACCOUNT_ID = ORIGINAL_CHATGPT_ACCOUNT_ID
|
||||
}
|
||||
|
||||
if (ORIGINAL_CODEX_ACCOUNT_ID === undefined) {
|
||||
delete process.env.CODEX_ACCOUNT_ID
|
||||
} else {
|
||||
process.env.CODEX_ACCOUNT_ID = ORIGINAL_CODEX_ACCOUNT_ID
|
||||
}
|
||||
})
|
||||
|
||||
function StepChangeHarness(): React.ReactNode {
|
||||
const { exit } = useApp()
|
||||
const [step, setStep] = React.useState<'api' | 'model'>('api')
|
||||
@@ -233,6 +314,167 @@ test('buildProfileSaveMessage describes Gemini access token / ADC mode clearly',
|
||||
expect(message).not.toContain('AIza')
|
||||
})
|
||||
|
||||
test('buildProfileSaveMessage reflects immediate Codex activation for existing credentials', () => {
|
||||
const message = buildProfileSaveMessage(
|
||||
'codex',
|
||||
{
|
||||
OPENAI_MODEL: 'codexplan',
|
||||
OPENAI_BASE_URL: 'https://chatgpt.com/backend-api/codex',
|
||||
CHATGPT_ACCOUNT_ID: 'acct_codex',
|
||||
},
|
||||
'D:/codings/Opensource/openclaude/.openclaude-profile.json',
|
||||
{
|
||||
activatedInSession: true,
|
||||
},
|
||||
)
|
||||
|
||||
expect(message).toContain('Saved Codex profile.')
|
||||
expect(message).toContain('OpenClaude switched to it for this session.')
|
||||
expect(message).not.toContain('Restart OpenClaude to use it.')
|
||||
})
|
||||
|
||||
test('buildProfileSaveMessage reflects immediate Codex OAuth activation when the session switched successfully', () => {
|
||||
const message = buildProfileSaveMessage(
|
||||
'codex',
|
||||
{
|
||||
OPENAI_MODEL: 'codexplan',
|
||||
OPENAI_BASE_URL: 'https://chatgpt.com/backend-api/codex',
|
||||
CHATGPT_ACCOUNT_ID: 'acct_codex',
|
||||
CODEX_CREDENTIAL_SOURCE: 'oauth',
|
||||
},
|
||||
'D:/codings/Opensource/openclaude/.openclaude-profile.json',
|
||||
{
|
||||
activatedInSession: true,
|
||||
},
|
||||
)
|
||||
|
||||
expect(message).toContain('Saved Codex profile.')
|
||||
expect(message).toContain('OpenClaude switched to it for this session.')
|
||||
expect(message).not.toContain('Restart OpenClaude to use it.')
|
||||
})
|
||||
|
||||
test('buildCodexOAuthProfileEnv uses the fresh OAuth account id without persisting an API key', () => {
|
||||
process.env.CODEX_API_KEY = 'stale-codex-key'
|
||||
process.env.CHATGPT_ACCOUNT_ID = 'acct_stale'
|
||||
|
||||
const env = buildCodexOAuthProfileEnv({
|
||||
accessToken: 'oauth-access-token',
|
||||
accountId: 'acct_oauth',
|
||||
})
|
||||
|
||||
expect(env).toEqual({
|
||||
OPENAI_BASE_URL: 'https://chatgpt.com/backend-api/codex',
|
||||
OPENAI_MODEL: 'codexplan',
|
||||
CHATGPT_ACCOUNT_ID: 'acct_oauth',
|
||||
CODEX_CREDENTIAL_SOURCE: 'oauth',
|
||||
})
|
||||
expect(env).not.toHaveProperty('CODEX_API_KEY')
|
||||
})
|
||||
|
||||
test('buildCodexProfileEnv derives oauth source from secure storage when no explicit source is provided', async () => {
|
||||
const actualProviderConfig = await import('../../services/api/providerConfig.js')
|
||||
|
||||
mock.module('../../services/api/providerConfig.js', () => ({
|
||||
...actualProviderConfig,
|
||||
resolveCodexApiCredentials: () => ({
|
||||
apiKey: 'stored-access-token',
|
||||
accountId: 'acct_secure_storage',
|
||||
source: 'secure-storage' as const,
|
||||
}),
|
||||
}))
|
||||
|
||||
// @ts-expect-error cache-busting query string for Bun module mocks
|
||||
const { buildCodexProfileEnv } = await import(
|
||||
'../../utils/providerProfile.js?secure-storage-codex-source'
|
||||
)
|
||||
|
||||
const env = buildCodexProfileEnv({
|
||||
model: 'codexplan',
|
||||
processEnv: {},
|
||||
})
|
||||
|
||||
expect(env).toEqual({
|
||||
OPENAI_BASE_URL: 'https://chatgpt.com/backend-api/codex',
|
||||
OPENAI_MODEL: 'codexplan',
|
||||
CHATGPT_ACCOUNT_ID: 'acct_secure_storage',
|
||||
CODEX_CREDENTIAL_SOURCE: 'oauth',
|
||||
})
|
||||
})
|
||||
|
||||
test('applySavedProfileToCurrentSession switches the current env to the saved Codex profile', async () => {
|
||||
// @ts-expect-error cache-busting query string for Bun module mocks
|
||||
const { applySavedProfileToCurrentSession } = await import(
|
||||
'../../utils/providerProfile.js?apply-saved-profile-codex'
|
||||
)
|
||||
const processEnv: NodeJS.ProcessEnv = {
|
||||
CLAUDE_CODE_USE_OPENAI: '1',
|
||||
OPENAI_MODEL: 'gpt-4o',
|
||||
OPENAI_BASE_URL: 'https://api.openai.com/v1',
|
||||
OPENAI_API_KEY: 'sk-openai',
|
||||
CODEX_API_KEY: 'codex-live',
|
||||
CHATGPT_ACCOUNT_ID: 'acct_codex',
|
||||
CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED: '1',
|
||||
CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED_ID: 'provider_old',
|
||||
}
|
||||
const profileFile = createProfileFile('codex', {
|
||||
OPENAI_MODEL: 'codexplan',
|
||||
OPENAI_BASE_URL: 'https://chatgpt.com/backend-api/codex',
|
||||
CODEX_API_KEY: 'codex-live',
|
||||
CHATGPT_ACCOUNT_ID: 'acct_codex',
|
||||
})
|
||||
|
||||
const warning = await applySavedProfileToCurrentSession({
|
||||
profileFile,
|
||||
processEnv,
|
||||
})
|
||||
|
||||
expect(warning).toBeNull()
|
||||
expect(processEnv.CLAUDE_CODE_USE_OPENAI).toBe('1')
|
||||
expect(processEnv.OPENAI_MODEL).toBe('codexplan')
|
||||
expect(processEnv.OPENAI_BASE_URL).toBe(
|
||||
'https://chatgpt.com/backend-api/codex',
|
||||
)
|
||||
expect(processEnv.CODEX_API_KEY).toBe('codex-live')
|
||||
expect(processEnv.CHATGPT_ACCOUNT_ID).toBe('acct_codex')
|
||||
expect(processEnv.OPENAI_API_KEY).toBeUndefined()
|
||||
expect(processEnv.CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED).toBeUndefined()
|
||||
expect(processEnv.CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED_ID).toBeUndefined()
|
||||
})
|
||||
|
||||
test('applySavedProfileToCurrentSession ignores stale Codex env overrides for OAuth-backed profiles', async () => {
|
||||
// @ts-expect-error cache-busting query string for Bun module mocks
|
||||
const { applySavedProfileToCurrentSession } = await import(
|
||||
'../../utils/providerProfile.js?apply-saved-profile-codex-oauth'
|
||||
)
|
||||
const processEnv: NodeJS.ProcessEnv = {
|
||||
CLAUDE_CODE_USE_OPENAI: '1',
|
||||
OPENAI_MODEL: 'gpt-4o',
|
||||
OPENAI_BASE_URL: 'https://api.openai.com/v1',
|
||||
CODEX_API_KEY: 'stale-codex-key',
|
||||
CHATGPT_ACCOUNT_ID: 'acct_stale',
|
||||
}
|
||||
const profileFile = createProfileFile('codex', {
|
||||
OPENAI_MODEL: 'codexplan',
|
||||
OPENAI_BASE_URL: 'https://chatgpt.com/backend-api/codex',
|
||||
CHATGPT_ACCOUNT_ID: 'acct_oauth',
|
||||
CODEX_CREDENTIAL_SOURCE: 'oauth',
|
||||
})
|
||||
|
||||
const warning = await applySavedProfileToCurrentSession({
|
||||
profileFile,
|
||||
processEnv,
|
||||
})
|
||||
|
||||
expect(warning).toBeNull()
|
||||
expect(processEnv.OPENAI_MODEL).toBe('codexplan')
|
||||
expect(processEnv.OPENAI_BASE_URL).toBe(
|
||||
'https://chatgpt.com/backend-api/codex',
|
||||
)
|
||||
expect(processEnv.CODEX_API_KEY).toBeUndefined()
|
||||
expect(processEnv.CHATGPT_ACCOUNT_ID).not.toBe('acct_stale')
|
||||
expect(processEnv.CHATGPT_ACCOUNT_ID).toBeTruthy()
|
||||
})
|
||||
|
||||
test('buildCurrentProviderSummary redacts poisoned model and endpoint values', () => {
|
||||
const summary = buildCurrentProviderSummary({
|
||||
processEnv: {
|
||||
@@ -264,7 +506,7 @@ test('buildCurrentProviderSummary labels generic local openai-compatible provide
|
||||
expect(summary.endpointLabel).toBe('http://127.0.0.1:8080/v1')
|
||||
})
|
||||
|
||||
test('buildCurrentProviderSummary does not relabel local gpt-5.4 providers as Codex', () => {
|
||||
test('buildCurrentProviderSummary does not relabel local gpt-5.4 providers as Codex when custom base URL is set', () => {
|
||||
const summary = buildCurrentProviderSummary({
|
||||
processEnv: {
|
||||
CLAUDE_CODE_USE_OPENAI: '1',
|
||||
@@ -307,3 +549,12 @@ test('getProviderWizardDefaults ignores poisoned current provider values', () =>
|
||||
expect(defaults.openAIBaseUrl).toBe('https://api.openai.com/v1')
|
||||
expect(defaults.geminiModel).toBe('gemini-2.0-flash')
|
||||
})
|
||||
|
||||
test('ProviderWizard hides Codex OAuth while running in bare mode', async () => {
|
||||
process.env.CLAUDE_CODE_SIMPLE = '1'
|
||||
|
||||
const output = await renderProviderWizardFrame()
|
||||
|
||||
expect(output).toContain('Set up a provider profile')
|
||||
expect(output).not.toContain('Codex OAuth')
|
||||
})
|
||||
|
||||
@@ -10,8 +10,12 @@ import {
|
||||
} from '../../components/CustomSelect/index.js'
|
||||
import { Dialog } from '../../components/design-system/Dialog.js'
|
||||
import { LoadingState } from '../../components/design-system/LoadingState.js'
|
||||
import { useCodexOAuthFlow } from '../../components/useCodexOAuthFlow.js'
|
||||
import { useTerminalSize } from '../../hooks/useTerminalSize.js'
|
||||
import { Box, Text } from '../../ink.js'
|
||||
import {
|
||||
type CodexOAuthTokens,
|
||||
} from '../../services/api/codexOAuth.js'
|
||||
import {
|
||||
DEFAULT_CODEX_BASE_URL,
|
||||
DEFAULT_OPENAI_BASE_URL,
|
||||
@@ -20,6 +24,8 @@ import {
|
||||
resolveProviderRequest,
|
||||
} from '../../services/api/providerConfig.js'
|
||||
import {
|
||||
applySavedProfileToCurrentSession as applySharedProfileToCurrentSession,
|
||||
buildCodexOAuthProfileEnv as buildSharedCodexOAuthProfileEnv,
|
||||
buildCodexProfileEnv,
|
||||
buildGeminiProfileEnv,
|
||||
buildMistralProfileEnv,
|
||||
@@ -49,6 +55,7 @@ import {
|
||||
readGeminiAccessToken,
|
||||
saveGeminiAccessToken,
|
||||
} from '../../utils/geminiCredentials.js'
|
||||
import { isBareMode } from '../../utils/envUtils.js'
|
||||
import {
|
||||
getGoalDefaultOpenAIModel,
|
||||
normalizeRecommendationGoal,
|
||||
@@ -57,12 +64,13 @@ import {
|
||||
type RecommendationGoal,
|
||||
} from '../../utils/providerRecommendation.js'
|
||||
import {
|
||||
getOllamaChatBaseUrl,
|
||||
getLocalOpenAICompatibleProviderLabel,
|
||||
hasLocalOllama,
|
||||
listOllamaModels,
|
||||
} from '../../utils/providerDiscovery.js'
|
||||
|
||||
type ProviderChoice = 'auto' | ProviderProfile | 'clear'
|
||||
type ProviderChoice = 'auto' | ProviderProfile | 'codex-oauth' | 'clear'
|
||||
|
||||
type Step =
|
||||
| { name: 'choose' }
|
||||
@@ -93,6 +101,7 @@ type Step =
|
||||
apiKey?: string
|
||||
authMode: 'api-key' | 'access-token' | 'adc'
|
||||
}
|
||||
| { name: 'codex-oauth' }
|
||||
| { name: 'codex-check' }
|
||||
|
||||
type CurrentProviderSummary = {
|
||||
@@ -131,6 +140,8 @@ type ProviderWizardDefaults = {
|
||||
mistralBaseUrl: string
|
||||
}
|
||||
|
||||
type SecretSourceEnv = NodeJS.ProcessEnv & Partial<ProfileEnv>
|
||||
|
||||
function isEnvTruthy(value: string | undefined): boolean {
|
||||
if (!value) return false
|
||||
const normalized = value.trim().toLowerCase()
|
||||
@@ -139,7 +150,7 @@ function isEnvTruthy(value: string | undefined): boolean {
|
||||
|
||||
function getSafeDisplayValue(
|
||||
value: string | undefined,
|
||||
processEnv: NodeJS.ProcessEnv,
|
||||
processEnv: SecretSourceEnv,
|
||||
profileEnv?: ProfileEnv,
|
||||
fallback = '(not set)',
|
||||
): string {
|
||||
@@ -151,14 +162,15 @@ function getSafeDisplayValue(
|
||||
export function getProviderWizardDefaults(
|
||||
processEnv: NodeJS.ProcessEnv = process.env,
|
||||
): ProviderWizardDefaults {
|
||||
const secretSource = processEnv as SecretSourceEnv
|
||||
const safeOpenAIModel =
|
||||
sanitizeProviderConfigValue(processEnv.OPENAI_MODEL, processEnv) ||
|
||||
sanitizeProviderConfigValue(processEnv.OPENAI_MODEL, secretSource) ||
|
||||
'gpt-4o'
|
||||
const safeOpenAIBaseUrl =
|
||||
sanitizeProviderConfigValue(processEnv.OPENAI_BASE_URL, processEnv) ||
|
||||
sanitizeProviderConfigValue(processEnv.OPENAI_BASE_URL, secretSource) ||
|
||||
DEFAULT_OPENAI_BASE_URL
|
||||
const safeGeminiModel =
|
||||
sanitizeProviderConfigValue(processEnv.GEMINI_MODEL, processEnv) ||
|
||||
sanitizeProviderConfigValue(processEnv.GEMINI_MODEL, secretSource) ||
|
||||
DEFAULT_GEMINI_MODEL
|
||||
const safeMistralModel =
|
||||
sanitizeProviderConfigValue(processEnv.MISTRAL_MODEL, processEnv) ||
|
||||
@@ -181,6 +193,7 @@ export function buildCurrentProviderSummary(options?: {
|
||||
persisted?: ProfileFile | null
|
||||
}): CurrentProviderSummary {
|
||||
const processEnv = options?.processEnv ?? process.env
|
||||
const secretSource = processEnv as SecretSourceEnv
|
||||
const persisted = options?.persisted ?? loadProfileFile()
|
||||
const savedProfileLabel = persisted?.profile ?? 'none'
|
||||
|
||||
@@ -189,11 +202,11 @@ export function buildCurrentProviderSummary(options?: {
|
||||
providerLabel: 'Google Gemini',
|
||||
modelLabel: getSafeDisplayValue(
|
||||
processEnv.GEMINI_MODEL ?? DEFAULT_GEMINI_MODEL,
|
||||
processEnv,
|
||||
secretSource,
|
||||
),
|
||||
endpointLabel: getSafeDisplayValue(
|
||||
processEnv.GEMINI_BASE_URL ?? DEFAULT_GEMINI_BASE_URL,
|
||||
processEnv,
|
||||
secretSource,
|
||||
),
|
||||
savedProfileLabel,
|
||||
}
|
||||
@@ -219,13 +232,13 @@ export function buildCurrentProviderSummary(options?: {
|
||||
providerLabel: 'GitHub Models',
|
||||
modelLabel: getSafeDisplayValue(
|
||||
processEnv.OPENAI_MODEL ?? 'github:copilot',
|
||||
processEnv,
|
||||
secretSource,
|
||||
),
|
||||
endpointLabel: getSafeDisplayValue(
|
||||
processEnv.OPENAI_BASE_URL ??
|
||||
processEnv.OPENAI_API_BASE ??
|
||||
'https://models.github.ai/inference',
|
||||
processEnv,
|
||||
secretSource,
|
||||
),
|
||||
savedProfileLabel,
|
||||
}
|
||||
@@ -246,8 +259,8 @@ export function buildCurrentProviderSummary(options?: {
|
||||
|
||||
return {
|
||||
providerLabel,
|
||||
modelLabel: getSafeDisplayValue(request.requestedModel, processEnv),
|
||||
endpointLabel: getSafeDisplayValue(request.baseUrl, processEnv),
|
||||
modelLabel: getSafeDisplayValue(request.requestedModel, secretSource),
|
||||
endpointLabel: getSafeDisplayValue(request.baseUrl, secretSource),
|
||||
savedProfileLabel,
|
||||
}
|
||||
}
|
||||
@@ -258,11 +271,11 @@ export function buildCurrentProviderSummary(options?: {
|
||||
processEnv.ANTHROPIC_MODEL ??
|
||||
processEnv.CLAUDE_MODEL ??
|
||||
'claude-sonnet-4-6',
|
||||
processEnv,
|
||||
secretSource,
|
||||
),
|
||||
endpointLabel: getSafeDisplayValue(
|
||||
processEnv.ANTHROPIC_BASE_URL ?? 'https://api.anthropic.com',
|
||||
processEnv,
|
||||
secretSource,
|
||||
),
|
||||
savedProfileLabel,
|
||||
}
|
||||
@@ -376,6 +389,10 @@ export function buildProfileSaveMessage(
|
||||
profile: ProviderProfile,
|
||||
env: ProfileEnv,
|
||||
filePath: string,
|
||||
options?: {
|
||||
activatedInSession?: boolean
|
||||
activationWarning?: string | null
|
||||
},
|
||||
): string {
|
||||
const summary = buildSavedProfileSummary(profile, env)
|
||||
const lines = [
|
||||
@@ -389,13 +406,24 @@ export function buildProfileSaveMessage(
|
||||
}
|
||||
|
||||
lines.push(`Profile: ${filePath}`)
|
||||
lines.push('Restart OpenClaude to use it.')
|
||||
if (options?.activatedInSession) {
|
||||
lines.push('OpenClaude switched to it for this session.')
|
||||
} else if (options?.activationWarning) {
|
||||
lines.push(
|
||||
`Saved for next startup. Warning: could not activate it in this session (${options.activationWarning}).`,
|
||||
)
|
||||
} else {
|
||||
lines.push('Restart OpenClaude to use it.')
|
||||
}
|
||||
|
||||
return lines.join('\n')
|
||||
}
|
||||
|
||||
function buildUsageText(): string {
|
||||
const summary = buildCurrentProviderSummary()
|
||||
const availableProviders = isBareMode()
|
||||
? 'Choose Auto, Ollama, OpenAI-compatible, Gemini, or Codex, then save a provider profile.'
|
||||
: 'Choose Auto, Ollama, OpenAI-compatible, Gemini, Codex, or Codex OAuth, then save a provider profile.'
|
||||
return [
|
||||
'Usage: /provider',
|
||||
'',
|
||||
@@ -406,7 +434,7 @@ function buildUsageText(): string {
|
||||
`Current endpoint: ${summary.endpointLabel}`,
|
||||
`Saved profile: ${summary.savedProfileLabel}`,
|
||||
'',
|
||||
'Choose Auto, Ollama, OpenAI-compatible, Gemini, or Codex, then save a profile for the next OpenClaude restart.',
|
||||
availableProviders,
|
||||
].join('\n')
|
||||
}
|
||||
|
||||
@@ -415,12 +443,45 @@ function finishProfileSave(
|
||||
profile: ProviderProfile,
|
||||
env: ProfileEnv,
|
||||
): void {
|
||||
void saveProfileAndNotify(onDone, profile, env)
|
||||
}
|
||||
|
||||
export function buildCodexOAuthProfileEnv(
|
||||
tokens: Pick<CodexOAuthTokens, 'accessToken' | 'idToken' | 'accountId'>,
|
||||
): ProfileEnv | null {
|
||||
return buildSharedCodexOAuthProfileEnv(tokens)
|
||||
}
|
||||
|
||||
export async function applySavedProfileToCurrentSession(options: {
|
||||
profileFile: ProfileFile
|
||||
processEnv?: NodeJS.ProcessEnv
|
||||
}): Promise<string | null> {
|
||||
return applySharedProfileToCurrentSession(options)
|
||||
}
|
||||
|
||||
async function saveProfileAndNotify(
|
||||
onDone: LocalJSXCommandOnDone,
|
||||
profile: ProviderProfile,
|
||||
env: ProfileEnv,
|
||||
): Promise<void> {
|
||||
try {
|
||||
const profileFile = createProfileFile(profile, env)
|
||||
const filePath = saveProfileFile(profileFile)
|
||||
onDone(buildProfileSaveMessage(profile, env, filePath), {
|
||||
display: 'system',
|
||||
})
|
||||
const shouldActivateInSession = profile === 'codex'
|
||||
const activationWarning = shouldActivateInSession
|
||||
? await applySharedProfileToCurrentSession({ profileFile })
|
||||
: null
|
||||
|
||||
onDone(
|
||||
buildProfileSaveMessage(profile, env, filePath, {
|
||||
activatedInSession:
|
||||
shouldActivateInSession && activationWarning === null,
|
||||
activationWarning,
|
||||
}),
|
||||
{
|
||||
display: 'system',
|
||||
},
|
||||
)
|
||||
} catch (error) {
|
||||
const message = error instanceof Error ? error.message : String(error)
|
||||
onDone(`Failed to save provider profile: ${message}`, {
|
||||
@@ -504,6 +565,10 @@ function ProviderChooser({
|
||||
onCancel: () => void
|
||||
}): React.ReactNode {
|
||||
const summary = buildCurrentProviderSummary()
|
||||
const canUseCodexOAuth = !isBareMode()
|
||||
const helperText = canUseCodexOAuth
|
||||
? 'Save a provider profile without editing environment variables first. Codex profiles backed by env, auth.json, or OpenClaude secure storage can switch this session immediately when validation succeeds.'
|
||||
: 'Save a provider profile without editing environment variables first. Codex profiles backed by env or auth.json can switch this session immediately.'
|
||||
const options: OptionWithDescription<ProviderChoice>[] = [
|
||||
{
|
||||
label: 'Auto',
|
||||
@@ -537,6 +602,16 @@ function ProviderChooser({
|
||||
value: 'codex',
|
||||
description: 'Use existing ChatGPT Codex CLI auth or env credentials',
|
||||
},
|
||||
...(canUseCodexOAuth
|
||||
? [
|
||||
{
|
||||
label: 'Codex OAuth',
|
||||
value: 'codex-oauth' as const,
|
||||
description:
|
||||
'Sign in with ChatGPT in your browser and store Codex tokens securely',
|
||||
},
|
||||
]
|
||||
: []),
|
||||
]
|
||||
|
||||
if (summary.savedProfileLabel !== 'none') {
|
||||
@@ -554,10 +629,7 @@ function ProviderChooser({
|
||||
onCancel={onCancel}
|
||||
>
|
||||
<Box flexDirection="column" gap={1}>
|
||||
<Text>
|
||||
Save a provider profile for the next OpenClaude restart without
|
||||
editing environment variables first.
|
||||
</Text>
|
||||
<Text>{helperText}</Text>
|
||||
<Box flexDirection="column">
|
||||
<Text dimColor>Current model: {summary.modelLabel}</Text>
|
||||
<Text dimColor>Current endpoint: {summary.endpointLabel}</Text>
|
||||
@@ -709,7 +781,9 @@ function AutoRecommendationStep({
|
||||
{ label: 'Back', value: 'back' },
|
||||
{ label: 'Cancel', value: 'cancel' },
|
||||
]}
|
||||
onChange={value => (value === 'back' ? onBack() : onCancel())}
|
||||
onChange={(value: string) =>
|
||||
value === 'back' ? onBack() : onCancel()
|
||||
}
|
||||
onCancel={onCancel}
|
||||
/>
|
||||
</Box>
|
||||
@@ -732,7 +806,7 @@ function AutoRecommendationStep({
|
||||
{ label: 'Back', value: 'back' },
|
||||
{ label: 'Cancel', value: 'cancel' },
|
||||
]}
|
||||
onChange={value => {
|
||||
onChange={(value: string) => {
|
||||
if (value === 'continue') {
|
||||
onNeedOpenAI(status.defaultModel)
|
||||
} else if (value === 'back') {
|
||||
@@ -765,7 +839,7 @@ function AutoRecommendationStep({
|
||||
{ label: 'Back', value: 'back' },
|
||||
{ label: 'Cancel', value: 'cancel' },
|
||||
]}
|
||||
onChange={value => {
|
||||
onChange={(value: string) => {
|
||||
if (value === 'save') {
|
||||
onSave(
|
||||
'ollama',
|
||||
@@ -867,7 +941,9 @@ function OllamaModelStep({
|
||||
{ label: 'Back', value: 'back' },
|
||||
{ label: 'Cancel', value: 'cancel' },
|
||||
]}
|
||||
onChange={value => (value === 'back' ? onBack() : onCancel())}
|
||||
onChange={(value: string) =>
|
||||
value === 'back' ? onBack() : onCancel()
|
||||
}
|
||||
onCancel={onCancel}
|
||||
/>
|
||||
</Box>
|
||||
@@ -888,7 +964,7 @@ function OllamaModelStep({
|
||||
defaultFocusValue={status.defaultValue}
|
||||
inlineDescriptions
|
||||
visibleOptionCount={Math.min(8, status.options.length)}
|
||||
onChange={value => {
|
||||
onChange={(value: string) => {
|
||||
onSave(
|
||||
'ollama',
|
||||
buildOllamaProfileEnv(value, {
|
||||
@@ -903,6 +979,84 @@ function OllamaModelStep({
|
||||
)
|
||||
}
|
||||
|
||||
function CodexOAuthStep({
|
||||
onSave,
|
||||
onBack,
|
||||
onCancel,
|
||||
}: {
|
||||
onSave: (profile: ProviderProfile, env: ProfileEnv) => void
|
||||
onBack: () => void
|
||||
onCancel: () => void
|
||||
}): React.ReactNode {
|
||||
const handleAuthenticated = React.useCallback(async (
|
||||
tokens: CodexOAuthTokens,
|
||||
persistCredentials: (options?: { profileId?: string }) => void,
|
||||
) => {
|
||||
const env = buildCodexOAuthProfileEnv(tokens)
|
||||
if (!env) {
|
||||
throw new Error(
|
||||
'Codex OAuth succeeded, but OpenClaude could not build a Codex profile from the stored credentials.',
|
||||
)
|
||||
}
|
||||
|
||||
persistCredentials()
|
||||
onSave('codex', env)
|
||||
}, [onSave])
|
||||
|
||||
const status = useCodexOAuthFlow({
|
||||
onAuthenticated: handleAuthenticated,
|
||||
})
|
||||
|
||||
if (status.state === 'error') {
|
||||
return (
|
||||
<Dialog title="Codex OAuth failed" onCancel={onCancel} color="warning">
|
||||
<Box flexDirection="column" gap={1}>
|
||||
<Text>{status.message}</Text>
|
||||
<Select
|
||||
options={[
|
||||
{ label: 'Back', value: 'back' },
|
||||
{ label: 'Cancel', value: 'cancel' },
|
||||
]}
|
||||
onChange={(value: string) =>
|
||||
value === 'back' ? onBack() : onCancel()
|
||||
}
|
||||
onCancel={onCancel}
|
||||
/>
|
||||
</Box>
|
||||
</Dialog>
|
||||
)
|
||||
}
|
||||
|
||||
if (status.state === 'starting') {
|
||||
return <LoadingState message="Starting Codex OAuth..." />
|
||||
}
|
||||
|
||||
return (
|
||||
<Dialog title="Codex OAuth" onCancel={onBack}>
|
||||
<Box flexDirection="column" gap={1}>
|
||||
<Text>
|
||||
Finish signing in with ChatGPT in your browser. OpenClaude will store
|
||||
the resulting Codex credentials securely for future sessions.
|
||||
</Text>
|
||||
{status.browserOpened === false ? (
|
||||
<Text color="warning">
|
||||
Browser did not open automatically. Visit this URL to continue:
|
||||
</Text>
|
||||
) : status.browserOpened === true ? (
|
||||
<Text dimColor>
|
||||
Browser opened. Complete the sign-in there, then OpenClaude will
|
||||
finish setup automatically.
|
||||
</Text>
|
||||
) : (
|
||||
<Text dimColor>Opening your browser...</Text>
|
||||
)}
|
||||
<Text>{status.authUrl}</Text>
|
||||
<Text dimColor>Press Esc to cancel and go back.</Text>
|
||||
</Box>
|
||||
</Dialog>
|
||||
)
|
||||
}
|
||||
|
||||
function CodexCredentialStep({
|
||||
onSave,
|
||||
onBack,
|
||||
@@ -924,7 +1078,9 @@ function CodexCredentialStep({
|
||||
{ label: 'Back', value: 'back' },
|
||||
{ label: 'Cancel', value: 'cancel' },
|
||||
]}
|
||||
onChange={value => (value === 'back' ? onBack() : onCancel())}
|
||||
onChange={(value: string) =>
|
||||
value === 'back' ? onBack() : onCancel()
|
||||
}
|
||||
onCancel={onCancel}
|
||||
/>
|
||||
</Box>
|
||||
@@ -958,9 +1114,10 @@ function CodexCredentialStep({
|
||||
defaultFocusValue="codexplan"
|
||||
inlineDescriptions
|
||||
visibleOptionCount={options.length}
|
||||
onChange={value => {
|
||||
onChange={(value: string) => {
|
||||
const env = buildCodexProfileEnv({
|
||||
model: value,
|
||||
credentialSource: credentials.credentialSource,
|
||||
processEnv: process.env,
|
||||
})
|
||||
if (env) {
|
||||
@@ -975,9 +1132,16 @@ function CodexCredentialStep({
|
||||
}
|
||||
|
||||
function resolveCodexCredentials(processEnv: NodeJS.ProcessEnv):
|
||||
| { ok: true; sourceDescription: string }
|
||||
| {
|
||||
ok: true
|
||||
sourceDescription: string
|
||||
credentialSource: 'oauth' | 'existing'
|
||||
}
|
||||
| { ok: false; message: string } {
|
||||
const credentials = resolveCodexApiCredentials(processEnv)
|
||||
const oauthHint = isBareMode()
|
||||
? 'Re-login with the Codex CLI'
|
||||
: 'Choose Codex OAuth in /provider, or re-login with the Codex CLI'
|
||||
|
||||
if (!credentials.apiKey) {
|
||||
const authHint = credentials.authPath
|
||||
@@ -985,7 +1149,7 @@ function resolveCodexCredentials(processEnv: NodeJS.ProcessEnv):
|
||||
: 'Set CODEX_API_KEY or re-login with the Codex CLI.'
|
||||
return {
|
||||
ok: false,
|
||||
message: `Codex setup needs existing credentials. Re-login with the Codex CLI or set CODEX_API_KEY. ${authHint}`,
|
||||
message: `Codex setup needs existing credentials. ${oauthHint}, or set CODEX_API_KEY. ${authHint}`,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -993,15 +1157,19 @@ function resolveCodexCredentials(processEnv: NodeJS.ProcessEnv):
|
||||
return {
|
||||
ok: false,
|
||||
message:
|
||||
'Codex auth is missing chatgpt_account_id. Re-login with the Codex CLI or set CHATGPT_ACCOUNT_ID/CODEX_ACCOUNT_ID first.',
|
||||
`Codex auth is missing chatgpt_account_id. ${oauthHint}, or set CHATGPT_ACCOUNT_ID/CODEX_ACCOUNT_ID first.`,
|
||||
}
|
||||
}
|
||||
|
||||
return {
|
||||
ok: true,
|
||||
credentialSource:
|
||||
credentials.source === 'secure-storage' ? 'oauth' : 'existing',
|
||||
sourceDescription:
|
||||
credentials.source === 'env'
|
||||
? 'the current shell environment'
|
||||
: credentials.source === 'secure-storage'
|
||||
? 'OpenClaude secure storage'
|
||||
: credentials.authPath ?? DEFAULT_CODEX_BASE_URL,
|
||||
}
|
||||
}
|
||||
@@ -1035,6 +1203,8 @@ export function ProviderWizard({
|
||||
name: 'mistral-key',
|
||||
defaultModel: defaults.mistralModel,
|
||||
})
|
||||
} else if (value === 'codex-oauth') {
|
||||
setStep({ name: 'codex-oauth' })
|
||||
} else if (value === 'clear') {
|
||||
const filePath = deleteProfileFile()
|
||||
onDone(`Removed saved provider profile at ${filePath}. Restart OpenClaude to go back to normal startup.`, {
|
||||
@@ -1314,7 +1484,7 @@ export function ProviderWizard({
|
||||
options={options}
|
||||
inlineDescriptions
|
||||
visibleOptionCount={options.length}
|
||||
onChange={value => {
|
||||
onChange={(value: string) => {
|
||||
if (value === 'api-key') {
|
||||
setStep({ name: 'gemini-key' })
|
||||
} else if (value === 'access-token') {
|
||||
@@ -1470,6 +1640,15 @@ export function ProviderWizard({
|
||||
onCancel={() => onDone()}
|
||||
/>
|
||||
)
|
||||
|
||||
case 'codex-oauth':
|
||||
return (
|
||||
<CodexOAuthStep
|
||||
onSave={(profile, env) => finishProfileSave(onDone, profile, env)}
|
||||
onBack={() => setStep({ name: 'choose' })}
|
||||
onCancel={() => onDone()}
|
||||
/>
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -101,9 +101,9 @@ export function EffortPicker({ onSelect, onCancel }: Props) {
|
||||
<Box marginBottom={1} flexDirection="column">
|
||||
<Text color="remember" bold={true}>Set effort level</Text>
|
||||
<Text dimColor={true}>
|
||||
{usesOpenAIEffort
|
||||
? `OpenAI/Codex provider (${provider})`
|
||||
: supportsEffort
|
||||
{supportsEffort && usesOpenAIEffort
|
||||
? `OpenAI/Codex provider (${provider})`
|
||||
: supportsEffort
|
||||
? `Claude model · ${provider} provider`
|
||||
: `Effort not supported for this model`
|
||||
}
|
||||
|
||||
@@ -5,13 +5,14 @@ import React from 'react'
|
||||
import stripAnsi from 'strip-ansi'
|
||||
|
||||
import { createRoot } from '../ink.js'
|
||||
import { AppStateProvider } from '../state/AppState.js'
|
||||
import { KeybindingSetup } from '../keybindings/KeybindingProviderSetup.js'
|
||||
import { AppStateProvider } from '../state/AppState.js'
|
||||
|
||||
const SYNC_START = '\x1B[?2026h'
|
||||
const SYNC_END = '\x1B[?2026l'
|
||||
|
||||
const ORIGINAL_ENV = {
|
||||
CLAUDE_CODE_SIMPLE: process.env.CLAUDE_CODE_SIMPLE,
|
||||
CLAUDE_CODE_USE_GITHUB: process.env.CLAUDE_CODE_USE_GITHUB,
|
||||
GITHUB_TOKEN: process.env.GITHUB_TOKEN,
|
||||
GH_TOKEN: process.env.GH_TOKEN,
|
||||
@@ -109,6 +110,9 @@ function createDeferred<T>(): {
|
||||
|
||||
function mockProviderProfilesModule(options?: {
|
||||
addProviderProfile?: (...args: unknown[]) => unknown
|
||||
getProviderProfiles?: () => unknown[]
|
||||
updateProviderProfile?: (...args: unknown[]) => unknown
|
||||
setActiveProviderProfile?: (...args: unknown[]) => unknown
|
||||
}): void {
|
||||
mock.module('../utils/providerProfiles.js', () => ({
|
||||
addProviderProfile: options?.addProviderProfile ?? (() => null),
|
||||
@@ -131,17 +135,20 @@ function mockProviderProfilesModule(options?: {
|
||||
model: 'mock-model',
|
||||
apiKey: '',
|
||||
},
|
||||
getProviderProfiles: () => [],
|
||||
setActiveProviderProfile: () => null,
|
||||
updateProviderProfile: () => null,
|
||||
getProviderProfiles: options?.getProviderProfiles ?? (() => []),
|
||||
setActiveProviderProfile: options?.setActiveProviderProfile ?? (() => null),
|
||||
updateProviderProfile: options?.updateProviderProfile ?? (() => null),
|
||||
}))
|
||||
}
|
||||
|
||||
function mockProviderManagerDependencies(
|
||||
syncRead: () => string | undefined,
|
||||
asyncRead: () => Promise<string | undefined>,
|
||||
githubSyncRead: () => string | undefined,
|
||||
githubAsyncRead: () => Promise<string | undefined>,
|
||||
options?: {
|
||||
addProviderProfile?: (...args: unknown[]) => unknown
|
||||
applySavedProfileToCurrentSession?: (...args: unknown[]) => Promise<string | null>
|
||||
clearCodexCredentials?: () => { success: boolean; warning?: string }
|
||||
getProviderProfiles?: () => unknown[]
|
||||
hasLocalOllama?: () => Promise<boolean>
|
||||
listOllamaModels?: () => Promise<
|
||||
Array<{
|
||||
@@ -153,9 +160,33 @@ function mockProviderManagerDependencies(
|
||||
quantizationLevel?: string | null
|
||||
}>
|
||||
>
|
||||
codexSyncRead?: () => unknown
|
||||
codexAsyncRead?: () => Promise<unknown>
|
||||
updateProviderProfile?: (...args: unknown[]) => unknown
|
||||
setActiveProviderProfile?: (...args: unknown[]) => unknown
|
||||
useCodexOAuthFlow?: (options: {
|
||||
onAuthenticated: (tokens: {
|
||||
accessToken: string
|
||||
refreshToken: string
|
||||
accountId?: string
|
||||
idToken?: string
|
||||
apiKey?: string
|
||||
}, persistCredentials: (options?: { profileId?: string }) => void) =>
|
||||
void | Promise<void>
|
||||
}) => {
|
||||
state: 'starting' | 'waiting' | 'error'
|
||||
authUrl?: string
|
||||
browserOpened?: boolean | null
|
||||
message?: string
|
||||
}
|
||||
},
|
||||
): void {
|
||||
mockProviderProfilesModule({ addProviderProfile: options?.addProviderProfile })
|
||||
mockProviderProfilesModule({
|
||||
addProviderProfile: options?.addProviderProfile,
|
||||
getProviderProfiles: options?.getProviderProfiles,
|
||||
updateProviderProfile: options?.updateProviderProfile,
|
||||
setActiveProviderProfile: options?.setActiveProviderProfile,
|
||||
})
|
||||
|
||||
mock.module('../utils/providerDiscovery.js', () => ({
|
||||
hasLocalOllama: options?.hasLocalOllama ?? (async () => false),
|
||||
@@ -166,13 +197,65 @@ function mockProviderManagerDependencies(
|
||||
clearGithubModelsToken: () => ({ success: true }),
|
||||
GITHUB_MODELS_HYDRATED_ENV_MARKER: 'CLAUDE_CODE_GITHUB_TOKEN_HYDRATED',
|
||||
hydrateGithubModelsTokenFromSecureStorage: () => {},
|
||||
readGithubModelsToken: syncRead,
|
||||
readGithubModelsTokenAsync: asyncRead,
|
||||
readGithubModelsToken: githubSyncRead,
|
||||
readGithubModelsTokenAsync: githubAsyncRead,
|
||||
}))
|
||||
|
||||
mock.module('../utils/codexCredentials.js', () => ({
|
||||
attachCodexProfileIdToStoredCredentials: () => ({ success: true }),
|
||||
clearCodexCredentials:
|
||||
options?.clearCodexCredentials ?? (() => ({ success: true })),
|
||||
readCodexCredentials:
|
||||
options?.codexSyncRead ?? (() => undefined),
|
||||
readCodexCredentialsAsync:
|
||||
options?.codexAsyncRead ?? (async () => undefined),
|
||||
}))
|
||||
|
||||
mock.module('../utils/providerProfile.js', () => ({
|
||||
applySavedProfileToCurrentSession:
|
||||
options?.applySavedProfileToCurrentSession ?? (async () => null),
|
||||
buildCodexOAuthProfileEnv: (tokens: {
|
||||
accessToken: string
|
||||
accountId?: string
|
||||
idToken?: string
|
||||
}) => {
|
||||
const accountId =
|
||||
tokens.accountId ??
|
||||
(tokens.idToken ? 'acct_from_id_token' : undefined) ??
|
||||
(tokens.accessToken ? 'acct_from_access_token' : undefined)
|
||||
|
||||
if (!accountId) {
|
||||
return null
|
||||
}
|
||||
|
||||
return {
|
||||
OPENAI_BASE_URL: 'https://chatgpt.com/backend-api/codex',
|
||||
OPENAI_MODEL: 'codexplan',
|
||||
CHATGPT_ACCOUNT_ID: accountId,
|
||||
CODEX_CREDENTIAL_SOURCE: 'oauth' as const,
|
||||
}
|
||||
},
|
||||
clearPersistedCodexOAuthProfile: () => null,
|
||||
createProfileFile: (profile: string, env: Record<string, unknown>) => ({
|
||||
profile,
|
||||
env,
|
||||
createdAt: '2026-04-10T00:00:00.000Z',
|
||||
}),
|
||||
}))
|
||||
|
||||
mock.module('../utils/settings/settings.js', () => ({
|
||||
updateSettingsForSource: () => ({ error: null }),
|
||||
}))
|
||||
|
||||
mock.module('./useCodexOAuthFlow.js', () => ({
|
||||
useCodexOAuthFlow:
|
||||
options?.useCodexOAuthFlow ??
|
||||
(() => ({
|
||||
state: 'waiting' as const,
|
||||
authUrl: 'https://chatgpt.com/codex',
|
||||
browserOpened: true,
|
||||
})),
|
||||
}))
|
||||
}
|
||||
|
||||
async function waitForFrameOutput(
|
||||
@@ -240,9 +323,9 @@ async function renderProviderManagerFrame(
|
||||
onDone: (result?: unknown) => void
|
||||
}>,
|
||||
options?: {
|
||||
mode?: 'first-run' | 'manage'
|
||||
waitForOutput?: (output: string) => boolean
|
||||
timeoutMs?: number
|
||||
mode?: 'first-run' | 'manage'
|
||||
},
|
||||
): Promise<string> {
|
||||
const mounted = await mountProviderManager(ProviderManager, {
|
||||
@@ -305,6 +388,47 @@ test('ProviderManager resolves GitHub virtual provider from async storage withou
|
||||
expect(asyncRead).toHaveBeenCalled()
|
||||
})
|
||||
|
||||
test('ProviderManager avoids first-frame false negative while stored-token lookup is pending', async () => {
|
||||
delete process.env.CLAUDE_CODE_USE_GITHUB
|
||||
delete process.env.GITHUB_TOKEN
|
||||
delete process.env.GH_TOKEN
|
||||
|
||||
const syncRead = mock(() => {
|
||||
throw new Error('sync credential read should not run in ProviderManager render flow')
|
||||
})
|
||||
const deferredStoredToken = createDeferred<string | undefined>()
|
||||
const asyncRead = mock(async () => deferredStoredToken.promise)
|
||||
|
||||
mockProviderManagerDependencies(syncRead, asyncRead)
|
||||
|
||||
const nonce = `${Date.now()}-${Math.random()}`
|
||||
const { ProviderManager } = await import(`./ProviderManager.js?ts=${nonce}`)
|
||||
const mounted = await mountProviderManager(ProviderManager)
|
||||
|
||||
const firstFrame = await waitForFrameOutput(
|
||||
mounted.getOutput,
|
||||
frame => frame.includes('Provider manager'),
|
||||
)
|
||||
|
||||
expect(firstFrame).toContain('Checking GitHub Models credentials...')
|
||||
expect(firstFrame).not.toContain('No provider profiles configured yet.')
|
||||
|
||||
deferredStoredToken.resolve('stored-token')
|
||||
|
||||
const resolvedFrame = await waitForFrameOutput(
|
||||
mounted.getOutput,
|
||||
frame => frame.includes('GitHub Models') && frame.includes('token stored'),
|
||||
)
|
||||
|
||||
expect(resolvedFrame).toContain('GitHub Models')
|
||||
expect(resolvedFrame).toContain('token stored')
|
||||
|
||||
await mounted.dispose()
|
||||
|
||||
expect(syncRead).not.toHaveBeenCalled()
|
||||
expect(asyncRead).toHaveBeenCalled()
|
||||
})
|
||||
|
||||
test('ProviderManager first-run Ollama preset auto-detects installed models', async () => {
|
||||
delete process.env.CLAUDE_CODE_USE_GITHUB
|
||||
delete process.env.GITHUB_TOKEN
|
||||
@@ -395,43 +519,411 @@ test('ProviderManager first-run Ollama preset auto-detects installed models', as
|
||||
await mounted.dispose()
|
||||
})
|
||||
|
||||
test('ProviderManager avoids first-frame false negative while stored-token lookup is pending', async () => {
|
||||
test('ProviderManager first-run Codex OAuth switches the current session after login completes', async () => {
|
||||
delete process.env.CLAUDE_CODE_SIMPLE
|
||||
delete process.env.CLAUDE_CODE_USE_GITHUB
|
||||
delete process.env.GITHUB_TOKEN
|
||||
delete process.env.GH_TOKEN
|
||||
|
||||
const syncRead = mock(() => {
|
||||
throw new Error('sync credential read should not run in ProviderManager render flow')
|
||||
})
|
||||
const deferredStoredToken = createDeferred<string | undefined>()
|
||||
const asyncRead = mock(async () => deferredStoredToken.promise)
|
||||
const onDone = mock(() => {})
|
||||
const applySavedProfileToCurrentSession = mock(async () => null)
|
||||
const persistCredentials = mock(() => {})
|
||||
const addProviderProfile = mock((payload: {
|
||||
provider: string
|
||||
name: string
|
||||
baseUrl: string
|
||||
model: string
|
||||
apiKey?: string
|
||||
}) => ({
|
||||
id: 'provider_codex_oauth',
|
||||
provider: payload.provider,
|
||||
name: payload.name,
|
||||
baseUrl: payload.baseUrl,
|
||||
model: payload.model,
|
||||
apiKey: payload.apiKey,
|
||||
}))
|
||||
|
||||
mockProviderManagerDependencies(syncRead, asyncRead)
|
||||
mockProviderManagerDependencies(
|
||||
() => undefined,
|
||||
async () => undefined,
|
||||
{
|
||||
addProviderProfile,
|
||||
applySavedProfileToCurrentSession,
|
||||
useCodexOAuthFlow: ({ onAuthenticated }) => {
|
||||
React.useEffect(() => {
|
||||
void onAuthenticated({
|
||||
accessToken: 'oauth-access-token',
|
||||
refreshToken: 'oauth-refresh-token',
|
||||
accountId: 'acct_oauth',
|
||||
}, persistCredentials)
|
||||
}, [onAuthenticated])
|
||||
|
||||
return {
|
||||
state: 'waiting',
|
||||
authUrl: 'https://chatgpt.com/codex',
|
||||
browserOpened: true,
|
||||
}
|
||||
},
|
||||
},
|
||||
)
|
||||
|
||||
const nonce = `${Date.now()}-${Math.random()}`
|
||||
const { ProviderManager } = await import(`./ProviderManager.js?ts=${nonce}`)
|
||||
const mounted = await mountProviderManager(ProviderManager, {
|
||||
mode: 'first-run',
|
||||
onDone,
|
||||
})
|
||||
|
||||
await waitForFrameOutput(
|
||||
mounted.getOutput,
|
||||
frame => frame.includes('Set up provider') && frame.includes('Codex OAuth'),
|
||||
)
|
||||
|
||||
mounted.stdin.write('j')
|
||||
await Bun.sleep(25)
|
||||
mounted.stdin.write('j')
|
||||
await Bun.sleep(25)
|
||||
mounted.stdin.write('j')
|
||||
await Bun.sleep(25)
|
||||
mounted.stdin.write('\r')
|
||||
|
||||
await waitForCondition(() => onDone.mock.calls.length > 0)
|
||||
|
||||
expect(addProviderProfile).toHaveBeenCalledWith(
|
||||
expect.objectContaining({
|
||||
provider: 'openai',
|
||||
name: 'Codex OAuth',
|
||||
baseUrl: 'https://chatgpt.com/backend-api/codex',
|
||||
model: 'codexplan',
|
||||
apiKey: '',
|
||||
}),
|
||||
expect.objectContaining({ makeActive: true }),
|
||||
)
|
||||
expect(applySavedProfileToCurrentSession).toHaveBeenCalled()
|
||||
expect(persistCredentials).toHaveBeenCalledWith({
|
||||
profileId: 'provider_codex_oauth',
|
||||
})
|
||||
expect(onDone).toHaveBeenCalledWith(
|
||||
expect.objectContaining({
|
||||
action: 'saved',
|
||||
message:
|
||||
'Codex OAuth configured. OpenClaude switched to it for this session.',
|
||||
}),
|
||||
)
|
||||
|
||||
await mounted.dispose()
|
||||
})
|
||||
|
||||
test('ProviderManager first-run Codex OAuth reports next-startup fallback when session activation fails', async () => {
|
||||
delete process.env.CLAUDE_CODE_SIMPLE
|
||||
delete process.env.CLAUDE_CODE_USE_GITHUB
|
||||
delete process.env.GITHUB_TOKEN
|
||||
delete process.env.GH_TOKEN
|
||||
|
||||
const onDone = mock(() => {})
|
||||
const applySavedProfileToCurrentSession = mock(
|
||||
async () => 'validation failed',
|
||||
)
|
||||
const persistCredentials = mock(() => {})
|
||||
const addProviderProfile = mock((payload: {
|
||||
provider: string
|
||||
name: string
|
||||
baseUrl: string
|
||||
model: string
|
||||
apiKey?: string
|
||||
}) => ({
|
||||
id: 'provider_codex_oauth',
|
||||
provider: payload.provider,
|
||||
name: payload.name,
|
||||
baseUrl: payload.baseUrl,
|
||||
model: payload.model,
|
||||
apiKey: payload.apiKey,
|
||||
}))
|
||||
|
||||
mockProviderManagerDependencies(
|
||||
() => undefined,
|
||||
async () => undefined,
|
||||
{
|
||||
addProviderProfile,
|
||||
applySavedProfileToCurrentSession,
|
||||
useCodexOAuthFlow: ({ onAuthenticated }) => {
|
||||
React.useEffect(() => {
|
||||
void onAuthenticated({
|
||||
accessToken: 'oauth-access-token',
|
||||
refreshToken: 'oauth-refresh-token',
|
||||
accountId: 'acct_oauth',
|
||||
}, persistCredentials)
|
||||
}, [onAuthenticated])
|
||||
|
||||
return {
|
||||
state: 'waiting',
|
||||
authUrl: 'https://chatgpt.com/codex',
|
||||
browserOpened: true,
|
||||
}
|
||||
},
|
||||
},
|
||||
)
|
||||
|
||||
const nonce = `${Date.now()}-${Math.random()}`
|
||||
const { ProviderManager } = await import(`./ProviderManager.js?ts=${nonce}`)
|
||||
const mounted = await mountProviderManager(ProviderManager, {
|
||||
mode: 'first-run',
|
||||
onDone,
|
||||
})
|
||||
|
||||
await waitForFrameOutput(
|
||||
mounted.getOutput,
|
||||
frame => frame.includes('Set up provider') && frame.includes('Codex OAuth'),
|
||||
)
|
||||
|
||||
mounted.stdin.write('j')
|
||||
await Bun.sleep(25)
|
||||
mounted.stdin.write('j')
|
||||
await Bun.sleep(25)
|
||||
mounted.stdin.write('j')
|
||||
await Bun.sleep(25)
|
||||
mounted.stdin.write('\r')
|
||||
|
||||
await waitForCondition(() => onDone.mock.calls.length > 0)
|
||||
|
||||
expect(persistCredentials).toHaveBeenCalledWith({
|
||||
profileId: 'provider_codex_oauth',
|
||||
})
|
||||
expect(onDone).toHaveBeenCalledWith(
|
||||
expect.objectContaining({
|
||||
action: 'saved',
|
||||
message:
|
||||
'Codex OAuth configured. Saved for next startup. Warning: validation failed.',
|
||||
}),
|
||||
)
|
||||
|
||||
await mounted.dispose()
|
||||
})
|
||||
|
||||
test('ProviderManager does not hijack a manual Codex profile when OAuth credentials are not yet linked', async () => {
|
||||
delete process.env.CLAUDE_CODE_SIMPLE
|
||||
delete process.env.CLAUDE_CODE_USE_GITHUB
|
||||
delete process.env.GITHUB_TOKEN
|
||||
delete process.env.GH_TOKEN
|
||||
|
||||
const onDone = mock(() => {})
|
||||
const manualProfile = {
|
||||
id: 'provider_manual_codex',
|
||||
provider: 'openai',
|
||||
name: 'Codex OAuth',
|
||||
baseUrl: 'https://chatgpt.com/backend-api/codex',
|
||||
model: 'gpt-5.4',
|
||||
apiKey: 'manual-key',
|
||||
}
|
||||
const addProviderProfile = mock((payload: {
|
||||
provider: string
|
||||
name: string
|
||||
baseUrl: string
|
||||
model: string
|
||||
apiKey?: string
|
||||
}) => ({
|
||||
id: 'provider_codex_oauth',
|
||||
provider: payload.provider,
|
||||
name: payload.name,
|
||||
baseUrl: payload.baseUrl,
|
||||
model: payload.model,
|
||||
apiKey: payload.apiKey,
|
||||
}))
|
||||
const updateProviderProfile = mock(() => manualProfile)
|
||||
const persistCredentials = mock(() => {})
|
||||
|
||||
mockProviderManagerDependencies(
|
||||
() => undefined,
|
||||
async () => undefined,
|
||||
{
|
||||
addProviderProfile,
|
||||
getProviderProfiles: () => [manualProfile],
|
||||
updateProviderProfile,
|
||||
useCodexOAuthFlow: ({ onAuthenticated }) => {
|
||||
const hasAuthenticated = React.useRef(false)
|
||||
|
||||
React.useEffect(() => {
|
||||
if (hasAuthenticated.current) {
|
||||
return
|
||||
}
|
||||
hasAuthenticated.current = true
|
||||
void onAuthenticated({
|
||||
accessToken: 'oauth-access-token',
|
||||
refreshToken: 'oauth-refresh-token',
|
||||
accountId: 'acct_oauth',
|
||||
}, persistCredentials)
|
||||
}, [onAuthenticated])
|
||||
|
||||
return {
|
||||
state: 'waiting',
|
||||
authUrl: 'https://chatgpt.com/codex',
|
||||
browserOpened: true,
|
||||
}
|
||||
},
|
||||
},
|
||||
)
|
||||
|
||||
const nonce = `${Date.now()}-${Math.random()}`
|
||||
const { ProviderManager } = await import(`./ProviderManager.js?ts=${nonce}`)
|
||||
const mounted = await mountProviderManager(ProviderManager, {
|
||||
mode: 'first-run',
|
||||
onDone,
|
||||
})
|
||||
|
||||
await waitForFrameOutput(
|
||||
mounted.getOutput,
|
||||
frame => frame.includes('Set up provider') && frame.includes('Codex OAuth'),
|
||||
)
|
||||
|
||||
mounted.stdin.write('j')
|
||||
await Bun.sleep(25)
|
||||
mounted.stdin.write('j')
|
||||
await Bun.sleep(25)
|
||||
mounted.stdin.write('j')
|
||||
await Bun.sleep(25)
|
||||
mounted.stdin.write('\r')
|
||||
|
||||
await waitForCondition(() => onDone.mock.calls.length > 0)
|
||||
|
||||
expect(addProviderProfile).toHaveBeenCalledTimes(1)
|
||||
expect(updateProviderProfile).not.toHaveBeenCalled()
|
||||
expect(persistCredentials).toHaveBeenCalledWith({
|
||||
profileId: 'provider_codex_oauth',
|
||||
})
|
||||
|
||||
await mounted.dispose()
|
||||
})
|
||||
|
||||
test('ProviderManager keeps Codex OAuth as next-startup only when activating the session fails from the menu', async () => {
|
||||
delete process.env.CLAUDE_CODE_SIMPLE
|
||||
delete process.env.CLAUDE_CODE_USE_GITHUB
|
||||
delete process.env.GITHUB_TOKEN
|
||||
delete process.env.GH_TOKEN
|
||||
|
||||
const codexProfile = {
|
||||
id: 'provider_codex_oauth',
|
||||
provider: 'openai',
|
||||
name: 'Codex OAuth',
|
||||
baseUrl: 'https://chatgpt.com/backend-api/codex',
|
||||
model: 'codexplan',
|
||||
apiKey: '',
|
||||
}
|
||||
|
||||
const applySavedProfileToCurrentSession = mock(
|
||||
async () => 'validation failed',
|
||||
)
|
||||
const setActiveProviderProfile = mock(() => codexProfile)
|
||||
|
||||
mockProviderManagerDependencies(
|
||||
() => undefined,
|
||||
async () => undefined,
|
||||
{
|
||||
applySavedProfileToCurrentSession,
|
||||
getProviderProfiles: () => [codexProfile],
|
||||
setActiveProviderProfile,
|
||||
codexAsyncRead: async () => ({
|
||||
accessToken: 'oauth-access-token',
|
||||
refreshToken: 'oauth-refresh-token',
|
||||
accountId: 'acct_oauth',
|
||||
profileId: 'provider_codex_oauth',
|
||||
}),
|
||||
},
|
||||
)
|
||||
|
||||
const nonce = `${Date.now()}-${Math.random()}`
|
||||
const { ProviderManager } = await import(`./ProviderManager.js?ts=${nonce}`)
|
||||
const mounted = await mountProviderManager(ProviderManager)
|
||||
|
||||
const firstFrame = await waitForFrameOutput(
|
||||
await waitForFrameOutput(
|
||||
mounted.getOutput,
|
||||
frame => frame.includes('Provider manager'),
|
||||
frame =>
|
||||
frame.includes('Provider manager') &&
|
||||
frame.includes('Set active provider') &&
|
||||
frame.includes('Log out Codex OAuth'),
|
||||
)
|
||||
|
||||
expect(firstFrame).toContain('Checking GitHub Models credentials...')
|
||||
expect(firstFrame).not.toContain('No provider profiles configured yet.')
|
||||
mounted.stdin.write('j')
|
||||
await Bun.sleep(25)
|
||||
mounted.stdin.write('\r')
|
||||
|
||||
deferredStoredToken.resolve('stored-token')
|
||||
|
||||
const resolvedFrame = await waitForFrameOutput(
|
||||
await waitForFrameOutput(
|
||||
mounted.getOutput,
|
||||
frame => frame.includes('GitHub Models') && frame.includes('token stored'),
|
||||
frame => frame.includes('Set active provider') && frame.includes('Codex OAuth'),
|
||||
)
|
||||
|
||||
expect(resolvedFrame).toContain('GitHub Models')
|
||||
expect(resolvedFrame).toContain('token stored')
|
||||
await Bun.sleep(25)
|
||||
mounted.stdin.write('\r')
|
||||
|
||||
await waitForCondition(() => setActiveProviderProfile.mock.calls.length > 0)
|
||||
await waitForCondition(
|
||||
() => applySavedProfileToCurrentSession.mock.calls.length > 0,
|
||||
)
|
||||
await Bun.sleep(50)
|
||||
const output = stripAnsi(extractLastFrame(mounted.getOutput()))
|
||||
|
||||
expect(output).toContain(
|
||||
'Active provider: Codex OAuth. Saved for next startup. Warning: validation failed.',
|
||||
)
|
||||
expect(applySavedProfileToCurrentSession).toHaveBeenCalled()
|
||||
expect(setActiveProviderProfile).toHaveBeenCalledWith('provider_codex_oauth')
|
||||
|
||||
await mounted.dispose()
|
||||
|
||||
expect(syncRead).not.toHaveBeenCalled()
|
||||
expect(asyncRead).toHaveBeenCalled()
|
||||
})
|
||||
|
||||
test('ProviderManager resolves Codex OAuth state from async storage without sync reads in render flow', async () => {
|
||||
delete process.env.CLAUDE_CODE_SIMPLE
|
||||
delete process.env.CLAUDE_CODE_USE_GITHUB
|
||||
delete process.env.GITHUB_TOKEN
|
||||
delete process.env.GH_TOKEN
|
||||
|
||||
const githubSyncRead = mock(() => undefined)
|
||||
const githubAsyncRead = mock(async () => undefined)
|
||||
const codexSyncRead = mock(() => {
|
||||
throw new Error('sync codex credential read should not run in ProviderManager render flow')
|
||||
})
|
||||
const codexAsyncRead = mock(async () => ({
|
||||
accessToken: 'codex-access-token',
|
||||
refreshToken: 'codex-refresh-token',
|
||||
}))
|
||||
|
||||
mockProviderManagerDependencies(githubSyncRead, githubAsyncRead, {
|
||||
codexSyncRead,
|
||||
codexAsyncRead,
|
||||
})
|
||||
|
||||
const nonce = `${Date.now()}-${Math.random()}`
|
||||
const { ProviderManager } = await import(`./ProviderManager.js?ts=${nonce}`)
|
||||
const output = await renderProviderManagerFrame(ProviderManager, {
|
||||
waitForOutput: frame =>
|
||||
frame.includes('Provider manager') &&
|
||||
frame.includes('Log out Codex OAuth'),
|
||||
})
|
||||
|
||||
expect(output).toContain('Provider manager')
|
||||
expect(output).toContain('Log out Codex OAuth')
|
||||
expect(codexSyncRead).not.toHaveBeenCalled()
|
||||
expect(codexAsyncRead).toHaveBeenCalled()
|
||||
})
|
||||
|
||||
test('ProviderManager hides Codex OAuth setup in bare mode', async () => {
|
||||
process.env.CLAUDE_CODE_SIMPLE = '1'
|
||||
delete process.env.CLAUDE_CODE_USE_GITHUB
|
||||
delete process.env.GITHUB_TOKEN
|
||||
delete process.env.GH_TOKEN
|
||||
|
||||
const githubSyncRead = mock(() => undefined)
|
||||
const githubAsyncRead = mock(async () => undefined)
|
||||
|
||||
mockProviderManagerDependencies(githubSyncRead, githubAsyncRead)
|
||||
|
||||
const nonce = `${Date.now()}-${Math.random()}`
|
||||
const { ProviderManager } = await import(`./ProviderManager.js?ts=${nonce}`)
|
||||
const output = await renderProviderManagerFrame(ProviderManager, {
|
||||
mode: 'first-run',
|
||||
waitForOutput: frame =>
|
||||
frame.includes('Set up provider') && frame.includes('OpenAI'),
|
||||
})
|
||||
|
||||
expect(output).toContain('Set up provider')
|
||||
expect(output).not.toContain('Codex OAuth')
|
||||
})
|
||||
|
||||
@@ -1,9 +1,20 @@
|
||||
import figures from 'figures'
|
||||
import * as React from 'react'
|
||||
import { DEFAULT_CODEX_BASE_URL } from '../services/api/providerConfig.js'
|
||||
import { Box, Text } from '../ink.js'
|
||||
import { useKeybinding } from '../keybindings/useKeybinding.js'
|
||||
import type { ProviderProfile } from '../utils/config.js'
|
||||
import { hasLocalOllama, listOllamaModels } from '../utils/providerDiscovery.js'
|
||||
import {
|
||||
clearCodexCredentials,
|
||||
readCodexCredentialsAsync,
|
||||
} from '../utils/codexCredentials.js'
|
||||
import { isBareMode, isEnvTruthy } from '../utils/envUtils.js'
|
||||
import {
|
||||
applySavedProfileToCurrentSession,
|
||||
buildCodexOAuthProfileEnv,
|
||||
clearPersistedCodexOAuthProfile,
|
||||
createProfileFile,
|
||||
} from '../utils/providerProfile.js'
|
||||
import {
|
||||
addProviderProfile,
|
||||
applyActiveProviderProfileFromConfig,
|
||||
@@ -16,10 +27,6 @@ import {
|
||||
type ProviderProfileInput,
|
||||
updateProviderProfile,
|
||||
} from '../utils/providerProfiles.js'
|
||||
import {
|
||||
rankOllamaModels,
|
||||
recommendOllamaModel,
|
||||
} from '../utils/providerRecommendation.js'
|
||||
import {
|
||||
clearGithubModelsToken,
|
||||
GITHUB_MODELS_HYDRATED_ENV_MARKER,
|
||||
@@ -27,11 +34,22 @@ import {
|
||||
readGithubModelsToken,
|
||||
readGithubModelsTokenAsync,
|
||||
} from '../utils/githubModelsCredentials.js'
|
||||
import { isEnvTruthy } from '../utils/envUtils.js'
|
||||
import {
|
||||
hasLocalOllama,
|
||||
listOllamaModels,
|
||||
} from '../utils/providerDiscovery.js'
|
||||
import {
|
||||
rankOllamaModels,
|
||||
recommendOllamaModel,
|
||||
} from '../utils/providerRecommendation.js'
|
||||
import { updateSettingsForSource } from '../utils/settings/settings.js'
|
||||
import { type OptionWithDescription, Select } from './CustomSelect/index.js'
|
||||
import {
|
||||
type OptionWithDescription,
|
||||
Select,
|
||||
} from './CustomSelect/index.js'
|
||||
import { Pane } from './design-system/Pane.js'
|
||||
import TextInput from './TextInput.js'
|
||||
import { useCodexOAuthFlow } from './useCodexOAuthFlow.js'
|
||||
|
||||
export type ProviderManagerResult = {
|
||||
action: 'saved' | 'cancelled'
|
||||
@@ -48,6 +66,7 @@ type Screen =
|
||||
| 'menu'
|
||||
| 'select-preset'
|
||||
| 'select-ollama-model'
|
||||
| 'codex-oauth'
|
||||
| 'form'
|
||||
| 'select-active'
|
||||
| 'select-edit'
|
||||
@@ -105,6 +124,8 @@ const GITHUB_PROVIDER_ID = '__github_models__'
|
||||
const GITHUB_PROVIDER_LABEL = 'GitHub Models'
|
||||
const GITHUB_PROVIDER_DEFAULT_MODEL = 'github:copilot'
|
||||
const GITHUB_PROVIDER_DEFAULT_BASE_URL = 'https://models.github.ai/inference'
|
||||
const CODEX_OAUTH_PROVIDER_NAME = 'Codex OAuth'
|
||||
const CODEX_OAUTH_PROVIDER_MODEL = 'codexplan'
|
||||
|
||||
type GithubCredentialSource = 'stored' | 'env' | 'none'
|
||||
|
||||
@@ -193,6 +214,111 @@ function getGithubProviderSummary(
|
||||
return `github-models · ${GITHUB_PROVIDER_DEFAULT_BASE_URL} · ${getGithubProviderModel(processEnv)} · ${credentialSummary}${activeSuffix}`
|
||||
}
|
||||
|
||||
function findCodexOAuthProfile(
|
||||
profiles: ProviderProfile[],
|
||||
profileId?: string,
|
||||
): ProviderProfile | undefined {
|
||||
if (!profileId) {
|
||||
return undefined
|
||||
}
|
||||
|
||||
return profiles.find(profile => profile.id === profileId)
|
||||
}
|
||||
|
||||
function isCodexOAuthProfile(
|
||||
profile: ProviderProfile | null | undefined,
|
||||
profileId?: string,
|
||||
): boolean {
|
||||
return Boolean(profile && profileId && profile.id === profileId)
|
||||
}
|
||||
|
||||
function CodexOAuthSetup({
|
||||
onBack,
|
||||
onConfigured,
|
||||
}: {
|
||||
onBack: () => void
|
||||
onConfigured: (tokens: {
|
||||
accessToken: string
|
||||
refreshToken: string
|
||||
accountId?: string
|
||||
idToken?: string
|
||||
apiKey?: string
|
||||
}, persistCredentials: (options?: { profileId?: string }) => void) => void | Promise<void>
|
||||
}): React.ReactNode {
|
||||
const handleAuthenticated = React.useCallback(async (tokens: {
|
||||
accessToken: string
|
||||
refreshToken: string
|
||||
accountId?: string
|
||||
idToken?: string
|
||||
apiKey?: string
|
||||
}, persistCredentials: (options?: { profileId?: string }) => void) => {
|
||||
await onConfigured(tokens, persistCredentials)
|
||||
}, [onConfigured])
|
||||
useKeybinding('confirm:no', onBack, [onBack])
|
||||
|
||||
const status = useCodexOAuthFlow({
|
||||
onAuthenticated: handleAuthenticated,
|
||||
})
|
||||
|
||||
if (status.state === 'error') {
|
||||
return (
|
||||
<Box flexDirection="column" gap={1}>
|
||||
<Text color="error" bold>
|
||||
Codex OAuth failed
|
||||
</Text>
|
||||
<Text>{status.message}</Text>
|
||||
<Text dimColor>Press Enter or Esc to go back.</Text>
|
||||
<Select
|
||||
options={[
|
||||
{
|
||||
value: 'back',
|
||||
label: 'Back',
|
||||
description: 'Return to provider presets',
|
||||
},
|
||||
]}
|
||||
onChange={onBack}
|
||||
onCancel={onBack}
|
||||
visibleOptionCount={1}
|
||||
/>
|
||||
</Box>
|
||||
)
|
||||
}
|
||||
|
||||
return (
|
||||
<Box flexDirection="column" gap={1}>
|
||||
<Text color="remember" bold>
|
||||
Codex OAuth
|
||||
</Text>
|
||||
<Text>
|
||||
Sign in with your ChatGPT account in the browser. OpenClaude will store
|
||||
the resulting Codex credentials securely and switch this session to the
|
||||
new Codex login when setup completes.
|
||||
</Text>
|
||||
{status.state === 'starting' ? (
|
||||
<Text dimColor>Starting local callback and preparing your browser...</Text>
|
||||
) : status.browserOpened === false ? (
|
||||
<>
|
||||
<Text color="warning">
|
||||
Browser did not open automatically. Visit this URL to continue:
|
||||
</Text>
|
||||
<Text>{status.authUrl}</Text>
|
||||
</>
|
||||
) : status.browserOpened === true ? (
|
||||
<>
|
||||
<Text dimColor>
|
||||
Browser opened. Finish the ChatGPT sign-in there and this setup will
|
||||
complete automatically.
|
||||
</Text>
|
||||
<Text>{status.authUrl}</Text>
|
||||
</>
|
||||
) : (
|
||||
<Text dimColor>Opening your browser...</Text>
|
||||
)}
|
||||
<Text dimColor>Press Esc to cancel and go back.</Text>
|
||||
</Box>
|
||||
)
|
||||
}
|
||||
|
||||
export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
|
||||
const initialGithubCredentialSource = getGithubCredentialSourceFromEnv()
|
||||
const initialIsGithubActive = isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB)
|
||||
@@ -212,6 +338,7 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
|
||||
const [isGithubCredentialSourceResolved, setIsGithubCredentialSourceResolved] =
|
||||
React.useState(() => initialHasGithubCredential || initialIsGithubActive)
|
||||
const githubRefreshEpochRef = React.useRef(0)
|
||||
const codexRefreshEpochRef = React.useRef(0)
|
||||
const [screen, setScreen] = React.useState<Screen>(
|
||||
mode === 'first-run' ? 'select-preset' : 'menu',
|
||||
)
|
||||
@@ -226,6 +353,10 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
|
||||
const [cursorOffset, setCursorOffset] = React.useState(0)
|
||||
const [statusMessage, setStatusMessage] = React.useState<string | undefined>()
|
||||
const [errorMessage, setErrorMessage] = React.useState<string | undefined>()
|
||||
const [hasStoredCodexOAuthCredentials, setHasStoredCodexOAuthCredentials] =
|
||||
React.useState(false)
|
||||
const [storedCodexOAuthProfileId, setStoredCodexOAuthProfileId] =
|
||||
React.useState<string | undefined>()
|
||||
const [ollamaSelection, setOllamaSelection] = React.useState<OllamaSelectionState>({
|
||||
state: 'idle',
|
||||
})
|
||||
@@ -263,19 +394,102 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
|
||||
})()
|
||||
}, [])
|
||||
|
||||
const refreshCodexOAuthCredentialState = React.useCallback((): void => {
|
||||
if (isBareMode()) {
|
||||
codexRefreshEpochRef.current += 1
|
||||
setHasStoredCodexOAuthCredentials(false)
|
||||
setStoredCodexOAuthProfileId(undefined)
|
||||
return
|
||||
}
|
||||
|
||||
const refreshEpoch = ++codexRefreshEpochRef.current
|
||||
void (async () => {
|
||||
const credentials = await readCodexCredentialsAsync()
|
||||
if (refreshEpoch !== codexRefreshEpochRef.current) {
|
||||
return
|
||||
}
|
||||
|
||||
setHasStoredCodexOAuthCredentials(
|
||||
Boolean(
|
||||
credentials?.apiKey ||
|
||||
credentials?.accessToken ||
|
||||
credentials?.refreshToken ||
|
||||
credentials?.idToken,
|
||||
),
|
||||
)
|
||||
setStoredCodexOAuthProfileId(credentials?.profileId)
|
||||
})()
|
||||
}, [])
|
||||
|
||||
React.useEffect(() => {
|
||||
refreshGithubProviderState()
|
||||
refreshCodexOAuthCredentialState()
|
||||
|
||||
return () => {
|
||||
githubRefreshEpochRef.current += 1
|
||||
codexRefreshEpochRef.current += 1
|
||||
}
|
||||
}, [refreshGithubProviderState])
|
||||
}, [refreshCodexOAuthCredentialState, refreshGithubProviderState])
|
||||
|
||||
React.useEffect(() => {
|
||||
if (screen !== 'select-ollama-model') {
|
||||
return
|
||||
}
|
||||
|
||||
let cancelled = false
|
||||
setOllamaSelection({ state: 'loading' })
|
||||
|
||||
void (async () => {
|
||||
const available = await hasLocalOllama(draft.baseUrl)
|
||||
if (!available) {
|
||||
if (!cancelled) {
|
||||
setOllamaSelection({
|
||||
state: 'unavailable',
|
||||
message:
|
||||
'Could not reach Ollama. Start Ollama first, or enter the endpoint manually.',
|
||||
})
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
const models = await listOllamaModels(draft.baseUrl)
|
||||
if (models.length === 0) {
|
||||
if (!cancelled) {
|
||||
setOllamaSelection({
|
||||
state: 'unavailable',
|
||||
message:
|
||||
'Ollama is running, but no installed models were found. Pull a chat model such as qwen2.5-coder:7b or llama3.1:8b first, or enter details manually.',
|
||||
})
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
const ranked = rankOllamaModels(models, 'balanced')
|
||||
const recommended = recommendOllamaModel(models, 'balanced')
|
||||
if (!cancelled) {
|
||||
setOllamaSelection({
|
||||
state: 'ready',
|
||||
defaultValue: recommended?.name ?? ranked[0]?.name,
|
||||
options: ranked.map(model => ({
|
||||
label: model.name,
|
||||
value: model.name,
|
||||
description: model.summary,
|
||||
})),
|
||||
})
|
||||
}
|
||||
})()
|
||||
|
||||
return () => {
|
||||
cancelled = true
|
||||
}
|
||||
}, [draft.baseUrl, screen])
|
||||
|
||||
function refreshProfiles(): void {
|
||||
const nextProfiles = getProviderProfiles()
|
||||
setProfiles(nextProfiles)
|
||||
setActiveProfileId(getActiveProviderProfile()?.id)
|
||||
refreshGithubProviderState()
|
||||
refreshCodexOAuthCredentialState()
|
||||
}
|
||||
|
||||
function clearStartupProviderOverrideFromUserSettings(): string | null {
|
||||
@@ -292,6 +506,123 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
|
||||
return error ? error.message : null
|
||||
}
|
||||
|
||||
function buildCodexOAuthActivationMessage(options: {
|
||||
prefix: string
|
||||
activationWarning: string | null
|
||||
warnings: string[]
|
||||
}): string {
|
||||
if (options.activationWarning) {
|
||||
return `${options.prefix}. Saved for next startup. Warning: ${options.warnings.join('; ')}.`
|
||||
}
|
||||
|
||||
if (options.warnings.length > 0) {
|
||||
return `${options.prefix}. OpenClaude switched to it for this session with warnings: ${options.warnings.join('; ')}.`
|
||||
}
|
||||
|
||||
return `${options.prefix}. OpenClaude switched to it for this session.`
|
||||
}
|
||||
|
||||
async function activateCodexOAuthSession(tokens?: {
|
||||
accessToken: string
|
||||
refreshToken?: string
|
||||
accountId?: string
|
||||
idToken?: string
|
||||
}): Promise<string | null> {
|
||||
const oauthEnv = buildCodexOAuthProfileEnv({
|
||||
accessToken: tokens?.accessToken ?? '',
|
||||
accountId: tokens?.accountId,
|
||||
idToken: tokens?.idToken,
|
||||
})
|
||||
|
||||
if (oauthEnv) {
|
||||
return applySavedProfileToCurrentSession({
|
||||
profileFile: createProfileFile('codex', oauthEnv),
|
||||
})
|
||||
}
|
||||
|
||||
const storedCredentials = await readCodexCredentialsAsync()
|
||||
if (!storedCredentials) {
|
||||
return 'stored Codex OAuth credentials could not be loaded'
|
||||
}
|
||||
|
||||
const storedEnv = buildCodexOAuthProfileEnv({
|
||||
accessToken: storedCredentials.accessToken,
|
||||
accountId: storedCredentials.accountId,
|
||||
idToken: storedCredentials.idToken,
|
||||
})
|
||||
if (!storedEnv) {
|
||||
return 'stored Codex OAuth credentials are missing a ChatGPT account id'
|
||||
}
|
||||
|
||||
return applySavedProfileToCurrentSession({
|
||||
profileFile: createProfileFile('codex', storedEnv),
|
||||
})
|
||||
}
|
||||
|
||||
async function activateSelectedProvider(profileId: string): Promise<void> {
|
||||
let providerLabel = 'provider'
|
||||
|
||||
try {
|
||||
if (profileId === GITHUB_PROVIDER_ID) {
|
||||
providerLabel = GITHUB_PROVIDER_LABEL
|
||||
const githubError = activateGithubProvider()
|
||||
if (githubError) {
|
||||
setErrorMessage(`Could not activate GitHub provider: ${githubError}`)
|
||||
setScreen('menu')
|
||||
return
|
||||
}
|
||||
|
||||
refreshProfiles()
|
||||
setStatusMessage(`Active provider: ${GITHUB_PROVIDER_LABEL}`)
|
||||
setScreen('menu')
|
||||
return
|
||||
}
|
||||
|
||||
const active = setActiveProviderProfile(profileId)
|
||||
if (!active) {
|
||||
setErrorMessage('Could not change active provider.')
|
||||
setScreen('menu')
|
||||
return
|
||||
}
|
||||
|
||||
providerLabel = active.name
|
||||
const settingsOverrideError =
|
||||
clearStartupProviderOverrideFromUserSettings()
|
||||
const isActiveCodexOAuth = isCodexOAuthProfile(
|
||||
active,
|
||||
storedCodexOAuthProfileId,
|
||||
)
|
||||
const activationWarning = isActiveCodexOAuth
|
||||
? await activateCodexOAuthSession()
|
||||
: null
|
||||
|
||||
refreshProfiles()
|
||||
setStatusMessage(
|
||||
isActiveCodexOAuth
|
||||
? buildCodexOAuthActivationMessage({
|
||||
prefix: `Active provider: ${active.name}`,
|
||||
activationWarning,
|
||||
warnings: [
|
||||
activationWarning,
|
||||
settingsOverrideError
|
||||
? `could not clear startup provider override (${settingsOverrideError})`
|
||||
: null,
|
||||
].filter((warning): warning is string => Boolean(warning)),
|
||||
})
|
||||
: settingsOverrideError
|
||||
? `Active provider: ${active.name}. Warning: could not clear startup provider override (${settingsOverrideError}).`
|
||||
: `Active provider: ${active.name}`,
|
||||
)
|
||||
setScreen('menu')
|
||||
} catch (error) {
|
||||
refreshProfiles()
|
||||
setStatusMessage(undefined)
|
||||
const detail = error instanceof Error ? error.message : String(error)
|
||||
setErrorMessage(`Could not finish activating ${providerLabel}: ${detail}`)
|
||||
setScreen('menu')
|
||||
}
|
||||
}
|
||||
|
||||
function closeWithCancelled(message: string): void {
|
||||
onDone({ action: 'cancelled', message })
|
||||
}
|
||||
@@ -383,59 +714,6 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
|
||||
return null
|
||||
}
|
||||
|
||||
React.useEffect(() => {
|
||||
if (screen !== 'select-ollama-model') {
|
||||
return
|
||||
}
|
||||
|
||||
let cancelled = false
|
||||
setOllamaSelection({ state: 'loading' })
|
||||
|
||||
void (async () => {
|
||||
const available = await hasLocalOllama(draft.baseUrl)
|
||||
if (!available) {
|
||||
if (!cancelled) {
|
||||
setOllamaSelection({
|
||||
state: 'unavailable',
|
||||
message:
|
||||
'Could not reach Ollama. Start Ollama first, or enter the endpoint manually.',
|
||||
})
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
const models = await listOllamaModels(draft.baseUrl)
|
||||
if (models.length === 0) {
|
||||
if (!cancelled) {
|
||||
setOllamaSelection({
|
||||
state: 'unavailable',
|
||||
message:
|
||||
'Ollama is running, but no installed models were found. Pull a chat model such as qwen2.5-coder:7b or llama3.1:8b first, or enter details manually.',
|
||||
})
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
const ranked = rankOllamaModels(models, 'balanced')
|
||||
const recommended = recommendOllamaModel(models, 'balanced')
|
||||
if (!cancelled) {
|
||||
setOllamaSelection({
|
||||
state: 'ready',
|
||||
defaultValue: recommended?.name ?? ranked[0]?.name,
|
||||
options: ranked.map(model => ({
|
||||
label: model.name,
|
||||
value: model.name,
|
||||
description: model.summary,
|
||||
})),
|
||||
})
|
||||
}
|
||||
})()
|
||||
|
||||
return () => {
|
||||
cancelled = true
|
||||
}
|
||||
}, [draft.baseUrl, screen])
|
||||
|
||||
function startCreateFromPreset(preset: ProviderPreset): void {
|
||||
const defaults = getProviderPresetDefaults(preset)
|
||||
const nextDraft = {
|
||||
@@ -557,7 +835,7 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
|
||||
description: 'Choose another provider preset',
|
||||
},
|
||||
]}
|
||||
onChange={value => {
|
||||
onChange={(value: string) => {
|
||||
if (value === 'manual') {
|
||||
setFormStepIndex(0)
|
||||
setCursorOffset(draft.name.length)
|
||||
@@ -588,7 +866,7 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
|
||||
defaultFocusValue={ollamaSelection.defaultValue}
|
||||
inlineDescriptions
|
||||
visibleOptionCount={Math.min(8, ollamaSelection.options.length)}
|
||||
onChange={value => {
|
||||
onChange={(value: string) => {
|
||||
const nextDraft = {
|
||||
...draft,
|
||||
model: value,
|
||||
@@ -654,6 +932,7 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
|
||||
})
|
||||
|
||||
function renderPresetSelection(): React.ReactNode {
|
||||
const canUseCodexOAuth = !isBareMode()
|
||||
const options = [
|
||||
{
|
||||
value: 'anthropic',
|
||||
@@ -670,6 +949,16 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
|
||||
label: 'OpenAI',
|
||||
description: 'OpenAI API with API key',
|
||||
},
|
||||
...(canUseCodexOAuth
|
||||
? [
|
||||
{
|
||||
value: 'codex-oauth',
|
||||
label: 'Codex OAuth',
|
||||
description:
|
||||
'Sign in with ChatGPT in your browser and store Codex credentials securely',
|
||||
},
|
||||
]
|
||||
: []),
|
||||
{
|
||||
value: 'moonshotai',
|
||||
label: 'Moonshot AI',
|
||||
@@ -741,11 +1030,15 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
|
||||
</Text>
|
||||
<Select
|
||||
options={options}
|
||||
onChange={value => {
|
||||
onChange={(value: string) => {
|
||||
if (value === 'skip') {
|
||||
closeWithCancelled('Provider setup skipped')
|
||||
return
|
||||
}
|
||||
if (value === 'codex-oauth') {
|
||||
setScreen('codex-oauth')
|
||||
return
|
||||
}
|
||||
startCreateFromPreset(value as ProviderPreset)
|
||||
}}
|
||||
onCancel={() => {
|
||||
@@ -755,7 +1048,7 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
|
||||
}
|
||||
setScreen('menu')
|
||||
}}
|
||||
visibleOptionCount={Math.min(12, options.length)}
|
||||
visibleOptionCount={Math.min(13, options.length)}
|
||||
/>
|
||||
</Box>
|
||||
)
|
||||
@@ -832,6 +1125,15 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
|
||||
description: 'Remove a provider profile',
|
||||
disabled: !hasSelectableProviders,
|
||||
},
|
||||
...(hasStoredCodexOAuthCredentials
|
||||
? [
|
||||
{
|
||||
value: 'logout-codex-oauth',
|
||||
label: 'Log out Codex OAuth',
|
||||
description: 'Clear securely stored Codex OAuth credentials',
|
||||
},
|
||||
]
|
||||
: []),
|
||||
{
|
||||
value: 'done',
|
||||
label: 'Done',
|
||||
@@ -876,7 +1178,7 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
|
||||
</Box>
|
||||
<Select
|
||||
options={options}
|
||||
onChange={value => {
|
||||
onChange={(value: string) => {
|
||||
setErrorMessage(undefined)
|
||||
switch (value) {
|
||||
case 'add':
|
||||
@@ -897,6 +1199,47 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
|
||||
setScreen('select-delete')
|
||||
}
|
||||
break
|
||||
case 'logout-codex-oauth': {
|
||||
const cleared = clearCodexCredentials()
|
||||
if (!cleared.success) {
|
||||
setErrorMessage(
|
||||
cleared.warning ??
|
||||
'Could not clear Codex OAuth credentials.',
|
||||
)
|
||||
break
|
||||
}
|
||||
|
||||
setHasStoredCodexOAuthCredentials(false)
|
||||
setStoredCodexOAuthProfileId(undefined)
|
||||
const codexProfile = findCodexOAuthProfile(
|
||||
getProviderProfiles(),
|
||||
storedCodexOAuthProfileId,
|
||||
)
|
||||
let settingsOverrideError: string | null = null
|
||||
if (codexProfile) {
|
||||
const result = deleteProviderProfile(codexProfile.id)
|
||||
if (!result.removed) {
|
||||
setErrorMessage(
|
||||
'Codex OAuth credentials were cleared, but the Codex profile could not be removed.',
|
||||
)
|
||||
refreshProfiles()
|
||||
break
|
||||
}
|
||||
|
||||
clearPersistedCodexOAuthProfile()
|
||||
settingsOverrideError = result.activeProfileId
|
||||
? clearStartupProviderOverrideFromUserSettings()
|
||||
: null
|
||||
}
|
||||
|
||||
refreshProfiles()
|
||||
setStatusMessage(
|
||||
settingsOverrideError
|
||||
? `Codex OAuth logged out. Warning: could not clear startup provider override (${settingsOverrideError}).`
|
||||
: 'Codex OAuth logged out.',
|
||||
)
|
||||
break
|
||||
}
|
||||
default:
|
||||
closeWithCancelled('Provider manager closed')
|
||||
break
|
||||
@@ -975,51 +1318,100 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
|
||||
|
||||
let content: React.ReactNode
|
||||
|
||||
switch (screen) {
|
||||
case 'select-preset':
|
||||
content = renderPresetSelection()
|
||||
break
|
||||
case 'select-ollama-model':
|
||||
content = renderOllamaSelection()
|
||||
break
|
||||
case 'form':
|
||||
content = renderForm()
|
||||
break
|
||||
switch (screen) {
|
||||
case 'select-preset':
|
||||
content = renderPresetSelection()
|
||||
break
|
||||
case 'select-ollama-model':
|
||||
content = renderOllamaSelection()
|
||||
break
|
||||
case 'codex-oauth':
|
||||
content = (
|
||||
<CodexOAuthSetup
|
||||
onBack={() => setScreen('select-preset')}
|
||||
onConfigured={async (tokens, persistCredentials) => {
|
||||
const payload: ProviderProfileInput = {
|
||||
provider: 'openai',
|
||||
name: CODEX_OAUTH_PROVIDER_NAME,
|
||||
baseUrl: DEFAULT_CODEX_BASE_URL,
|
||||
model: CODEX_OAUTH_PROVIDER_MODEL,
|
||||
apiKey: '',
|
||||
}
|
||||
|
||||
const existing = findCodexOAuthProfile(
|
||||
getProviderProfiles(),
|
||||
storedCodexOAuthProfileId,
|
||||
)
|
||||
const saved = existing
|
||||
? updateProviderProfile(existing.id, payload)
|
||||
: addProviderProfile(payload, { makeActive: true })
|
||||
|
||||
if (!saved) {
|
||||
setErrorMessage(
|
||||
'Codex OAuth login finished, but the provider profile could not be saved.',
|
||||
)
|
||||
setScreen('menu')
|
||||
return
|
||||
}
|
||||
|
||||
const active =
|
||||
existing && activeProfileId !== saved.id
|
||||
? setActiveProviderProfile(saved.id)
|
||||
: saved
|
||||
if (!active) {
|
||||
setErrorMessage(
|
||||
'Codex OAuth login finished, but the provider could not be set as the startup provider.',
|
||||
)
|
||||
setScreen('menu')
|
||||
return
|
||||
}
|
||||
|
||||
persistCredentials({ profileId: saved.id })
|
||||
const settingsOverrideError =
|
||||
clearStartupProviderOverrideFromUserSettings()
|
||||
const activationWarning = await activateCodexOAuthSession(tokens)
|
||||
setHasStoredCodexOAuthCredentials(true)
|
||||
setStoredCodexOAuthProfileId(saved.id)
|
||||
refreshProfiles()
|
||||
const warnings = [
|
||||
activationWarning,
|
||||
settingsOverrideError
|
||||
? `could not clear startup provider override (${settingsOverrideError})`
|
||||
: null,
|
||||
].filter((warning): warning is string => Boolean(warning))
|
||||
const message = buildCodexOAuthActivationMessage({
|
||||
prefix: 'Codex OAuth configured',
|
||||
activationWarning,
|
||||
warnings,
|
||||
})
|
||||
|
||||
if (mode === 'first-run') {
|
||||
onDone({
|
||||
action: 'saved',
|
||||
activeProfileId: active.id,
|
||||
message,
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
setStatusMessage(message)
|
||||
setErrorMessage(undefined)
|
||||
setScreen('menu')
|
||||
}}
|
||||
/>
|
||||
)
|
||||
break
|
||||
case 'form':
|
||||
content = renderForm()
|
||||
break
|
||||
case 'select-active':
|
||||
content = renderProfileSelection(
|
||||
'Set active provider',
|
||||
'No providers available. Add one first.',
|
||||
profileId => {
|
||||
if (profileId === GITHUB_PROVIDER_ID) {
|
||||
const githubError = activateGithubProvider()
|
||||
if (githubError) {
|
||||
setErrorMessage(`Could not activate GitHub provider: ${githubError}`)
|
||||
setScreen('menu')
|
||||
return
|
||||
}
|
||||
refreshProfiles()
|
||||
setStatusMessage(`Active provider: ${GITHUB_PROVIDER_LABEL}`)
|
||||
setScreen('menu')
|
||||
return
|
||||
}
|
||||
|
||||
const active = setActiveProviderProfile(profileId)
|
||||
if (!active) {
|
||||
setErrorMessage('Could not change active provider.')
|
||||
setScreen('menu')
|
||||
return
|
||||
}
|
||||
const settingsOverrideError =
|
||||
clearStartupProviderOverrideFromUserSettings()
|
||||
refreshProfiles()
|
||||
setStatusMessage(
|
||||
settingsOverrideError
|
||||
? `Active provider: ${active.name}. Warning: could not clear startup provider override (${settingsOverrideError}).`
|
||||
: `Active provider: ${active.name}`,
|
||||
)
|
||||
setScreen('menu')
|
||||
void activateSelectedProvider(profileId)
|
||||
},
|
||||
{ includeGithub: true },
|
||||
{ includeGithub: true },
|
||||
)
|
||||
break
|
||||
case 'select-edit':
|
||||
@@ -1048,10 +1440,27 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
|
||||
return
|
||||
}
|
||||
|
||||
const deletedCodexOAuthProfile =
|
||||
findCodexOAuthProfile(
|
||||
profiles,
|
||||
storedCodexOAuthProfileId,
|
||||
)?.id === profileId
|
||||
const result = deleteProviderProfile(profileId)
|
||||
if (!result.removed) {
|
||||
setErrorMessage('Could not delete provider.')
|
||||
} else {
|
||||
if (deletedCodexOAuthProfile) {
|
||||
const cleared = clearCodexCredentials()
|
||||
if (!cleared.success) {
|
||||
setErrorMessage(
|
||||
cleared.warning ??
|
||||
'Provider deleted, but Codex OAuth credentials could not be cleared.',
|
||||
)
|
||||
} else {
|
||||
setStoredCodexOAuthProfileId(undefined)
|
||||
}
|
||||
clearPersistedCodexOAuthProfile()
|
||||
}
|
||||
const settingsOverrideError = result.activeProfileId
|
||||
? clearStartupProviderOverrideFromUserSettings()
|
||||
: null
|
||||
|
||||
@@ -5,7 +5,7 @@
|
||||
* Addresses: https://github.com/Gitlawb/openclaude/issues/55
|
||||
*/
|
||||
|
||||
import { isLocalProviderUrl } from '../services/api/providerConfig.js'
|
||||
import { isLocalProviderUrl, resolveProviderRequest } from '../services/api/providerConfig.js'
|
||||
import { getLocalOpenAICompatibleProviderLabel } from '../utils/providerDiscovery.js'
|
||||
import { getSettings_DEPRECATED } from '../utils/settings/settings.js'
|
||||
import { parseUserSpecifiedModel } from '../utils/model/model.js'
|
||||
@@ -110,10 +110,17 @@ function detectProvider(): { name: string; model: string; baseUrl: string; isLoc
|
||||
|
||||
if (useOpenAI) {
|
||||
const rawModel = process.env.OPENAI_MODEL || 'gpt-4o'
|
||||
const baseUrl = process.env.OPENAI_BASE_URL || 'https://api.openai.com/v1'
|
||||
const resolvedRequest = resolveProviderRequest({
|
||||
model: rawModel,
|
||||
baseUrl: process.env.OPENAI_BASE_URL,
|
||||
})
|
||||
const baseUrl = resolvedRequest.baseUrl
|
||||
const isLocal = isLocalProviderUrl(baseUrl)
|
||||
let name = 'OpenAI'
|
||||
if (/deepseek/i.test(baseUrl) || /deepseek/i.test(rawModel)) name = 'DeepSeek'
|
||||
// Override to Codex when resolved endpoint is Codex
|
||||
if (resolvedRequest.transport === 'codex_responses' || baseUrl.includes('chatgpt.com/backend-api/codex')) {
|
||||
name = 'Codex'
|
||||
} else if (/deepseek/i.test(baseUrl) || /deepseek/i.test(rawModel)) name = 'DeepSeek'
|
||||
else if (/openrouter/i.test(baseUrl)) name = 'OpenRouter'
|
||||
else if (/together/i.test(baseUrl)) name = 'Together AI'
|
||||
else if (/groq/i.test(baseUrl)) name = 'Groq'
|
||||
@@ -123,26 +130,9 @@ function detectProvider(): { name: string; model: string; baseUrl: string; isLoc
|
||||
else if (isLocal) name = getLocalOpenAICompatibleProviderLabel(baseUrl)
|
||||
|
||||
// Resolve model alias to actual model name + reasoning effort
|
||||
let displayModel = rawModel
|
||||
const codexAliases: Record<string, { model: string; reasoningEffort?: string }> = {
|
||||
codexplan: { model: 'gpt-5.4', reasoningEffort: 'high' },
|
||||
'gpt-5.4': { model: 'gpt-5.4', reasoningEffort: 'high' },
|
||||
'gpt-5.3-codex': { model: 'gpt-5.3-codex', reasoningEffort: 'high' },
|
||||
'gpt-5.3-codex-spark': { model: 'gpt-5.3-codex-spark' },
|
||||
codexspark: { model: 'gpt-5.3-codex-spark' },
|
||||
'gpt-5.2-codex': { model: 'gpt-5.2-codex', reasoningEffort: 'high' },
|
||||
'gpt-5.1-codex-max': { model: 'gpt-5.1-codex-max', reasoningEffort: 'high' },
|
||||
'gpt-5.1-codex-mini': { model: 'gpt-5.1-codex-mini' },
|
||||
'gpt-5.4-mini': { model: 'gpt-5.4-mini', reasoningEffort: 'medium' },
|
||||
'gpt-5.2': { model: 'gpt-5.2', reasoningEffort: 'medium' },
|
||||
}
|
||||
const alias = rawModel.toLowerCase()
|
||||
if (alias in codexAliases) {
|
||||
const resolved = codexAliases[alias]
|
||||
displayModel = resolved.model
|
||||
if (resolved.reasoningEffort) {
|
||||
displayModel = `${displayModel} (${resolved.reasoningEffort})`
|
||||
}
|
||||
let displayModel = resolvedRequest.resolvedModel
|
||||
if (resolvedRequest.reasoning?.effort) {
|
||||
displayModel = `${displayModel} (${resolvedRequest.reasoning.effort})`
|
||||
}
|
||||
|
||||
return { name, model: displayModel, baseUrl, isLocal }
|
||||
|
||||
@@ -0,0 +1,173 @@
|
||||
import React from 'react'
|
||||
import { getOriginalCwd } from '../../../bootstrap/state.js'
|
||||
import { Box, Text } from '../../../ink.js'
|
||||
import { sanitizeToolNameForAnalytics } from '../../../services/analytics/metadata.js'
|
||||
import { env } from '../../../utils/env.js'
|
||||
import { shouldShowAlwaysAllowOptions } from '../../../utils/permissions/permissionsLoader.js'
|
||||
import { usePermissionRequestLogging } from '../hooks.js'
|
||||
import { PermissionDialog } from '../PermissionDialog.js'
|
||||
import {
|
||||
PermissionPrompt,
|
||||
type PermissionPromptOption,
|
||||
} from '../PermissionPrompt.js'
|
||||
import type { PermissionRequestProps } from '../PermissionRequest.js'
|
||||
import { PermissionRuleExplanation } from '../PermissionRuleExplanation.js'
|
||||
import { logUnaryPermissionEvent } from '../utils.js'
|
||||
|
||||
type OptionValue = 'yes' | 'yes-dont-ask-again' | 'no'
|
||||
|
||||
export function MonitorPermissionRequest({
|
||||
toolUseConfirm,
|
||||
onDone,
|
||||
onReject,
|
||||
workerBadge,
|
||||
}: PermissionRequestProps) {
|
||||
const { command, description } = toolUseConfirm.input as {
|
||||
command?: string
|
||||
description?: string
|
||||
}
|
||||
|
||||
usePermissionRequestLogging(toolUseConfirm, {
|
||||
completion_type: 'tool_use_single',
|
||||
language_name: 'none',
|
||||
})
|
||||
|
||||
const handleSelect = (
|
||||
value: OptionValue,
|
||||
feedback?: string,
|
||||
) => {
|
||||
switch (value) {
|
||||
case 'yes': {
|
||||
logUnaryPermissionEvent({
|
||||
completion_type: 'tool_use_single',
|
||||
event: 'accept',
|
||||
metadata: {
|
||||
language_name: 'none',
|
||||
message_id: toolUseConfirm.assistantMessage.message.id,
|
||||
platform: env.platform,
|
||||
},
|
||||
})
|
||||
toolUseConfirm.onAllow(toolUseConfirm.input, [], feedback)
|
||||
onDone()
|
||||
break
|
||||
}
|
||||
case 'yes-dont-ask-again': {
|
||||
logUnaryPermissionEvent({
|
||||
completion_type: 'tool_use_single',
|
||||
event: 'accept',
|
||||
metadata: {
|
||||
language_name: 'none',
|
||||
message_id: toolUseConfirm.assistantMessage.message.id,
|
||||
platform: env.platform,
|
||||
},
|
||||
})
|
||||
// Save the rule under 'Bash' toolName because checkPermissions
|
||||
// delegates to bashToolHasPermission which matches rules against
|
||||
// BashTool. Using 'Monitor' here would create a rule that's never
|
||||
// checked. Command-specific prefix (like BashTool's shellRuleMatching).
|
||||
const cmdForRule = command?.trim() || ''
|
||||
const prefix = cmdForRule.split(/\s+/).slice(0, 2).join(' ')
|
||||
toolUseConfirm.onAllow(toolUseConfirm.input, prefix ? [
|
||||
{
|
||||
type: 'addRules',
|
||||
rules: [{ toolName: 'Bash', ruleContent: `${prefix}:*` }],
|
||||
behavior: 'allow',
|
||||
destination: 'localSettings',
|
||||
},
|
||||
] : [])
|
||||
onDone()
|
||||
break
|
||||
}
|
||||
case 'no': {
|
||||
logUnaryPermissionEvent({
|
||||
completion_type: 'tool_use_single',
|
||||
event: 'reject',
|
||||
metadata: {
|
||||
language_name: 'none',
|
||||
message_id: toolUseConfirm.assistantMessage.message.id,
|
||||
platform: env.platform,
|
||||
},
|
||||
})
|
||||
toolUseConfirm.onReject(feedback)
|
||||
onReject()
|
||||
onDone()
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const handleCancel = () => {
|
||||
logUnaryPermissionEvent({
|
||||
completion_type: 'tool_use_single',
|
||||
event: 'reject',
|
||||
metadata: {
|
||||
language_name: 'none',
|
||||
message_id: toolUseConfirm.assistantMessage.message.id,
|
||||
platform: env.platform,
|
||||
},
|
||||
})
|
||||
toolUseConfirm.onReject()
|
||||
onReject()
|
||||
onDone()
|
||||
}
|
||||
|
||||
const showAlwaysAllow = shouldShowAlwaysAllowOptions()
|
||||
const originalCwd = getOriginalCwd()
|
||||
|
||||
const options: PermissionPromptOption<OptionValue>[] = [
|
||||
{
|
||||
label: 'Yes',
|
||||
value: 'yes',
|
||||
feedbackConfig: { type: 'accept' },
|
||||
},
|
||||
]
|
||||
|
||||
if (showAlwaysAllow) {
|
||||
options.push({
|
||||
label: (
|
||||
<Text>
|
||||
Yes, and don't ask again for{' '}
|
||||
<Text bold>Monitor</Text> commands in{' '}
|
||||
<Text bold>{originalCwd}</Text>
|
||||
</Text>
|
||||
),
|
||||
value: 'yes-dont-ask-again',
|
||||
})
|
||||
}
|
||||
|
||||
options.push({
|
||||
label: 'No',
|
||||
value: 'no',
|
||||
feedbackConfig: { type: 'reject' },
|
||||
})
|
||||
|
||||
const toolAnalyticsContext = {
|
||||
toolName: sanitizeToolNameForAnalytics(toolUseConfirm.tool.name),
|
||||
isMcp: toolUseConfirm.tool.isMcp ?? false,
|
||||
}
|
||||
|
||||
return (
|
||||
<PermissionDialog title="Monitor" workerBadge={workerBadge}>
|
||||
<Box flexDirection="column" paddingX={2} paddingY={1}>
|
||||
<Text>
|
||||
Monitor({command ?? ''})
|
||||
</Text>
|
||||
{description ? (
|
||||
<Text dimColor>{description}</Text>
|
||||
) : null}
|
||||
</Box>
|
||||
<Box flexDirection="column">
|
||||
<PermissionRuleExplanation
|
||||
permissionResult={toolUseConfirm.permissionResult}
|
||||
toolType="tool"
|
||||
/>
|
||||
<PermissionPrompt
|
||||
options={options}
|
||||
onSelect={handleSelect}
|
||||
onCancel={handleCancel}
|
||||
toolAnalyticsContext={toolAnalyticsContext}
|
||||
/>
|
||||
</Box>
|
||||
</PermissionDialog>
|
||||
)
|
||||
}
|
||||
220
src/components/useCodexOAuthFlow.test.tsx
Normal file
220
src/components/useCodexOAuthFlow.test.tsx
Normal file
@@ -0,0 +1,220 @@
|
||||
import { PassThrough } from 'node:stream'
|
||||
|
||||
import { afterEach, expect, mock, test } from 'bun:test'
|
||||
import React from 'react'
|
||||
|
||||
import { createRoot, Text } from '../ink.js'
|
||||
|
||||
const SYNC_START = '\x1B[?2026h'
|
||||
const SYNC_END = '\x1B[?2026l'
|
||||
|
||||
function createTestStreams(): {
|
||||
stdout: PassThrough
|
||||
stdin: PassThrough & {
|
||||
isTTY: boolean
|
||||
setRawMode: (mode: boolean) => void
|
||||
ref: () => void
|
||||
unref: () => void
|
||||
}
|
||||
getOutput: () => string
|
||||
} {
|
||||
let output = ''
|
||||
const stdout = new PassThrough()
|
||||
const stdin = new PassThrough() as PassThrough & {
|
||||
isTTY: boolean
|
||||
setRawMode: (mode: boolean) => void
|
||||
ref: () => void
|
||||
unref: () => void
|
||||
}
|
||||
|
||||
stdin.isTTY = true
|
||||
stdin.setRawMode = () => {}
|
||||
stdin.ref = () => {}
|
||||
stdin.unref = () => {}
|
||||
;(stdout as unknown as { columns: number }).columns = 120
|
||||
stdout.on('data', chunk => {
|
||||
output += chunk.toString()
|
||||
})
|
||||
|
||||
return {
|
||||
stdout,
|
||||
stdin,
|
||||
getOutput: () => output,
|
||||
}
|
||||
}
|
||||
|
||||
async function waitForCondition(
|
||||
predicate: () => boolean,
|
||||
options?: { timeoutMs?: number; intervalMs?: number },
|
||||
): Promise<void> {
|
||||
const timeoutMs = options?.timeoutMs ?? 5000
|
||||
const intervalMs = options?.intervalMs ?? 10
|
||||
const startedAt = Date.now()
|
||||
|
||||
while (Date.now() - startedAt < timeoutMs) {
|
||||
if (predicate()) {
|
||||
return
|
||||
}
|
||||
await Bun.sleep(intervalMs)
|
||||
}
|
||||
|
||||
throw new Error('Timed out waiting for useCodexOAuthFlow test condition')
|
||||
}
|
||||
|
||||
function extractLastFrame(output: string): string {
|
||||
let lastFrame: string | null = null
|
||||
let cursor = 0
|
||||
|
||||
while (cursor < output.length) {
|
||||
const start = output.indexOf(SYNC_START, cursor)
|
||||
if (start === -1) break
|
||||
|
||||
const contentStart = start + SYNC_START.length
|
||||
const end = output.indexOf(SYNC_END, contentStart)
|
||||
if (end === -1) break
|
||||
|
||||
const frame = output.slice(contentStart, end)
|
||||
if (frame.trim().length > 0) {
|
||||
lastFrame = frame
|
||||
}
|
||||
cursor = end + SYNC_END.length
|
||||
}
|
||||
|
||||
return lastFrame ?? output
|
||||
}
|
||||
|
||||
const TOKENS = {
|
||||
accessToken: 'oauth-access-token',
|
||||
refreshToken: 'oauth-refresh-token',
|
||||
accountId: 'acct_oauth',
|
||||
idToken: 'oauth-id-token',
|
||||
apiKey: 'oauth-api-key',
|
||||
}
|
||||
|
||||
afterEach(() => {
|
||||
mock.restore()
|
||||
})
|
||||
|
||||
// Failure path: onAuthenticated rejects before invoking persistCredentials,
// so the hook must surface the error WITHOUT writing tokens to storage.
test('does not persist credentials when downstream setup rejects', async () => {
  const saveCodexCredentials = mock(() => ({ success: true }))
  const cleanup = mock(() => {})
  // Simulates downstream profile setup failing after tokens have arrived.
  const onAuthenticated = mock(async () => {
    throw new Error('profile save failed')
  })
  const deps = {
    // Fake OAuth service: hands back the auth URL, then resolves to TOKENS.
    createOAuthService: () => ({
      async startOAuthFlow(
        onAuthorizationUrl: (authUrl: string) => void | Promise<void>,
      ) {
        await onAuthorizationUrl('https://chatgpt.com/codex')
        return TOKENS
      },
      cleanup,
    }),
    openBrowser: async () => true,
    saveCodexCredentials,
    isBareMode: () => false,
  }

  // Cache-busting query string forces a fresh module instance per test.
  const { useCodexOAuthFlow } = await import(
    `./useCodexOAuthFlow.js?real-reject-${Date.now()}-${Math.random()}`
  )

  // Renders the hook and mirrors its status (or error message) as text.
  function Harness(): React.ReactNode {
    const handleAuthenticated = React.useCallback(onAuthenticated, [onAuthenticated])
    const status = useCodexOAuthFlow({
      onAuthenticated: handleAuthenticated,
      deps,
    })

    return <Text>{status.state === 'error' ? status.message : status.state}</Text>
  }

  const streams = createTestStreams()
  const root = await createRoot({
    stdout: streams.stdout as unknown as NodeJS.WriteStream,
    stdin: streams.stdin as unknown as NodeJS.ReadStream,
    patchConsole: false,
  })
  root.render(<Harness />)

  try {
    await waitForCondition(() => onAuthenticated.mock.calls.length === 1)
    // Two microtask flushes let the rejection propagate through the hook.
    await Bun.sleep(0)
    await Bun.sleep(0)
    expect(onAuthenticated).toHaveBeenCalled()
    // Key assertion: no credentials were written despite the OAuth success.
    expect(saveCodexCredentials).not.toHaveBeenCalled()
  } finally {
    root.unmount()
    streams.stdin.end()
    streams.stdout.end()
    await Bun.sleep(0)
  }
})
|
||||
|
||||
// Happy path: persistence is caller-driven. Credentials must be written only
// after onAuthenticated calls persistCredentials, and must carry the
// profileId the caller supplied.
test('persists credentials with profile linkage after downstream setup succeeds', async () => {
  const saveCodexCredentials = mock(() => ({ success: true }))
  // Downstream setup succeeds and opts in to persistence with a profile id.
  const onAuthenticated = mock(
    async (
      _tokens: typeof TOKENS,
      persistCredentials: (options?: { profileId?: string }) => void,
    ) => {
      persistCredentials({ profileId: 'profile_codex_oauth' })
    },
  )
  const cleanup = mock(() => {})
  const deps = {
    // Fake OAuth service: hands back the auth URL, then resolves to TOKENS.
    createOAuthService: () => ({
      async startOAuthFlow(
        onAuthorizationUrl: (authUrl: string) => void | Promise<void>,
      ) {
        await onAuthorizationUrl('https://chatgpt.com/codex')
        return TOKENS
      },
      cleanup,
    }),
    openBrowser: async () => true,
    saveCodexCredentials,
    isBareMode: () => false,
  }

  // Cache-busting query string forces a fresh module instance per test.
  const { useCodexOAuthFlow } = await import(
    `./useCodexOAuthFlow.js?real-persist-${Date.now()}-${Math.random()}`
  )

  function Harness(): React.ReactNode {
    const handleAuthenticated = React.useCallback(onAuthenticated, [onAuthenticated])
    useCodexOAuthFlow({
      onAuthenticated: handleAuthenticated,
      deps,
    })
    return <Text>waiting</Text>
  }

  const streams = createTestStreams()
  const root = await createRoot({
    stdout: streams.stdout as unknown as NodeJS.WriteStream,
    stdin: streams.stdin as unknown as NodeJS.ReadStream,
    patchConsole: false,
  })
  root.render(<Harness />)

  try {
    await waitForCondition(() => onAuthenticated.mock.calls.length === 1)
    await waitForCondition(() => saveCodexCredentials.mock.calls.length === 1)
    expect(onAuthenticated).toHaveBeenCalled()
    // Verify every token field and the caller-supplied profile linkage.
    expect(saveCodexCredentials).toHaveBeenCalledWith({
      apiKey: TOKENS.apiKey,
      accessToken: TOKENS.accessToken,
      refreshToken: TOKENS.refreshToken,
      idToken: TOKENS.idToken,
      accountId: TOKENS.accountId,
      profileId: 'profile_codex_oauth',
    })
  } finally {
    root.unmount()
    streams.stdin.end()
    streams.stdout.end()
    await Bun.sleep(0)
  }
})
|
||||
134
src/components/useCodexOAuthFlow.ts
Normal file
134
src/components/useCodexOAuthFlow.ts
Normal file
@@ -0,0 +1,134 @@
|
||||
import * as React from 'react'
|
||||
|
||||
import {
|
||||
CodexOAuthService,
|
||||
type CodexOAuthTokens,
|
||||
} from '../services/api/codexOAuth.js'
|
||||
import { openBrowser } from '../utils/browser.js'
|
||||
import { saveCodexCredentials } from '../utils/codexCredentials.js'
|
||||
import { isBareMode } from '../utils/envUtils.js'
|
||||
|
||||
/**
 * Observable state of the Codex OAuth flow as it progresses:
 * 'starting' → 'waiting' (auth URL issued) → completion driven by the
 * caller's onAuthenticated, or 'error' at any point.
 */
export type CodexOAuthFlowStatus =
  | { state: 'starting' }
  | {
      state: 'waiting'
      authUrl: string
      // null until the openBrowser attempt resolves; then records whether
      // the browser was actually opened.
      browserOpened: boolean | null
    }
  | {
      state: 'error'
      message: string
    }

// Callback handed to onAuthenticated: writes the captured tokens to secure
// storage, optionally linked to a profile. Throws if saving fails.
type PersistCodexOAuthCredentials = (options?: {
  profileId?: string
}) => void

// Injection points for tests; production callers omit `deps` and get the
// real OAuth service, browser opener, credential store, and mode check.
type CodexOAuthFlowDependencies = {
  createOAuthService?: () => Pick<
    CodexOAuthService,
    'startOAuthFlow' | 'cleanup'
  >
  openBrowser?: typeof openBrowser
  saveCodexCredentials?: typeof saveCodexCredentials
  isBareMode?: typeof isBareMode
}
|
||||
|
||||
// Default factory for the production flow: a real CodexOAuthService.
// Returned narrowed to Pick<...> so tests can substitute a minimal fake.
function createDefaultOAuthService(): Pick<
  CodexOAuthService,
  'startOAuthFlow' | 'cleanup'
> {
  return new CodexOAuthService()
}
|
||||
|
||||
export function useCodexOAuthFlow(options: {
|
||||
onAuthenticated: (
|
||||
tokens: CodexOAuthTokens,
|
||||
persistCredentials: PersistCodexOAuthCredentials,
|
||||
) => void | Promise<void>
|
||||
deps?: CodexOAuthFlowDependencies
|
||||
}): CodexOAuthFlowStatus {
|
||||
const { onAuthenticated } = options
|
||||
const createOAuthService =
|
||||
options.deps?.createOAuthService ?? createDefaultOAuthService
|
||||
const openBrowserFn = options.deps?.openBrowser ?? openBrowser
|
||||
const saveCredentials =
|
||||
options.deps?.saveCodexCredentials ?? saveCodexCredentials
|
||||
const isBareModeFn = options.deps?.isBareMode ?? isBareMode
|
||||
const [status, setStatus] = React.useState<CodexOAuthFlowStatus>({
|
||||
state: 'starting',
|
||||
})
|
||||
|
||||
React.useEffect(() => {
|
||||
if (isBareModeFn()) {
|
||||
setStatus({
|
||||
state: 'error',
|
||||
message:
|
||||
'Codex OAuth is unavailable in --bare because secure storage is disabled.',
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
let cancelled = false
|
||||
const oauthService = createOAuthService()
|
||||
|
||||
void oauthService
|
||||
.startOAuthFlow(async authUrl => {
|
||||
if (cancelled) return
|
||||
setStatus({
|
||||
state: 'waiting',
|
||||
authUrl,
|
||||
browserOpened: null,
|
||||
})
|
||||
const browserOpened = await openBrowserFn(authUrl)
|
||||
if (cancelled) return
|
||||
setStatus({
|
||||
state: 'waiting',
|
||||
authUrl,
|
||||
browserOpened,
|
||||
})
|
||||
})
|
||||
.then(async tokens => {
|
||||
if (cancelled) return
|
||||
|
||||
const persistCredentials: PersistCodexOAuthCredentials = options => {
|
||||
const saved = saveCredentials({
|
||||
apiKey: tokens.apiKey,
|
||||
accessToken: tokens.accessToken,
|
||||
refreshToken: tokens.refreshToken,
|
||||
idToken: tokens.idToken,
|
||||
accountId: tokens.accountId,
|
||||
profileId: options?.profileId,
|
||||
})
|
||||
if (!saved.success) {
|
||||
throw new Error(
|
||||
saved.warning ??
|
||||
'Codex OAuth succeeded, but credentials could not be saved securely.',
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
await onAuthenticated(tokens, persistCredentials)
|
||||
})
|
||||
.catch(error => {
|
||||
if (cancelled) return
|
||||
setStatus({
|
||||
state: 'error',
|
||||
message: error instanceof Error ? error.message : String(error),
|
||||
})
|
||||
})
|
||||
|
||||
return () => {
|
||||
cancelled = true
|
||||
oauthService.cleanup()
|
||||
}
|
||||
}, [
|
||||
createOAuthService,
|
||||
isBareModeFn,
|
||||
onAuthenticated,
|
||||
openBrowserFn,
|
||||
saveCredentials,
|
||||
])
|
||||
|
||||
return status
|
||||
}
|
||||
@@ -37,8 +37,6 @@ export const ALL_AGENT_DISALLOWED_TOOLS = new Set([
|
||||
TASK_OUTPUT_TOOL_NAME,
|
||||
EXIT_PLAN_MODE_V2_TOOL_NAME,
|
||||
ENTER_PLAN_MODE_TOOL_NAME,
|
||||
// Allow Agent tool for agents when user is ant (enables nested agents)
|
||||
...(process.env.USER_TYPE === 'ant' ? [] : [AGENT_TOOL_NAME]),
|
||||
ASK_USER_QUESTION_TOOL_NAME,
|
||||
TASK_STOP_TOOL_NAME,
|
||||
// Prevent recursive workflow execution inside subagents.
|
||||
@@ -82,9 +80,9 @@ export const IN_PROCESS_TEAMMATE_ALLOWED_TOOLS = new Set([
|
||||
SEND_MESSAGE_TOOL_NAME,
|
||||
// Teammate-created crons are tagged with the creating agentId and routed to
|
||||
// that teammate's pendingUserMessages queue (see useScheduledTasks.ts).
|
||||
...(feature('AGENT_TRIGGERS')
|
||||
? [CRON_CREATE_TOOL_NAME, CRON_DELETE_TOOL_NAME, CRON_LIST_TOOL_NAME]
|
||||
: []),
|
||||
CRON_CREATE_TOOL_NAME,
|
||||
CRON_DELETE_TOOL_NAME,
|
||||
CRON_LIST_TOOL_NAME,
|
||||
])
|
||||
|
||||
/*
|
||||
|
||||
18
src/coordinator/workerAgent.ts
Normal file
18
src/coordinator/workerAgent.ts
Normal file
@@ -0,0 +1,18 @@
|
||||
import type { BuiltInAgentDefinition } from '../tools/AgentTool/loadAgentsDir.js'
import { EXPLORE_AGENT } from '../tools/AgentTool/built-in/exploreAgent.js'
import { GENERAL_PURPOSE_AGENT } from '../tools/AgentTool/built-in/generalPurposeAgent.js'
import { PLAN_AGENT } from '../tools/AgentTool/built-in/planAgent.js'

// The coordinator system prompt instructs the model to spawn workers with
// subagent_type: "worker". This agent definition matches that type so
// AgentTool.tsx can resolve it. It reuses GENERAL_PURPOSE_AGENT's capabilities.
const WORKER_AGENT: BuiltInAgentDefinition = {
  ...GENERAL_PURPOSE_AGENT,
  agentType: 'worker',
  whenToUse:
    'Worker agent for coordinator mode. Executes tasks autonomously — research, implementation, or verification.',
}

/**
 * Agent definitions available in coordinator mode: the coordinator-specific
 * worker plus the standard built-in agents (general-purpose, explore, plan).
 */
export function getCoordinatorAgents(): BuiltInAgentDefinition[] {
  return [WORKER_AGENT, GENERAL_PURPOSE_AGENT, EXPLORE_AGENT, PLAN_AGENT]
}
|
||||
123
src/hooks/useApiKeyVerification.test.tsx
Normal file
123
src/hooks/useApiKeyVerification.test.tsx
Normal file
@@ -0,0 +1,123 @@
|
||||
import { PassThrough } from 'node:stream'
|
||||
|
||||
import { afterEach, expect, mock, test } from 'bun:test'
|
||||
import React from 'react'
|
||||
import { createRoot, Text } from '../ink.js'
|
||||
|
||||
type AuthState = {
|
||||
anthropicAuthEnabled: boolean
|
||||
claudeSubscriber: boolean
|
||||
key?: string
|
||||
source?: string
|
||||
}
|
||||
|
||||
function createTestStreams(): {
|
||||
stdout: PassThrough
|
||||
stdin: PassThrough & {
|
||||
isTTY: boolean
|
||||
setRawMode: (mode: boolean) => void
|
||||
ref: () => void
|
||||
unref: () => void
|
||||
}
|
||||
} {
|
||||
const stdout = new PassThrough()
|
||||
const stdin = new PassThrough() as PassThrough & {
|
||||
isTTY: boolean
|
||||
setRawMode: (mode: boolean) => void
|
||||
ref: () => void
|
||||
unref: () => void
|
||||
}
|
||||
|
||||
stdin.isTTY = true
|
||||
stdin.setRawMode = () => {}
|
||||
stdin.ref = () => {}
|
||||
stdin.unref = () => {}
|
||||
;(stdout as unknown as { columns: number }).columns = 120
|
||||
|
||||
return { stdout, stdin }
|
||||
}
|
||||
|
||||
async function waitForCondition(
|
||||
predicate: () => boolean,
|
||||
timeoutMs = 2000,
|
||||
): Promise<void> {
|
||||
const startedAt = Date.now()
|
||||
|
||||
while (Date.now() - startedAt < timeoutMs) {
|
||||
if (predicate()) {
|
||||
return
|
||||
}
|
||||
await Bun.sleep(10)
|
||||
}
|
||||
|
||||
throw new Error('Timed out waiting for useApiKeyVerification test state')
|
||||
}
|
||||
|
||||
afterEach(() => {
|
||||
mock.restore()
|
||||
})
|
||||
|
||||
// Regression test: a 'missing' status computed while Anthropic auth was
// enabled must be reset to 'valid' once the session switches to a
// third-party provider (isAnthropicAuthEnabled() flips to false).
test('useApiKeyVerification resets stale missing status when the session switches to a third-party provider', async () => {
  // Mutable auth state the mocked auth module reads on every call.
  const authState: AuthState = {
    anthropicAuthEnabled: true,
    claudeSubscriber: false,
  }
  const seenStatuses: string[] = []

  // No key and no apiKeyHelper source → the hook should report 'missing'
  // while Anthropic auth is enabled.
  mock.module('../utils/auth.js', () => ({
    getAnthropicApiKeyWithSource: () => ({
      key: authState.key,
      source: authState.source,
    }),
    getApiKeyFromApiKeyHelper: async () => undefined,
    isAnthropicAuthEnabled: () => authState.anthropicAuthEnabled,
    isClaudeAISubscriber: () => authState.claudeSubscriber,
  }))

  mock.module('../bootstrap/state.js', () => ({
    getIsNonInteractiveSession: () => false,
  }))

  mock.module('../services/api/claude.js', () => ({
    verifyApiKey: async () => true,
  }))

  // @ts-expect-error cache-busting query string for Bun module mocks
  const { useApiKeyVerification } = await import(
    './useApiKeyVerification.ts?switch-to-third-party'
  )

  // Records every status transition the hook produces.
  function Harness(): React.ReactNode {
    const { status } = useApiKeyVerification()

    React.useEffect(() => {
      seenStatuses.push(status)
    }, [status])

    return <Text>{status}</Text>
  }

  const { stdout, stdin } = createTestStreams()
  const root = await createRoot({
    stdout: stdout as unknown as NodeJS.WriteStream,
    stdin: stdin as unknown as NodeJS.ReadStream,
    patchConsole: false,
  })

  root.render(<Harness />)

  await waitForCondition(() => seenStatuses.includes('missing'))

  // Simulate switching to a third-party provider, then re-render.
  authState.anthropicAuthEnabled = false
  root.render(<Harness />)

  await waitForCondition(() => seenStatuses.includes('valid'))

  root.unmount()
  stdin.end()
  stdout.end()
  await Bun.sleep(0)

  // Sanity-check the full transition: started 'missing', ended 'valid'.
  expect(seenStatuses[0]).toBe('missing')
  expect(seenStatuses).toContain('valid')
})
|
||||
@@ -1,4 +1,4 @@
|
||||
import { useCallback, useState } from 'react'
|
||||
import { useCallback, useEffect, useState } from 'react'
|
||||
import { getIsNonInteractiveSession } from '../bootstrap/state.js'
|
||||
import { verifyApiKey } from '../services/api/claude.js'
|
||||
import {
|
||||
@@ -21,24 +21,43 @@ export type ApiKeyVerificationResult = {
|
||||
error: Error | null
|
||||
}
|
||||
|
||||
export function useApiKeyVerification(): ApiKeyVerificationResult {
|
||||
const [status, setStatus] = useState<VerificationStatus>(() => {
|
||||
if (!isAnthropicAuthEnabled() || isClaudeAISubscriber()) {
|
||||
return 'valid'
|
||||
}
|
||||
// Use skipRetrievingKeyFromApiKeyHelper to avoid executing apiKeyHelper
|
||||
// before trust dialog is shown (security: prevents RCE via settings.json)
|
||||
const { key, source } = getAnthropicApiKeyWithSource({
|
||||
skipRetrievingKeyFromApiKeyHelper: true,
|
||||
})
|
||||
// If apiKeyHelper is configured, we have a key source even though we
|
||||
// haven't executed it yet - return 'loading' to indicate we'll verify later
|
||||
if (key || source === 'apiKeyHelper') {
|
||||
return 'loading'
|
||||
}
|
||||
return 'missing'
|
||||
function getInitialVerificationStatus(): VerificationStatus {
|
||||
if (!isAnthropicAuthEnabled() || isClaudeAISubscriber()) {
|
||||
return 'valid'
|
||||
}
|
||||
// Use skipRetrievingKeyFromApiKeyHelper to avoid executing apiKeyHelper
|
||||
// before trust dialog is shown (security: prevents RCE via settings.json)
|
||||
const { key, source } = getAnthropicApiKeyWithSource({
|
||||
skipRetrievingKeyFromApiKeyHelper: true,
|
||||
})
|
||||
// If apiKeyHelper is configured, we have a key source even though we
|
||||
// haven't executed it yet - return 'loading' to indicate we'll verify later
|
||||
if (key || source === 'apiKeyHelper') {
|
||||
return 'loading'
|
||||
}
|
||||
return 'missing'
|
||||
}
|
||||
|
||||
export function useApiKeyVerification(): ApiKeyVerificationResult {
|
||||
const [status, setStatus] = useState<VerificationStatus>(
|
||||
getInitialVerificationStatus,
|
||||
)
|
||||
const [error, setError] = useState<Error | null>(null)
|
||||
const anthropicVerificationEnabled =
|
||||
isAnthropicAuthEnabled() && !isClaudeAISubscriber()
|
||||
|
||||
useEffect(() => {
|
||||
const nextStatus = anthropicVerificationEnabled
|
||||
? getInitialVerificationStatus()
|
||||
: 'valid'
|
||||
|
||||
setStatus(currentStatus =>
|
||||
currentStatus === nextStatus ? currentStatus : nextStatus,
|
||||
)
|
||||
if (nextStatus !== 'error') {
|
||||
setError(null)
|
||||
}
|
||||
}, [anthropicVerificationEnabled])
|
||||
|
||||
const verify = useCallback(async (): Promise<void> => {
|
||||
if (!isAnthropicAuthEnabled() || isClaudeAISubscriber()) {
|
||||
|
||||
@@ -434,7 +434,7 @@ export function useReplBridge(messages: Message[], setMessages: (action: React.S
|
||||
if (!store.getState().toolPermissionContext.isBypassPermissionsModeAvailable) {
|
||||
return {
|
||||
ok: false,
|
||||
error: 'Cannot set permission mode to bypassPermissions because the session was not launched with --dangerously-skip-permissions'
|
||||
error: 'Cannot set permission mode to bypassPermissions. Enable it with --allow-dangerously-skip-permissions or set permissions.allowBypassPermissionsMode in settings.json'
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
@@ -481,16 +481,16 @@ export const CLEAR_TAB_STATUS = osc(
|
||||
)
|
||||
|
||||
/**
|
||||
* Gate for emitting OSC 21337 (tab-status indicator). Ant-only while the
|
||||
* spec is unstable. Terminals that don't recognize it discard silently, so
|
||||
* emission is safe unconditionally — we don't gate on terminal detection
|
||||
* Gate for emitting OSC 21337 (tab-status indicator). Currently disabled
|
||||
* (spec is unstable). Terminals that don't recognize it discard silently,
|
||||
* so emission is safe unconditionally — we don't gate on terminal detection
|
||||
* since support is expected across several terminals.
|
||||
*
|
||||
* Callers must wrap output with wrapForMultiplexer() so tmux/screen
|
||||
* DCS-passthrough carries the sequence to the outer terminal.
|
||||
*/
|
||||
export function supportsTabStatus(): boolean {
|
||||
return process.env.USER_TYPE === 'ant'
|
||||
return false
|
||||
}
|
||||
|
||||
/**
|
||||
|
||||
@@ -74,7 +74,7 @@ export function isTeamMemoryEnabled(): boolean {
|
||||
if (!isAutoMemoryEnabled()) {
|
||||
return false
|
||||
}
|
||||
return getFeatureValue_CACHED_MAY_BE_STALE('tengu_herring_clock', false)
|
||||
return getFeatureValue_CACHED_MAY_BE_STALE('tengu_herring_clock', true)
|
||||
}
|
||||
|
||||
/**
|
||||
|
||||
113
src/query.ts
113
src/query.ts
@@ -160,6 +160,7 @@ function* yieldMissingToolResultBlocks(
|
||||
* rules, ye will be punished with an entire day of debugging and hair pulling.
|
||||
*/
|
||||
const MAX_OUTPUT_TOKENS_RECOVERY_LIMIT = 3
|
||||
const MAX_CONTINUATION_NUDGES = 3
|
||||
|
||||
/**
|
||||
* Is this a max_output_tokens error message? If so, the streaming loop should
|
||||
@@ -209,6 +210,10 @@ type State = {
|
||||
pendingToolUseSummary: Promise<ToolUseSummaryMessage | null> | undefined
|
||||
stopHookActive: boolean | undefined
|
||||
turnCount: number
|
||||
// Count of consecutive continuation nudges within the current turn.
|
||||
// Capped at MAX_CONTINUATION_NUDGES to prevent infinite nudge loops
|
||||
// when the model keeps matching continuation signals without tool calls.
|
||||
continuationNudgeCount: number
|
||||
// Why the previous iteration continued. Undefined on first iteration.
|
||||
// Lets tests assert recovery paths fired without inspecting message contents.
|
||||
transition: Continue | undefined
|
||||
@@ -272,6 +277,7 @@ async function* queryLoop(
|
||||
maxOutputTokensRecoveryCount: 0,
|
||||
hasAttemptedReactiveCompact: false,
|
||||
turnCount: 1,
|
||||
continuationNudgeCount: 0,
|
||||
pendingToolUseSummary: undefined,
|
||||
transition: undefined,
|
||||
}
|
||||
@@ -645,6 +651,35 @@ async function* queryLoop(
|
||||
}
|
||||
}
|
||||
|
||||
// Safety net: when auto-compact's circuit breaker has tripped (3+
|
||||
// consecutive failures), the normal blocking check above is gated on
|
||||
// reactiveCompact. If reactiveCompact is also enabled but ALSO fails
|
||||
// (or is disabled), the oversized context goes straight to the API and
|
||||
// gets a 500. This check catches that gap — if compaction is exhausted
|
||||
// and context is still over the autocompact threshold, block immediately
|
||||
// with a clear message instead of burning an API call that will 500.
|
||||
if (
|
||||
tracking?.consecutiveFailures !== undefined &&
|
||||
tracking.consecutiveFailures >= 3 &&
|
||||
isAutoCompactEnabled()
|
||||
) {
|
||||
const model = toolUseContext.options.mainLoopModel
|
||||
const tokenUsage = tokenCountWithEstimation(messagesForQuery) - snipTokensFreed
|
||||
const { isAboveAutoCompactThreshold } = calculateTokenWarningState(
|
||||
tokenUsage,
|
||||
model,
|
||||
)
|
||||
if (isAboveAutoCompactThreshold) {
|
||||
yield createAssistantAPIErrorMessage({
|
||||
content:
|
||||
'The conversation has exceeded the context limit and automatic compaction has failed. ' +
|
||||
'Press esc twice to go up a few messages and try again, or start a new session with /new.',
|
||||
error: 'invalid_request',
|
||||
})
|
||||
return { reason: 'blocking_limit' }
|
||||
}
|
||||
}
|
||||
|
||||
let attemptWithFallback = true
|
||||
|
||||
queryCheckpoint('query_api_loop_start')
|
||||
@@ -1102,6 +1137,7 @@ async function* queryLoop(
|
||||
pendingToolUseSummary: undefined,
|
||||
stopHookActive: undefined,
|
||||
turnCount,
|
||||
continuationNudgeCount: state.continuationNudgeCount,
|
||||
transition: {
|
||||
reason: 'collapse_drain_retry',
|
||||
committed: drained.committed,
|
||||
@@ -1155,6 +1191,7 @@ async function* queryLoop(
|
||||
pendingToolUseSummary: undefined,
|
||||
stopHookActive: undefined,
|
||||
turnCount,
|
||||
continuationNudgeCount: state.continuationNudgeCount,
|
||||
transition: { reason: 'reactive_compact_retry' },
|
||||
}
|
||||
state = next
|
||||
@@ -1210,6 +1247,7 @@ async function* queryLoop(
|
||||
pendingToolUseSummary: undefined,
|
||||
stopHookActive: undefined,
|
||||
turnCount,
|
||||
continuationNudgeCount: state.continuationNudgeCount,
|
||||
transition: { reason: 'max_output_tokens_escalate' },
|
||||
}
|
||||
state = next
|
||||
@@ -1238,6 +1276,7 @@ async function* queryLoop(
|
||||
pendingToolUseSummary: undefined,
|
||||
stopHookActive: undefined,
|
||||
turnCount,
|
||||
continuationNudgeCount: state.continuationNudgeCount,
|
||||
transition: {
|
||||
reason: 'max_output_tokens_recovery',
|
||||
attempt: maxOutputTokensRecoveryCount + 1,
|
||||
@@ -1295,6 +1334,7 @@ async function* queryLoop(
|
||||
pendingToolUseSummary: undefined,
|
||||
stopHookActive: true,
|
||||
turnCount,
|
||||
continuationNudgeCount: state.continuationNudgeCount,
|
||||
transition: { reason: 'stop_hook_blocking' },
|
||||
}
|
||||
state = next
|
||||
@@ -1331,6 +1371,7 @@ async function* queryLoop(
|
||||
pendingToolUseSummary: undefined,
|
||||
stopHookActive: undefined,
|
||||
turnCount,
|
||||
continuationNudgeCount: state.continuationNudgeCount,
|
||||
transition: { reason: 'token_budget_continuation' },
|
||||
}
|
||||
continue
|
||||
@@ -1350,6 +1391,77 @@ async function* queryLoop(
|
||||
}
|
||||
}
|
||||
|
||||
// Continuation nudge: detect when the model signals intent to continue
|
||||
// (e.g., "so now I have to do it", "let me now...", "I'll need to...")
|
||||
// but returned no tool calls. This prevents premature task completion.
|
||||
//
|
||||
// Guard: capped at MAX_CONTINUATION_NUDGES to prevent infinite loops
|
||||
// when the model keeps matching signals without ever calling tools.
|
||||
if (
|
||||
assistantMessages.length > 0 &&
|
||||
turnCount < (maxTurns ?? Infinity) &&
|
||||
state.continuationNudgeCount < MAX_CONTINUATION_NUDGES
|
||||
) {
|
||||
const lastAssistant = assistantMessages.at(-1)
|
||||
if (lastAssistant?.type === 'assistant') {
|
||||
const lastText = lastAssistant.message.content
|
||||
.filter((b): b is { type: 'text'; text: string } => b.type === 'text')
|
||||
.map(b => b.text)
|
||||
.join(' ')
|
||||
.toLowerCase()
|
||||
|
||||
// Tightened patterns: require explicit action verbs and exclude
|
||||
// common explanatory phrasing to reduce false positives.
|
||||
const continuationSignals = [
|
||||
// Only match "so now I/let me/we" followed by an action verb
|
||||
/\bso now (i|let me|we) (need to|have to|should|must|will) (do|create|write|edit|update|fix|implement|add|run|check|make|build|set up)\b/,
|
||||
// "now I'll" + action (not "now I'll explain" etc.)
|
||||
/\bnow i('ll| will) (do|create|write|edit|update|fix|implement|add|run|check|make|build|set up|go|proceed)\b/,
|
||||
// "let me" + action (not "let me think/explain/show")
|
||||
/\blet me (go ahead and |now )?(do|create|write|edit|update|fix|implement|add|run|check|make|build|set up|proceed)\b/,
|
||||
// "I'll/I need to/I have to" + action, only if message is short (<80 chars)
|
||||
...(lastText.length < 80
|
||||
? [/\b(i('ll| will| need to| have to| must) (now )?(do|create|write|edit|update|fix|implement|add|run|check|make|build|set up))\b/]
|
||||
: []),
|
||||
// "time to" + action
|
||||
/\btime to (do|create|write|edit|update|fix|implement|add|run|check|make|build|get started|begin)\b/,
|
||||
// "next, I'll/let me" + action, only if message is short
|
||||
...(lastText.length < 80
|
||||
? [/\bnext,?\s+(i('ll| will)|let me|i need to) (do|create|write|edit|update|fix|implement|add|run|check|make|build)\b/]
|
||||
: []),
|
||||
]
|
||||
|
||||
// Don't nudge if the text contains completion markers
|
||||
const completionMarkers = /\b(done|finished|completed|complete|summary|that's all|that is all|all set|hope this helps|let me know if)\b/
|
||||
if (completionMarkers.test(lastText)) {
|
||||
// Model signaled completion — don't nudge
|
||||
} else if (continuationSignals.some(re => re.test(lastText))) {
|
||||
logForDebugging(
|
||||
`Continuation nudge triggered (${state.continuationNudgeCount + 1}/${MAX_CONTINUATION_NUDGES}): model said "${lastText.slice(-120)}" without tool calls`,
|
||||
)
|
||||
const nudge = createUserMessage({
|
||||
content: 'Continue with the task. Use the appropriate tools to proceed.',
|
||||
isMeta: true,
|
||||
})
|
||||
const next: State = {
|
||||
messages: [...messagesForQuery, ...assistantMessages, nudge],
|
||||
toolUseContext,
|
||||
autoCompactTracking: tracking,
|
||||
maxOutputTokensRecoveryCount: 0,
|
||||
hasAttemptedReactiveCompact: false,
|
||||
maxOutputTokensOverride: undefined,
|
||||
pendingToolUseSummary: undefined,
|
||||
stopHookActive: undefined,
|
||||
turnCount,
|
||||
continuationNudgeCount: state.continuationNudgeCount + 1,
|
||||
transition: { reason: 'continuation_nudge' },
|
||||
}
|
||||
state = next
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return { reason: 'completed' }
|
||||
}
|
||||
|
||||
@@ -1715,6 +1827,7 @@ async function* queryLoop(
|
||||
turnCount: nextTurnCount,
|
||||
maxOutputTokensRecoveryCount: 0,
|
||||
hasAttemptedReactiveCompact: false,
|
||||
continuationNudgeCount: 0,
|
||||
pendingToolUseSummary: nextPendingToolUseSummary,
|
||||
maxOutputTokensOverride: undefined,
|
||||
stopHookActive,
|
||||
|
||||
@@ -196,7 +196,7 @@ const PROACTIVE_NO_OP_SUBSCRIBE = (_cb: () => void) => () => { };
|
||||
const PROACTIVE_FALSE = () => false;
|
||||
const SUGGEST_BG_PR_NOOP = (_p: string, _n: string): boolean => false;
|
||||
const useProactive = feature('PROACTIVE') || feature('KAIROS') ? require('../proactive/useProactive.js').useProactive : null;
|
||||
const useScheduledTasks = feature('AGENT_TRIGGERS') ? require('../hooks/useScheduledTasks.js').useScheduledTasks : null;
|
||||
const useScheduledTasks = require('../hooks/useScheduledTasks.js').useScheduledTasks;
|
||||
/* eslint-enable @typescript-eslint/no-require-imports */
|
||||
import { isAgentSwarmsEnabled } from '../utils/agentSwarmsEnabled.js';
|
||||
import { useTaskListWatcher } from '../hooks/useTaskListWatcher.js';
|
||||
@@ -4076,21 +4076,13 @@ export function REPL({
|
||||
});
|
||||
|
||||
// Scheduled tasks from .claude/scheduled_tasks.json (CronCreate/Delete/List)
|
||||
if (feature('AGENT_TRIGGERS')) {
|
||||
// Assistant mode bypasses the isLoading gate (the proactive tick →
|
||||
// Sleep → tick loop would otherwise starve the scheduler).
|
||||
// kairosEnabled is set once in initialState (main.tsx) and never mutated — no
|
||||
// subscription needed. The tengu_kairos_cron runtime gate is checked inside
|
||||
// useScheduledTasks's effect (not here) since wrapping a hook call in a dynamic
|
||||
// condition would break rules-of-hooks.
|
||||
const assistantMode = store.getState().kairosEnabled;
|
||||
// biome-ignore lint/correctness/useHookAtTopLevel: feature() is a compile-time constant
|
||||
useScheduledTasks!({
|
||||
isLoading,
|
||||
assistantMode,
|
||||
setMessages
|
||||
});
|
||||
}
|
||||
// and session-only /loop runs.
|
||||
const assistantMode = store.getState().kairosEnabled;
|
||||
useScheduledTasks({
|
||||
isLoading,
|
||||
assistantMode,
|
||||
setMessages
|
||||
});
|
||||
|
||||
// Note: Permission polling is now handled by useInboxPoller
|
||||
// - Workers receive permission responses via mailbox messages
|
||||
|
||||
@@ -116,9 +116,21 @@ async function fetchBootstrapAPI(): Promise<BootstrapResponse | null> {
|
||||
return parsed.data
|
||||
})
|
||||
} catch (error) {
|
||||
logForDebugging(
|
||||
`[Bootstrap] Fetch failed: ${axios.isAxiosError(error) ? (error.response?.status ?? error.code) : 'unknown'}`,
|
||||
)
|
||||
if (axios.isAxiosError(error)) {
|
||||
const status = error.response?.status ?? 'no-response'
|
||||
const code = error.code ?? 'unknown-code'
|
||||
const method = error.config?.method?.toUpperCase() ?? 'UNKNOWN'
|
||||
const requestUrl = error.config?.url ?? 'unknown-url'
|
||||
const message = error.message ?? 'unknown axios error'
|
||||
|
||||
logForDebugging(
|
||||
`[Bootstrap] Fetch failed: status=${status} code=${code} method=${method} url=${requestUrl} message=${message}`,
|
||||
)
|
||||
} else {
|
||||
const message = error instanceof Error ? error.message : String(error)
|
||||
logForDebugging(`[Bootstrap] Fetch failed: ${message}`)
|
||||
}
|
||||
|
||||
throw error
|
||||
}
|
||||
}
|
||||
|
||||
166
src/services/api/codexOAuth.test.ts
Normal file
166
src/services/api/codexOAuth.test.ts
Normal file
@@ -0,0 +1,166 @@
|
||||
import { createServer } from 'node:http'
|
||||
|
||||
import { afterEach, expect, mock, test } from 'bun:test'
|
||||
|
||||
import { CodexOAuthService } from './codexOAuth.js'
|
||||
|
||||
const originalFetch = globalThis.fetch
|
||||
const originalCallbackPort = process.env.CODEX_OAUTH_CALLBACK_PORT
|
||||
const originalClientId = process.env.CODEX_OAUTH_CLIENT_ID
|
||||
|
||||
afterEach(() => {
|
||||
mock.restore()
|
||||
globalThis.fetch = originalFetch
|
||||
|
||||
if (originalCallbackPort === undefined) {
|
||||
delete process.env.CODEX_OAUTH_CALLBACK_PORT
|
||||
} else {
|
||||
process.env.CODEX_OAUTH_CALLBACK_PORT = originalCallbackPort
|
||||
}
|
||||
|
||||
if (originalClientId === undefined) {
|
||||
delete process.env.CODEX_OAUTH_CLIENT_ID
|
||||
} else {
|
||||
process.env.CODEX_OAUTH_CLIENT_ID = originalClientId
|
||||
}
|
||||
})
|
||||
|
||||
async function getFreePort(): Promise<number> {
|
||||
return await new Promise((resolve, reject) => {
|
||||
const server = createServer()
|
||||
|
||||
server.once('error', reject)
|
||||
server.listen(0, '127.0.0.1', () => {
|
||||
const address = server.address()
|
||||
if (!address || typeof address === 'string') {
|
||||
server.close(() => reject(new Error('Failed to allocate test port.')))
|
||||
return
|
||||
}
|
||||
|
||||
const { port } = address
|
||||
server.close(error => {
|
||||
if (error) {
|
||||
reject(error)
|
||||
return
|
||||
}
|
||||
resolve(port)
|
||||
})
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
function buildCallbackRequest(authUrl: string): string {
|
||||
const authorizeUrl = new URL(authUrl)
|
||||
const redirectUri = authorizeUrl.searchParams.get('redirect_uri')
|
||||
const state = authorizeUrl.searchParams.get('state')
|
||||
|
||||
if (!redirectUri || !state) {
|
||||
throw new Error('Codex OAuth test did not receive a valid authorization URL.')
|
||||
}
|
||||
|
||||
const callbackUrl = new URL(redirectUri)
|
||||
callbackUrl.searchParams.set('code', 'auth-code')
|
||||
callbackUrl.searchParams.set('state', state)
|
||||
return callbackUrl.toString()
|
||||
}
|
||||
|
||||
test('serves updated success copy after a successful Codex OAuth flow', async () => {
|
||||
const callbackPort = await getFreePort()
|
||||
process.env.CODEX_OAUTH_CALLBACK_PORT = String(callbackPort)
|
||||
process.env.CODEX_OAUTH_CLIENT_ID = 'test-client-id'
|
||||
|
||||
globalThis.fetch = mock(async (input, init) => {
|
||||
const url = String(input)
|
||||
if (url.startsWith('http://localhost:')) {
|
||||
return originalFetch(input, init)
|
||||
}
|
||||
|
||||
return new Response(
|
||||
JSON.stringify({
|
||||
access_token: 'access-token',
|
||||
refresh_token: 'refresh-token',
|
||||
}),
|
||||
{
|
||||
status: 200,
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
},
|
||||
)
|
||||
}) as typeof fetch
|
||||
|
||||
const service = new CodexOAuthService()
|
||||
let callbackResponsePromise!: Promise<Response>
|
||||
|
||||
const flowPromise = service.startOAuthFlow(async authUrl => {
|
||||
callbackResponsePromise = originalFetch(buildCallbackRequest(authUrl))
|
||||
})
|
||||
|
||||
const tokens = await flowPromise
|
||||
const callbackResponse = await callbackResponsePromise
|
||||
const html = await callbackResponse.text()
|
||||
|
||||
expect(tokens.accessToken).toBe('access-token')
|
||||
expect(tokens.refreshToken).toBe('refresh-token')
|
||||
expect(html).toContain('You can return to OpenClaude now.')
|
||||
expect(html).toContain(
|
||||
'OpenClaude will finish activating your new Codex OAuth login.',
|
||||
)
|
||||
expect(html).not.toContain('continue automatically')
|
||||
})
|
||||
|
||||
test('cancellation during token exchange returns a cancelled page and rejects the flow', async () => {
|
||||
const callbackPort = await getFreePort()
|
||||
process.env.CODEX_OAUTH_CALLBACK_PORT = String(callbackPort)
|
||||
process.env.CODEX_OAUTH_CLIENT_ID = 'test-client-id'
|
||||
|
||||
let resolveFetchStart!: () => void
|
||||
const fetchStarted = new Promise<void>(resolve => {
|
||||
resolveFetchStart = resolve
|
||||
})
|
||||
|
||||
globalThis.fetch = mock((input, init) => {
|
||||
const url = String(input)
|
||||
if (url.startsWith('http://localhost:')) {
|
||||
return originalFetch(input, init)
|
||||
}
|
||||
|
||||
return new Promise<Response>((_resolve, reject) => {
|
||||
resolveFetchStart()
|
||||
|
||||
const signal = init?.signal
|
||||
if (!signal) {
|
||||
return
|
||||
}
|
||||
|
||||
if (signal.aborted) {
|
||||
reject(signal.reason)
|
||||
return
|
||||
}
|
||||
|
||||
signal.addEventListener(
|
||||
'abort',
|
||||
() => {
|
||||
reject(signal.reason)
|
||||
},
|
||||
{ once: true },
|
||||
)
|
||||
})
|
||||
}) as typeof fetch
|
||||
|
||||
const service = new CodexOAuthService()
|
||||
let callbackResponsePromise!: Promise<Response>
|
||||
|
||||
const flowPromise = service.startOAuthFlow(async authUrl => {
|
||||
callbackResponsePromise = originalFetch(buildCallbackRequest(authUrl))
|
||||
})
|
||||
|
||||
await fetchStarted
|
||||
service.cleanup()
|
||||
|
||||
await expect(flowPromise).rejects.toThrow('Codex OAuth flow was cancelled.')
|
||||
|
||||
const callbackResponse = await callbackResponsePromise
|
||||
const html = await callbackResponse.text()
|
||||
|
||||
expect(html).toContain('Codex login cancelled')
|
||||
expect(html).toContain('retry in OpenClaude')
|
||||
})
|
||||
307
src/services/api/codexOAuth.ts
Normal file
307
src/services/api/codexOAuth.ts
Normal file
@@ -0,0 +1,307 @@
|
||||
import { AuthCodeListener } from '../oauth/auth-code-listener.js'
|
||||
import {
|
||||
generateCodeChallenge,
|
||||
generateCodeVerifier,
|
||||
generateState,
|
||||
} from '../oauth/crypto.js'
|
||||
import {
|
||||
asTrimmedString,
|
||||
CODEX_OAUTH_ISSUER,
|
||||
CODEX_OAUTH_ORIGINATOR,
|
||||
CODEX_OAUTH_SCOPE,
|
||||
escapeHtml,
|
||||
exchangeCodexIdTokenForApiKey,
|
||||
getCodexOAuthCallbackPort,
|
||||
getCodexOAuthClientId,
|
||||
parseChatgptAccountId,
|
||||
} from './codexOAuthShared.js'
|
||||
|
||||
type CodexOAuthTokenResponse = {
|
||||
id_token?: string
|
||||
access_token?: string
|
||||
refresh_token?: string
|
||||
}
|
||||
|
||||
export type CodexOAuthTokens = {
|
||||
apiKey?: string
|
||||
accessToken: string
|
||||
refreshToken: string
|
||||
idToken?: string
|
||||
accountId?: string
|
||||
}
|
||||
|
||||
function buildCodexAuthorizeUrl(options: {
|
||||
port: number
|
||||
codeChallenge: string
|
||||
state: string
|
||||
}): string {
|
||||
const redirectUri = `http://localhost:${options.port}/auth/callback`
|
||||
const authUrl = new URL(`${CODEX_OAUTH_ISSUER}/oauth/authorize`)
|
||||
|
||||
authUrl.searchParams.append('response_type', 'code')
|
||||
authUrl.searchParams.append('client_id', getCodexOAuthClientId())
|
||||
authUrl.searchParams.append('redirect_uri', redirectUri)
|
||||
authUrl.searchParams.append('scope', CODEX_OAUTH_SCOPE)
|
||||
authUrl.searchParams.append('code_challenge', options.codeChallenge)
|
||||
authUrl.searchParams.append('code_challenge_method', 'S256')
|
||||
authUrl.searchParams.append('id_token_add_organizations', 'true')
|
||||
authUrl.searchParams.append('codex_cli_simplified_flow', 'true')
|
||||
authUrl.searchParams.append('state', options.state)
|
||||
authUrl.searchParams.append('originator', CODEX_OAUTH_ORIGINATOR)
|
||||
|
||||
return authUrl.toString()
|
||||
}
|
||||
|
||||
function renderSuccessPage(): string {
|
||||
return `<!doctype html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta charset="utf-8" />
|
||||
<title>Codex Login Complete</title>
|
||||
<style>
|
||||
body { font-family: sans-serif; padding: 32px; line-height: 1.5; color: #111827; }
|
||||
h1 { margin: 0 0 12px; font-size: 22px; }
|
||||
p { margin: 0 0 10px; }
|
||||
</style>
|
||||
</head>
|
||||
<body>
|
||||
<h1>Codex login complete</h1>
|
||||
<p>You can return to OpenClaude now.</p>
|
||||
<p>OpenClaude will finish activating your new Codex OAuth login.</p>
|
||||
</body>
|
||||
</html>`
|
||||
}
|
||||
|
||||
function renderErrorPage(message: string): string {
|
||||
const safeMessage = escapeHtml(message)
|
||||
return `<!doctype html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta charset="utf-8" />
|
||||
<title>Codex Login Failed</title>
|
||||
<style>
|
||||
body { font-family: sans-serif; padding: 32px; line-height: 1.5; color: #111827; }
|
||||
h1 { margin: 0 0 12px; font-size: 22px; color: #991b1b; }
|
||||
p { margin: 0 0 10px; }
|
||||
</style>
|
||||
</head>
|
||||
<body>
|
||||
<h1>Codex login failed</h1>
|
||||
<p>${safeMessage}</p>
|
||||
<p>You can close this window and try again in OpenClaude.</p>
|
||||
</body>
|
||||
</html>`
|
||||
}
|
||||
|
||||
function renderCancelledPage(): string {
|
||||
return `<!doctype html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta charset="utf-8" />
|
||||
<title>Codex Login Cancelled</title>
|
||||
<style>
|
||||
body { font-family: sans-serif; padding: 32px; line-height: 1.5; color: #111827; }
|
||||
h1 { margin: 0 0 12px; font-size: 22px; }
|
||||
p { margin: 0 0 10px; }
|
||||
</style>
|
||||
</head>
|
||||
<body>
|
||||
<h1>Codex login cancelled</h1>
|
||||
<p>You can close this window and retry in OpenClaude.</p>
|
||||
</body>
|
||||
</html>`
|
||||
}
|
||||
|
||||
async function exchangeAuthorizationCode(options: {
|
||||
authorizationCode: string
|
||||
codeVerifier: string
|
||||
port: number
|
||||
signal?: AbortSignal
|
||||
}): Promise<CodexOAuthTokens> {
|
||||
const redirectUri = `http://localhost:${options.port}/auth/callback`
|
||||
const body = new URLSearchParams({
|
||||
grant_type: 'authorization_code',
|
||||
code: options.authorizationCode,
|
||||
redirect_uri: redirectUri,
|
||||
client_id: getCodexOAuthClientId(),
|
||||
code_verifier: options.codeVerifier,
|
||||
})
|
||||
|
||||
const response = await fetch(`${CODEX_OAUTH_ISSUER}/oauth/token`, {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/x-www-form-urlencoded',
|
||||
},
|
||||
body,
|
||||
signal: options.signal
|
||||
? AbortSignal.any([options.signal, AbortSignal.timeout(15_000)])
|
||||
: AbortSignal.timeout(15_000),
|
||||
})
|
||||
|
||||
if (!response.ok) {
|
||||
const errorText = await response.text().catch(() => '')
|
||||
throw new Error(
|
||||
errorText.trim()
|
||||
? `Codex OAuth token exchange failed (${response.status}): ${errorText.trim()}`
|
||||
: `Codex OAuth token exchange failed with status ${response.status}.`,
|
||||
)
|
||||
}
|
||||
|
||||
const payload = (await response.json()) as CodexOAuthTokenResponse
|
||||
const accessToken = asTrimmedString(payload.access_token)
|
||||
const refreshToken = asTrimmedString(payload.refresh_token)
|
||||
if (!accessToken || !refreshToken) {
|
||||
throw new Error(
|
||||
'Codex OAuth completed, but the token response was missing credentials.',
|
||||
)
|
||||
}
|
||||
|
||||
const idToken = asTrimmedString(payload.id_token)
|
||||
const apiKey = idToken
|
||||
? await exchangeCodexIdTokenForApiKey(idToken).catch(() => undefined)
|
||||
: undefined
|
||||
|
||||
return {
|
||||
apiKey,
|
||||
accessToken,
|
||||
refreshToken,
|
||||
idToken,
|
||||
accountId:
|
||||
parseChatgptAccountId(idToken) ?? parseChatgptAccountId(accessToken),
|
||||
}
|
||||
}
|
||||
|
||||
export class CodexOAuthService {
|
||||
private authCodeListener: AuthCodeListener | null = null
|
||||
private port: number | null = null
|
||||
private tokenExchangeAbortController: AbortController | null = null
|
||||
|
||||
private buildCancellationError(): Error {
|
||||
return new Error('Codex OAuth flow was cancelled.')
|
||||
}
|
||||
|
||||
async startOAuthFlow(
|
||||
authURLHandler: (authUrl: string) => Promise<void>,
|
||||
): Promise<CodexOAuthTokens> {
|
||||
const codeVerifier = generateCodeVerifier()
|
||||
const callbackPort = getCodexOAuthCallbackPort()
|
||||
const authCodeListener = new AuthCodeListener('/auth/callback')
|
||||
|
||||
this.authCodeListener = authCodeListener
|
||||
this.port = null
|
||||
|
||||
try {
|
||||
const port = await authCodeListener.start(callbackPort)
|
||||
this.port = port
|
||||
|
||||
const state = generateState()
|
||||
const codeChallenge = await generateCodeChallenge(codeVerifier)
|
||||
const authUrl = buildCodexAuthorizeUrl({
|
||||
port,
|
||||
codeChallenge,
|
||||
state,
|
||||
})
|
||||
|
||||
try {
|
||||
const authorizationCode = await authCodeListener.waitForAuthorization(
|
||||
state,
|
||||
async () => {
|
||||
await authURLHandler(authUrl)
|
||||
},
|
||||
)
|
||||
|
||||
const tokenExchangeAbortController = new AbortController()
|
||||
this.tokenExchangeAbortController = tokenExchangeAbortController
|
||||
|
||||
let tokens: CodexOAuthTokens
|
||||
try {
|
||||
tokens = await exchangeAuthorizationCode({
|
||||
authorizationCode,
|
||||
codeVerifier,
|
||||
port,
|
||||
signal: tokenExchangeAbortController.signal,
|
||||
})
|
||||
} finally {
|
||||
if (
|
||||
this.tokenExchangeAbortController === tokenExchangeAbortController
|
||||
) {
|
||||
this.tokenExchangeAbortController = null
|
||||
}
|
||||
}
|
||||
|
||||
if (this.authCodeListener !== authCodeListener) {
|
||||
throw this.buildCancellationError()
|
||||
}
|
||||
|
||||
authCodeListener.handleSuccessRedirect([], res => {
|
||||
res.writeHead(200, {
|
||||
'Content-Type': 'text/html; charset=utf-8',
|
||||
})
|
||||
res.end(renderSuccessPage())
|
||||
})
|
||||
|
||||
return tokens
|
||||
} catch (error) {
|
||||
const resolvedError =
|
||||
this.authCodeListener === authCodeListener
|
||||
? error
|
||||
: this.buildCancellationError()
|
||||
|
||||
if (authCodeListener.hasPendingResponse()) {
|
||||
const isCancellation =
|
||||
resolvedError instanceof Error &&
|
||||
resolvedError.message === 'Codex OAuth flow was cancelled.'
|
||||
|
||||
authCodeListener.handleErrorRedirect(res => {
|
||||
res.writeHead(isCancellation ? 200 : 400, {
|
||||
'Content-Type': 'text/html; charset=utf-8',
|
||||
})
|
||||
res.end(
|
||||
isCancellation
|
||||
? renderCancelledPage()
|
||||
: renderErrorPage(
|
||||
resolvedError instanceof Error
|
||||
? resolvedError.message
|
||||
: String(resolvedError),
|
||||
),
|
||||
)
|
||||
})
|
||||
}
|
||||
throw resolvedError
|
||||
} finally {
|
||||
this.cleanup()
|
||||
}
|
||||
} catch (error) {
|
||||
const message = error instanceof Error ? error.message : String(error)
|
||||
if (
|
||||
message.includes('EADDRINUSE') ||
|
||||
message.includes(String(callbackPort))
|
||||
) {
|
||||
throw new Error(
|
||||
`Codex OAuth needs localhost:${callbackPort} for its callback. Close any app already using that port and try again.`,
|
||||
)
|
||||
}
|
||||
throw error
|
||||
}
|
||||
}
|
||||
|
||||
cleanup(): void {
|
||||
const cancellationError = this.buildCancellationError()
|
||||
|
||||
this.tokenExchangeAbortController?.abort(cancellationError)
|
||||
this.tokenExchangeAbortController = null
|
||||
|
||||
if (this.authCodeListener?.hasPendingResponse()) {
|
||||
this.authCodeListener.handleErrorRedirect(res => {
|
||||
res.writeHead(200, {
|
||||
'Content-Type': 'text/html; charset=utf-8',
|
||||
})
|
||||
res.end(renderCancelledPage())
|
||||
})
|
||||
}
|
||||
|
||||
this.authCodeListener?.cancelPendingAuthorization(cancellationError)
|
||||
this.authCodeListener = null
|
||||
this.port = null
|
||||
}
|
||||
}
|
||||
139
src/services/api/codexOAuthShared.ts
Normal file
139
src/services/api/codexOAuthShared.ts
Normal file
@@ -0,0 +1,139 @@
|
||||
export const CODEX_OAUTH_ISSUER = 'https://auth.openai.com'
|
||||
export const CODEX_REFRESH_URL = `${CODEX_OAUTH_ISSUER}/oauth/token`
|
||||
export const DEFAULT_CODEX_OAUTH_CLIENT_ID = 'app_EMoamEEZ73f0CkXaXp7hrann'
|
||||
export const DEFAULT_CODEX_OAUTH_CALLBACK_PORT = 1455
|
||||
export const CODEX_OAUTH_SCOPE =
|
||||
'openid profile email offline_access api.connectors.read api.connectors.invoke'
|
||||
export const CODEX_OAUTH_ORIGINATOR = 'codex_cli_rs'
|
||||
export const CODEX_API_KEY_TOKEN_NAME = 'openai-api-key'
|
||||
export const CODEX_ID_TOKEN_SUBJECT_TYPE =
|
||||
'urn:ietf:params:oauth:token-type:id_token'
|
||||
export const CODEX_TOKEN_EXCHANGE_GRANT =
|
||||
'urn:ietf:params:oauth:grant-type:token-exchange'
|
||||
|
||||
export function asTrimmedString(value: unknown): string | undefined {
|
||||
if (typeof value !== 'string') return undefined
|
||||
const trimmed = value.trim()
|
||||
return trimmed ? trimmed : undefined
|
||||
}
|
||||
|
||||
export function getCodexOAuthClientId(
|
||||
env: NodeJS.ProcessEnv = process.env,
|
||||
): string {
|
||||
return asTrimmedString(env.CODEX_OAUTH_CLIENT_ID) ?? DEFAULT_CODEX_OAUTH_CLIENT_ID
|
||||
}
|
||||
|
||||
export function getCodexOAuthCallbackPort(
|
||||
env: NodeJS.ProcessEnv = process.env,
|
||||
): number {
|
||||
const rawPort = asTrimmedString(env.CODEX_OAUTH_CALLBACK_PORT)
|
||||
if (!rawPort) {
|
||||
return DEFAULT_CODEX_OAUTH_CALLBACK_PORT
|
||||
}
|
||||
|
||||
const parsed = Number.parseInt(rawPort, 10)
|
||||
if (Number.isInteger(parsed) && parsed > 0 && parsed <= 65535) {
|
||||
return parsed
|
||||
}
|
||||
|
||||
return DEFAULT_CODEX_OAUTH_CALLBACK_PORT
|
||||
}
|
||||
|
||||
export function decodeJwtPayload(
|
||||
token: string,
|
||||
): Record<string, unknown> | undefined {
|
||||
const parts = token.split('.')
|
||||
if (parts.length < 2) return undefined
|
||||
|
||||
try {
|
||||
const normalized = parts[1].replace(/-/g, '+').replace(/_/g, '/')
|
||||
const padded = normalized + '='.repeat((4 - (normalized.length % 4)) % 4)
|
||||
const json = Buffer.from(padded, 'base64').toString('utf8')
|
||||
const parsed = JSON.parse(json)
|
||||
return parsed && typeof parsed === 'object'
|
||||
? (parsed as Record<string, unknown>)
|
||||
: undefined
|
||||
} catch {
|
||||
return undefined
|
||||
}
|
||||
}
|
||||
|
||||
export function parseChatgptAccountId(
|
||||
token: string | undefined,
|
||||
): string | undefined {
|
||||
if (!token) return undefined
|
||||
|
||||
const payload = decodeJwtPayload(token)
|
||||
const nestedAuth =
|
||||
payload?.['https://api.openai.com/auth'] &&
|
||||
typeof payload['https://api.openai.com/auth'] === 'object'
|
||||
? (payload['https://api.openai.com/auth'] as Record<string, unknown>)
|
||||
: undefined
|
||||
|
||||
return (
|
||||
asTrimmedString(
|
||||
nestedAuth?.chatgpt_account_id ??
|
||||
payload?.['https://api.openai.com/auth.chatgpt_account_id'] ??
|
||||
payload?.chatgpt_account_id,
|
||||
) ?? undefined
|
||||
)
|
||||
}
|
||||
|
||||
export function escapeHtml(value: string): string {
|
||||
return value.replace(/[&<>"']/g, char => {
|
||||
switch (char) {
|
||||
case '&':
|
||||
return '&'
|
||||
case '<':
|
||||
return '<'
|
||||
case '>':
|
||||
return '>'
|
||||
case '"':
|
||||
return '"'
|
||||
case '\'':
|
||||
return '''
|
||||
default:
|
||||
return char
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
export async function exchangeCodexIdTokenForApiKey(
|
||||
idToken: string,
|
||||
): Promise<string> {
|
||||
const body = new URLSearchParams({
|
||||
grant_type: CODEX_TOKEN_EXCHANGE_GRANT,
|
||||
client_id: getCodexOAuthClientId(),
|
||||
requested_token: CODEX_API_KEY_TOKEN_NAME,
|
||||
subject_token: idToken,
|
||||
subject_token_type: CODEX_ID_TOKEN_SUBJECT_TYPE,
|
||||
})
|
||||
|
||||
const response = await fetch(CODEX_REFRESH_URL, {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/x-www-form-urlencoded',
|
||||
},
|
||||
body,
|
||||
signal: AbortSignal.timeout(15_000),
|
||||
})
|
||||
|
||||
if (!response.ok) {
|
||||
const bodyText = await response.text().catch(() => '')
|
||||
throw new Error(
|
||||
bodyText.trim()
|
||||
? `Codex API key exchange failed (${response.status}): ${bodyText.trim()}`
|
||||
: `Codex API key exchange failed with status ${response.status}.`,
|
||||
)
|
||||
}
|
||||
|
||||
const payload = (await response.json()) as { access_token?: string }
|
||||
const apiKey = asTrimmedString(payload.access_token)
|
||||
if (!apiKey) {
|
||||
throw new Error(
|
||||
'Codex API key exchange completed, but no API key token was returned.',
|
||||
)
|
||||
}
|
||||
|
||||
return apiKey
|
||||
}
|
||||
@@ -8,16 +8,13 @@ import {
|
||||
convertCodexResponseToAnthropicMessage,
|
||||
convertToolsToResponsesTools,
|
||||
} from './codexShim.js'
|
||||
import {
|
||||
resolveCodexApiCredentials,
|
||||
resolveProviderRequest,
|
||||
} from './providerConfig.js'
|
||||
|
||||
const tempDirs: string[] = []
|
||||
const originalEnv = {
|
||||
OPENAI_BASE_URL: process.env.OPENAI_BASE_URL,
|
||||
OPENAI_API_BASE: process.env.OPENAI_API_BASE,
|
||||
CLAUDE_CODE_USE_GITHUB: process.env.CLAUDE_CODE_USE_GITHUB,
|
||||
OPENAI_MODEL: process.env.OPENAI_MODEL,
|
||||
}
|
||||
|
||||
afterEach(() => {
|
||||
@@ -30,6 +27,9 @@ afterEach(() => {
|
||||
if (originalEnv.CLAUDE_CODE_USE_GITHUB === undefined) delete process.env.CLAUDE_CODE_USE_GITHUB
|
||||
else process.env.CLAUDE_CODE_USE_GITHUB = originalEnv.CLAUDE_CODE_USE_GITHUB
|
||||
|
||||
if (originalEnv.OPENAI_MODEL === undefined) delete process.env.OPENAI_MODEL
|
||||
else process.env.OPENAI_MODEL = originalEnv.OPENAI_MODEL
|
||||
|
||||
while (tempDirs.length > 0) {
|
||||
const dir = tempDirs.pop()
|
||||
if (dir) rmSync(dir, { recursive: true, force: true })
|
||||
@@ -59,6 +59,10 @@ async function collectStreamEventTypes(responseText: string): Promise<string[]>
|
||||
return events
|
||||
}
|
||||
|
||||
async function importFreshProviderConfigModule() {
|
||||
return import(`./providerConfig.js?ts=${Date.now()}-${Math.random()}`)
|
||||
}
|
||||
|
||||
describe('Codex provider config', () => {
|
||||
const originalOpenaiBaseUrl = process.env.OPENAI_BASE_URL
|
||||
const originalOpenaiApiBase = process.env.OPENAI_API_BASE
|
||||
@@ -75,7 +79,8 @@ describe('Codex provider config', () => {
|
||||
else process.env.OPENAI_API_BASE = originalOpenaiApiBase
|
||||
})
|
||||
|
||||
test('resolves codexplan alias to Codex transport with reasoning', () => {
|
||||
test('resolves codexplan alias to Codex transport with reasoning', async () => {
|
||||
const { resolveProviderRequest } = await importFreshProviderConfigModule()
|
||||
delete process.env.OPENAI_BASE_URL
|
||||
delete process.env.OPENAI_API_BASE
|
||||
delete process.env.CLAUDE_CODE_USE_GITHUB
|
||||
@@ -84,9 +89,23 @@ describe('Codex provider config', () => {
|
||||
expect(resolved.transport).toBe('codex_responses')
|
||||
expect(resolved.resolvedModel).toBe('gpt-5.4')
|
||||
expect(resolved.reasoning).toEqual({ effort: 'high' })
|
||||
expect(resolved.baseUrl).toBe('https://chatgpt.com/backend-api/codex')
|
||||
})
|
||||
|
||||
test('does not force Codex transport when a local non-Codex base URL is explicit', () => {
|
||||
test('resolves codexspark alias to Codex transport with Codex base URL', async () => {
|
||||
const { resolveProviderRequest } = await importFreshProviderConfigModule()
|
||||
delete process.env.OPENAI_BASE_URL
|
||||
delete process.env.OPENAI_API_BASE
|
||||
delete process.env.CLAUDE_CODE_USE_GITHUB
|
||||
|
||||
const resolved = resolveProviderRequest({ model: 'codexspark' })
|
||||
expect(resolved.transport).toBe('codex_responses')
|
||||
expect(resolved.resolvedModel).toBe('gpt-5.3-codex-spark')
|
||||
expect(resolved.baseUrl).toBe('https://chatgpt.com/backend-api/codex')
|
||||
})
|
||||
|
||||
test('does not force Codex transport when a local non-Codex base URL is explicit', async () => {
|
||||
const { resolveProviderRequest } = await importFreshProviderConfigModule()
|
||||
const resolved = resolveProviderRequest({
|
||||
model: 'codexplan',
|
||||
baseUrl: 'http://127.0.0.1:8080/v1',
|
||||
@@ -97,7 +116,8 @@ describe('Codex provider config', () => {
|
||||
expect(resolved.resolvedModel).toBe('gpt-5.4')
|
||||
})
|
||||
|
||||
test('resolves codexplan to Codex transport even when OPENAI_BASE_URL is the string "undefined"', () => {
|
||||
test('resolves codexplan to Codex transport even when OPENAI_BASE_URL is the string "undefined"', async () => {
|
||||
const { resolveProviderRequest } = await importFreshProviderConfigModule()
|
||||
// On Windows, env vars can leak as the literal string "undefined" instead of
|
||||
// the JS value undefined when not properly unset (issue #336).
|
||||
process.env.OPENAI_BASE_URL = 'undefined'
|
||||
@@ -105,20 +125,57 @@ describe('Codex provider config', () => {
|
||||
expect(resolved.transport).toBe('codex_responses')
|
||||
})
|
||||
|
||||
test('resolves codexplan to Codex transport even when OPENAI_BASE_URL is an empty string', () => {
|
||||
test('resolves codexplan to Codex transport even when OPENAI_BASE_URL is an empty string', async () => {
|
||||
const { resolveProviderRequest } = await importFreshProviderConfigModule()
|
||||
process.env.OPENAI_BASE_URL = ''
|
||||
const resolved = resolveProviderRequest({ model: 'codexplan' })
|
||||
expect(resolved.transport).toBe('codex_responses')
|
||||
})
|
||||
|
||||
test('prefers explicit baseUrl option over env var', () => {
|
||||
test('prefers explicit baseUrl option over env var', async () => {
|
||||
const { resolveProviderRequest } = await importFreshProviderConfigModule()
|
||||
process.env.OPENAI_BASE_URL = 'https://example.com/v1'
|
||||
const resolved = resolveProviderRequest({ model: 'codexplan', baseUrl: 'https://chatgpt.com/backend-api/codex' })
|
||||
expect(resolved.transport).toBe('codex_responses')
|
||||
expect(resolved.baseUrl).toBe('https://chatgpt.com/backend-api/codex')
|
||||
})
|
||||
|
||||
test('loads Codex credentials from auth.json fallback', () => {
|
||||
test('default gpt-4o uses OpenAI base URL (no regression)', async () => {
|
||||
const { resolveProviderRequest } = await importFreshProviderConfigModule()
|
||||
delete process.env.OPENAI_BASE_URL
|
||||
delete process.env.CLAUDE_CODE_USE_GITHUB
|
||||
|
||||
const resolved = resolveProviderRequest({ model: 'gpt-4o' })
|
||||
expect(resolved.transport).toBe('chat_completions')
|
||||
expect(resolved.baseUrl).toBe('https://api.openai.com/v1')
|
||||
expect(resolved.resolvedModel).toBe('gpt-4o')
|
||||
})
|
||||
|
||||
test('resolves codexplan from env var OPENAI_MODEL to Codex endpoint', async () => {
|
||||
const { resolveProviderRequest } = await importFreshProviderConfigModule()
|
||||
process.env.OPENAI_MODEL = 'codexplan'
|
||||
delete process.env.OPENAI_BASE_URL
|
||||
delete process.env.CLAUDE_CODE_USE_GITHUB
|
||||
|
||||
const resolved = resolveProviderRequest()
|
||||
expect(resolved.transport).toBe('codex_responses')
|
||||
expect(resolved.baseUrl).toBe('https://chatgpt.com/backend-api/codex')
|
||||
expect(resolved.resolvedModel).toBe('gpt-5.4')
|
||||
})
|
||||
|
||||
test('does not override custom base URL for codexplan (e.g., local provider)', async () => {
|
||||
const { resolveProviderRequest } = await importFreshProviderConfigModule()
|
||||
process.env.OPENAI_MODEL = 'codexplan'
|
||||
process.env.OPENAI_BASE_URL = 'http://localhost:11434/v1'
|
||||
delete process.env.CLAUDE_CODE_USE_GITHUB
|
||||
|
||||
const resolved = resolveProviderRequest()
|
||||
expect(resolved.transport).toBe('chat_completions')
|
||||
expect(resolved.baseUrl).toBe('http://localhost:11434/v1')
|
||||
})
|
||||
|
||||
test('loads Codex credentials from auth.json fallback', async () => {
|
||||
const { resolveCodexApiCredentials } = await importFreshProviderConfigModule()
|
||||
const authPath = createTempAuthJson({
|
||||
tokens: {
|
||||
access_token: 'header.payload.signature',
|
||||
@@ -134,6 +191,31 @@ describe('Codex provider config', () => {
|
||||
expect(credentials.accountId).toBe('acct_test')
|
||||
expect(credentials.source).toBe('auth.json')
|
||||
})
|
||||
|
||||
test('does not treat auth.json id_token as a Codex bearer credential', async () => {
|
||||
const { resolveCodexApiCredentials } = await importFreshProviderConfigModule()
|
||||
const idTokenPayload = Buffer.from(
|
||||
JSON.stringify({
|
||||
'https://api.openai.com/auth': {
|
||||
chatgpt_account_id: 'acct_from_id_token',
|
||||
},
|
||||
}),
|
||||
'utf8',
|
||||
).toString('base64url')
|
||||
const authPath = createTempAuthJson({
|
||||
tokens: {
|
||||
id_token: `header.${idTokenPayload}.signature`,
|
||||
},
|
||||
})
|
||||
|
||||
const credentials = resolveCodexApiCredentials({
|
||||
CODEX_AUTH_JSON_PATH: authPath,
|
||||
} as NodeJS.ProcessEnv)
|
||||
|
||||
expect(credentials.apiKey).toBe('')
|
||||
expect(credentials.accountId).toBe('acct_from_id_token')
|
||||
expect(credentials.source).toBe('none')
|
||||
})
|
||||
})
|
||||
|
||||
describe('Codex request translation', () => {
|
||||
|
||||
@@ -580,15 +580,55 @@ export async function performCodexRequest(options: {
|
||||
return response
|
||||
}
|
||||
|
||||
async function* readSseEvents(response: Response): AsyncGenerator<CodexSseEvent> {
|
||||
async function* readSseEvents(response: Response, signal?: AbortSignal): AsyncGenerator<CodexSseEvent> {
|
||||
const reader = response.body?.getReader()
|
||||
if (!reader) return
|
||||
|
||||
const decoder = new TextDecoder()
|
||||
let buffer = ''
|
||||
const STREAM_IDLE_TIMEOUT_MS = 120_000 // 2 minutes without data
|
||||
let lastDataTime = Date.now()
|
||||
|
||||
/**
|
||||
* Read from the stream with an idle timeout. Respects the caller's
|
||||
* AbortSignal — clears the idle timer on abort so the AbortError
|
||||
* surfaces cleanly instead of a spurious idle timeout.
|
||||
*/
|
||||
async function readWithTimeout(): Promise<ReadableStreamReadResult<Uint8Array>> {
|
||||
return new Promise((resolve, reject) => {
|
||||
const timeoutId = setTimeout(() => {
|
||||
const elapsed = Math.round((Date.now() - lastDataTime) / 1000)
|
||||
reject(new Error(
|
||||
`Codex SSE stream idle for ${elapsed}s (limit: ${STREAM_IDLE_TIMEOUT_MS / 1000}s). Connection likely dropped.`,
|
||||
))
|
||||
}, STREAM_IDLE_TIMEOUT_MS)
|
||||
|
||||
let abortCleanup: (() => void) | undefined
|
||||
if (signal) {
|
||||
abortCleanup = () => {
|
||||
clearTimeout(timeoutId)
|
||||
}
|
||||
signal.addEventListener('abort', abortCleanup, { once: true })
|
||||
}
|
||||
|
||||
reader.read().then(
|
||||
result => {
|
||||
clearTimeout(timeoutId)
|
||||
if (signal && abortCleanup) signal.removeEventListener('abort', abortCleanup)
|
||||
if (result.value) lastDataTime = Date.now()
|
||||
resolve(result)
|
||||
},
|
||||
err => {
|
||||
clearTimeout(timeoutId)
|
||||
if (signal && abortCleanup) signal.removeEventListener('abort', abortCleanup)
|
||||
reject(err)
|
||||
},
|
||||
)
|
||||
})
|
||||
}
|
||||
|
||||
while (true) {
|
||||
const { done, value } = await reader.read()
|
||||
const { done, value } = await readWithTimeout()
|
||||
if (done) break
|
||||
|
||||
buffer += decoder.decode(value, { stream: true })
|
||||
@@ -649,10 +689,11 @@ function determineStopReason(
|
||||
|
||||
export async function collectCodexCompletedResponse(
|
||||
response: Response,
|
||||
signal?: AbortSignal,
|
||||
): Promise<Record<string, any>> {
|
||||
let completedResponse: Record<string, any> | undefined
|
||||
|
||||
for await (const event of readSseEvents(response)) {
|
||||
for await (const event of readSseEvents(response, signal)) {
|
||||
if (event.event === 'response.failed') {
|
||||
const msg = event.data?.response?.error?.message ??
|
||||
event.data?.error?.message ?? 'Codex response failed'
|
||||
@@ -681,6 +722,7 @@ export async function collectCodexCompletedResponse(
|
||||
export async function* codexStreamToAnthropic(
|
||||
response: Response,
|
||||
model: string,
|
||||
signal?: AbortSignal,
|
||||
): AsyncGenerator<AnthropicStreamEvent> {
|
||||
const messageId = makeMessageId()
|
||||
const toolBlocksByItemId = new Map<
|
||||
@@ -742,7 +784,7 @@ export async function* codexStreamToAnthropic(
|
||||
},
|
||||
}
|
||||
|
||||
for await (const event of readSseEvents(response)) {
|
||||
for await (const event of readSseEvents(response, signal)) {
|
||||
const payload = event.data
|
||||
|
||||
if (event.event === 'response.output_item.added') {
|
||||
|
||||
@@ -1,7 +1,13 @@
|
||||
import {
|
||||
readCodexCredentialsAsync,
|
||||
refreshCodexAccessTokenIfNeeded,
|
||||
} from '../../utils/codexCredentials.js'
|
||||
import { logForDebugging } from '../../utils/debug.js'
|
||||
import { isBareMode } from '../../utils/envUtils.js'
|
||||
import {
|
||||
DEFAULT_CODEX_BASE_URL,
|
||||
isCodexBaseUrl,
|
||||
resolveCodexApiCredentials,
|
||||
resolveRuntimeCodexCredentials,
|
||||
resolveProviderRequest,
|
||||
} from './providerConfig.js'
|
||||
|
||||
@@ -391,6 +397,18 @@ export function getCodexUsageUrl(baseUrl = DEFAULT_CODEX_BASE_URL): string {
|
||||
}
|
||||
|
||||
export async function fetchCodexUsage(): Promise<CodexUsageData> {
|
||||
const refreshResult = await refreshCodexAccessTokenIfNeeded().catch(
|
||||
async error => {
|
||||
logForDebugging(
|
||||
`[codex] access token refresh failed before usage fetch: ${error instanceof Error ? error.message : String(error)}`,
|
||||
{ level: 'warn' },
|
||||
)
|
||||
return {
|
||||
refreshed: false,
|
||||
credentials: await readCodexCredentialsAsync(),
|
||||
}
|
||||
},
|
||||
)
|
||||
const request = resolveProviderRequest({
|
||||
model: process.env.OPENAI_MODEL,
|
||||
baseUrl: process.env.OPENAI_BASE_URL,
|
||||
@@ -401,16 +419,19 @@ export async function fetchCodexUsage(): Promise<CodexUsageData> {
|
||||
)
|
||||
}
|
||||
|
||||
const credentials = resolveCodexApiCredentials()
|
||||
const credentials = resolveRuntimeCodexCredentials({
|
||||
storedCredentials: refreshResult.credentials,
|
||||
})
|
||||
if (!credentials.apiKey) {
|
||||
const oauthHint = isBareMode() ? '' : ', choose Codex OAuth in /provider'
|
||||
const authHint = credentials.authPath
|
||||
? ` or place a Codex auth.json at ${credentials.authPath}`
|
||||
: ''
|
||||
? `${oauthHint} or place a Codex auth.json at ${credentials.authPath}`
|
||||
: oauthHint
|
||||
throw new Error(`Codex auth is required. Set CODEX_API_KEY${authHint}.`)
|
||||
}
|
||||
if (!credentials.accountId) {
|
||||
throw new Error(
|
||||
'Codex auth is missing chatgpt_account_id. Re-login with the Codex CLI or set CHATGPT_ACCOUNT_ID/CODEX_ACCOUNT_ID.',
|
||||
'Codex auth is missing chatgpt_account_id. Re-login with Codex OAuth, the Codex CLI, or set CHATGPT_ACCOUNT_ID/CODEX_ACCOUNT_ID.',
|
||||
)
|
||||
}
|
||||
|
||||
|
||||
@@ -924,6 +924,30 @@ export function getAssistantMessageFromError(
|
||||
})
|
||||
}
|
||||
|
||||
// 500 errors caused by context overflow — the API returns 500 instead of 400
|
||||
// when the request body (including conversation context) exceeds limits.
|
||||
// This happens when auto-compact fails or the token estimation undercounts.
|
||||
// Detect by checking for context-related keywords in 500 responses.
|
||||
if (
|
||||
error instanceof APIError &&
|
||||
error.status >= 500 &&
|
||||
(error.message.toLowerCase().includes('too many tokens') ||
|
||||
error.message.toLowerCase().includes('request too large') ||
|
||||
error.message.toLowerCase().includes('context length') ||
|
||||
error.message.toLowerCase().includes('maximum context') ||
|
||||
error.message.toLowerCase().includes('input length') ||
|
||||
error.message.toLowerCase().includes('payload too large'))
|
||||
) {
|
||||
const rewindInstruction = getIsNonInteractiveSession()
|
||||
? ''
|
||||
: ' Press esc twice to go up a few messages, or run /compact to reduce context.'
|
||||
return createAssistantAPIErrorMessage({
|
||||
content: `The conversation has grown too large for the API to process.${rewindInstruction} Alternatively, start a new session with /new.`,
|
||||
error: 'invalid_request',
|
||||
errorDetails: `Context overflow (500): ${error.message}`,
|
||||
})
|
||||
}
|
||||
|
||||
// Connection errors (non-timeout) — use formatAPIError for detailed messages
|
||||
if (error instanceof APIConnectionError) {
|
||||
return createAssistantAPIErrorMessage({
|
||||
|
||||
@@ -403,6 +403,97 @@ test('preserves usage from final OpenAI stream chunk with empty choices', async
|
||||
expect(usageEvent?.usage?.output_tokens).toBe(45)
|
||||
})
|
||||
|
||||
test('uses max_tokens instead of max_completion_tokens for local providers', async () => {
|
||||
process.env.OPENAI_BASE_URL = 'http://localhost:11434/v1'
|
||||
|
||||
globalThis.fetch = (async (_input, init) => {
|
||||
const body = JSON.parse(String(init?.body))
|
||||
expect(body.max_tokens).toBe(64)
|
||||
expect(body.max_completion_tokens).toBeUndefined()
|
||||
expect(body.stream_options).toBeUndefined()
|
||||
|
||||
return new Response(
|
||||
JSON.stringify({
|
||||
id: 'chatcmpl-1',
|
||||
model: 'llama3.1:8b',
|
||||
choices: [
|
||||
{
|
||||
message: {
|
||||
role: 'assistant',
|
||||
content: 'hello',
|
||||
},
|
||||
finish_reason: 'stop',
|
||||
},
|
||||
],
|
||||
usage: {
|
||||
prompt_tokens: 5,
|
||||
completion_tokens: 1,
|
||||
total_tokens: 6,
|
||||
},
|
||||
}),
|
||||
{
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
},
|
||||
},
|
||||
)
|
||||
}) as FetchType
|
||||
|
||||
const client = createOpenAIShimClient({}) as OpenAIShimClient
|
||||
|
||||
await client.beta.messages.create({
|
||||
model: 'llama3.1:8b',
|
||||
messages: [{ role: 'user', content: 'hello' }],
|
||||
max_tokens: 64,
|
||||
stream: false,
|
||||
})
|
||||
})
|
||||
|
||||
test('keeps max_completion_tokens for non-local non-github providers', async () => {
|
||||
process.env.OPENAI_BASE_URL = 'https://api.openai.com/v1'
|
||||
|
||||
globalThis.fetch = (async (_input, init) => {
|
||||
const body = JSON.parse(String(init?.body))
|
||||
expect(body.max_completion_tokens).toBe(64)
|
||||
expect(body.max_tokens).toBeUndefined()
|
||||
|
||||
return new Response(
|
||||
JSON.stringify({
|
||||
id: 'chatcmpl-1',
|
||||
model: 'gpt-4o',
|
||||
choices: [
|
||||
{
|
||||
message: {
|
||||
role: 'assistant',
|
||||
content: 'hello',
|
||||
},
|
||||
finish_reason: 'stop',
|
||||
},
|
||||
],
|
||||
usage: {
|
||||
prompt_tokens: 5,
|
||||
completion_tokens: 1,
|
||||
total_tokens: 6,
|
||||
},
|
||||
}),
|
||||
{
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
},
|
||||
},
|
||||
)
|
||||
}) as FetchType
|
||||
|
||||
const client = createOpenAIShimClient({}) as OpenAIShimClient
|
||||
|
||||
await client.beta.messages.create({
|
||||
model: 'gpt-4o',
|
||||
messages: [{ role: 'user', content: 'hello' }],
|
||||
max_tokens: 64,
|
||||
stream: false,
|
||||
})
|
||||
})
|
||||
|
||||
test('preserves Gemini tool call extra_content in follow-up requests', async () => {
|
||||
let requestBody: Record<string, unknown> | undefined
|
||||
|
||||
@@ -689,9 +780,117 @@ test('preserves image tool results as placeholders in follow-up requests', async
|
||||
|
||||
const toolMessage = (requestBody?.messages as Array<Record<string, unknown>>).find(
|
||||
message => message.role === 'tool',
|
||||
) as { content?: string } | undefined
|
||||
) as {
|
||||
content?: Array<{
|
||||
type: string
|
||||
text?: string
|
||||
image_url?: { url: string }
|
||||
}> | string
|
||||
} | undefined
|
||||
|
||||
expect(toolMessage?.content).toContain('[image:image/png]')
|
||||
expect(Array.isArray(toolMessage?.content)).toBe(true)
|
||||
const parts = toolMessage?.content as Array<{
|
||||
type: string
|
||||
text?: string
|
||||
image_url?: { url: string }
|
||||
}>
|
||||
const imagePart = parts.find(part => part.type === 'image_url')
|
||||
expect(imagePart?.image_url?.url).toBe('data:image/png;base64,ZmFrZQ==')
|
||||
})
|
||||
|
||||
test('preserves mixed text and image tool results as multipart content', async () => {
|
||||
let requestBody: Record<string, unknown> | undefined
|
||||
|
||||
globalThis.fetch = (async (_input, init) => {
|
||||
requestBody = JSON.parse(String(init?.body))
|
||||
|
||||
return new Response(
|
||||
JSON.stringify({
|
||||
id: 'chatcmpl-1',
|
||||
model: 'gpt-4o',
|
||||
choices: [
|
||||
{
|
||||
message: {
|
||||
role: 'assistant',
|
||||
content: 'done',
|
||||
},
|
||||
finish_reason: 'stop',
|
||||
},
|
||||
],
|
||||
usage: {
|
||||
prompt_tokens: 12,
|
||||
completion_tokens: 4,
|
||||
total_tokens: 16,
|
||||
},
|
||||
}),
|
||||
{
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
},
|
||||
},
|
||||
)
|
||||
}) as FetchType
|
||||
|
||||
const client = createOpenAIShimClient({}) as OpenAIShimClient
|
||||
|
||||
await client.beta.messages.create({
|
||||
model: 'gpt-4o',
|
||||
system: 'test system',
|
||||
messages: [
|
||||
{ role: 'user', content: 'Read this screenshot' },
|
||||
{
|
||||
role: 'assistant',
|
||||
content: [
|
||||
{
|
||||
type: 'tool_use',
|
||||
id: 'call_image_2',
|
||||
name: 'Read',
|
||||
input: { file_path: 'C:\\temp\\screenshot.png' },
|
||||
},
|
||||
],
|
||||
},
|
||||
{
|
||||
role: 'user',
|
||||
content: [
|
||||
{
|
||||
type: 'tool_result',
|
||||
tool_use_id: 'call_image_2',
|
||||
content: [
|
||||
{ type: 'text', text: 'Screenshot captured' },
|
||||
{
|
||||
type: 'image',
|
||||
source: {
|
||||
type: 'base64',
|
||||
media_type: 'image/png',
|
||||
data: 'ZmFrZQ==',
|
||||
},
|
||||
},
|
||||
],
|
||||
},
|
||||
],
|
||||
},
|
||||
],
|
||||
max_tokens: 64,
|
||||
stream: false,
|
||||
})
|
||||
|
||||
const toolMessage = (requestBody?.messages as Array<Record<string, unknown>>).find(
|
||||
message => message.role === 'tool',
|
||||
) as {
|
||||
content?: Array<{
|
||||
type: string
|
||||
text?: string
|
||||
image_url?: { url: string }
|
||||
}>
|
||||
} | undefined
|
||||
|
||||
expect(Array.isArray(toolMessage?.content)).toBe(true)
|
||||
const parts = toolMessage?.content ?? []
|
||||
expect(parts[0]).toEqual({ type: 'text', text: 'Screenshot captured' })
|
||||
expect(parts[1]).toEqual({
|
||||
type: 'image_url',
|
||||
image_url: { url: 'data:image/png;base64,ZmFrZQ==' },
|
||||
})
|
||||
})
|
||||
|
||||
test('uses GEMINI_ACCESS_TOKEN for Gemini OpenAI-compatible requests', async () => {
|
||||
|
||||
@@ -22,7 +22,12 @@
|
||||
*/
|
||||
|
||||
import { APIError } from '@anthropic-ai/sdk'
|
||||
import { isEnvTruthy } from '../../utils/envUtils.js'
|
||||
import {
|
||||
readCodexCredentialsAsync,
|
||||
refreshCodexAccessTokenIfNeeded,
|
||||
} from '../../utils/codexCredentials.js'
|
||||
import { logForDebugging } from '../../utils/debug.js'
|
||||
import { isBareMode, isEnvTruthy } from '../../utils/envUtils.js'
|
||||
import { resolveGeminiCredential } from '../../utils/geminiAuth.js'
|
||||
import { hydrateGeminiAccessTokenFromSecureStorage } from '../../utils/geminiCredentials.js'
|
||||
import { hydrateGithubModelsTokenFromSecureStorage } from '../../utils/githubModelsCredentials.js'
|
||||
@@ -44,7 +49,7 @@ import {
|
||||
} from './codexShim.js'
|
||||
import {
|
||||
isLocalProviderUrl,
|
||||
resolveCodexApiCredentials,
|
||||
resolveRuntimeCodexCredentials,
|
||||
resolveProviderRequest,
|
||||
getGithubEndpointType,
|
||||
} from './providerConfig.js'
|
||||
@@ -176,35 +181,61 @@ function convertSystemPrompt(
|
||||
return String(system)
|
||||
}
|
||||
|
||||
function convertToolResultContent(content: unknown): string {
|
||||
if (typeof content === 'string') return content
|
||||
if (!Array.isArray(content)) return JSON.stringify(content ?? '')
|
||||
function convertToolResultContent(
|
||||
content: unknown,
|
||||
isError?: boolean,
|
||||
): string | Array<{ type: string; text?: string; image_url?: { url: string } }> {
|
||||
if (typeof content === 'string') {
|
||||
return isError ? `Error: ${content}` : content
|
||||
}
|
||||
if (!Array.isArray(content)) {
|
||||
const text = JSON.stringify(content ?? '')
|
||||
return isError ? `Error: ${text}` : text
|
||||
}
|
||||
|
||||
const chunks: string[] = []
|
||||
const parts: Array<{
|
||||
type: string
|
||||
text?: string
|
||||
image_url?: { url: string }
|
||||
}> = []
|
||||
for (const block of content) {
|
||||
if (block?.type === 'text' && typeof block.text === 'string') {
|
||||
chunks.push(block.text)
|
||||
parts.push({ type: 'text', text: block.text })
|
||||
continue
|
||||
}
|
||||
|
||||
if (block?.type === 'image') {
|
||||
const source = block.source
|
||||
if (source?.type === 'url' && source.url) {
|
||||
chunks.push(`[Image](${source.url})`)
|
||||
} else if (source?.type === 'base64') {
|
||||
chunks.push(`[image:${source.media_type ?? 'unknown'}]`)
|
||||
} else {
|
||||
chunks.push('[image]')
|
||||
parts.push({ type: 'image_url', image_url: { url: source.url } })
|
||||
} else if (source?.type === 'base64' && source.media_type && source.data) {
|
||||
parts.push({
|
||||
type: 'image_url',
|
||||
image_url: {
|
||||
url: `data:${source.media_type};base64,${source.data}`,
|
||||
},
|
||||
})
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
if (typeof block?.text === 'string') {
|
||||
chunks.push(block.text)
|
||||
parts.push({ type: 'text', text: block.text })
|
||||
}
|
||||
}
|
||||
|
||||
return chunks.join('\n')
|
||||
if (parts.length === 0) return ''
|
||||
if (parts.length === 1 && parts[0].type === 'text') {
|
||||
const text = parts[0].text ?? ''
|
||||
return isError ? `Error: ${text}` : text
|
||||
}
|
||||
if (isError && parts[0]?.type === 'text') {
|
||||
parts[0] = { ...parts[0], text: `Error: ${parts[0].text ?? ''}` }
|
||||
} else if (isError) {
|
||||
parts.unshift({ type: 'text', text: 'Error:' })
|
||||
}
|
||||
|
||||
return parts
|
||||
}
|
||||
|
||||
function convertContentBlocks(
|
||||
@@ -292,11 +323,10 @@ function convertMessages(
|
||||
|
||||
// Emit tool results as tool messages
|
||||
for (const tr of toolResults) {
|
||||
const trContent = convertToolResultContent(tr.content)
|
||||
result.push({
|
||||
role: 'tool',
|
||||
tool_call_id: tr.tool_use_id ?? 'unknown',
|
||||
content: tr.is_error ? `Error: ${trContent}` : trContent,
|
||||
content: convertToolResultContent(tr.content, tr.is_error),
|
||||
})
|
||||
}
|
||||
|
||||
@@ -611,6 +641,7 @@ function repairPossiblyTruncatedObjectJson(raw: string): string | null {
|
||||
async function* openaiStreamToAnthropic(
|
||||
response: Response,
|
||||
model: string,
|
||||
signal?: AbortSignal,
|
||||
): AsyncGenerator<AnthropicStreamEvent> {
|
||||
const messageId = makeMessageId()
|
||||
let contentBlockIndex = 0
|
||||
@@ -658,6 +689,51 @@ async function* openaiStreamToAnthropic(
|
||||
|
||||
const decoder = new TextDecoder()
|
||||
let buffer = ''
|
||||
const STREAM_IDLE_TIMEOUT_MS = 120_000 // 2 minutes without data = connection likely dead
|
||||
let lastDataTime = Date.now()
|
||||
|
||||
/**
|
||||
* Read from the stream with an idle timeout. If no data arrives within
|
||||
* STREAM_IDLE_TIMEOUT_MS, assume the connection is dead and throw so
|
||||
* withRetry can reconnect. This prevents indefinite hangs on stale
|
||||
* SSE connections from OpenAI/Gemini during long-running sessions.
|
||||
* Respects the caller's AbortSignal — clears the idle timer on abort
|
||||
* so the rejection reason is AbortError, not a spurious idle timeout.
|
||||
*/
|
||||
async function readWithTimeout(): Promise<ReadableStreamReadResult<Uint8Array>> {
|
||||
return new Promise((resolve, reject) => {
|
||||
const timeoutId = setTimeout(() => {
|
||||
const elapsed = Math.round((Date.now() - lastDataTime) / 1000)
|
||||
reject(new Error(
|
||||
`OpenAI/Gemini SSE stream idle for ${elapsed}s (limit: ${STREAM_IDLE_TIMEOUT_MS / 1000}s). Connection likely dropped.`,
|
||||
))
|
||||
}, STREAM_IDLE_TIMEOUT_MS)
|
||||
|
||||
// If the caller aborts, clear the timer so the AbortError surfaces
|
||||
// cleanly instead of being masked by a spurious idle timeout.
|
||||
let abortCleanup: (() => void) | undefined
|
||||
if (signal) {
|
||||
abortCleanup = () => {
|
||||
clearTimeout(timeoutId)
|
||||
}
|
||||
signal.addEventListener('abort', abortCleanup, { once: true })
|
||||
}
|
||||
|
||||
reader.read().then(
|
||||
result => {
|
||||
clearTimeout(timeoutId)
|
||||
if (signal && abortCleanup) signal.removeEventListener('abort', abortCleanup)
|
||||
if (result.value) lastDataTime = Date.now()
|
||||
resolve(result)
|
||||
},
|
||||
err => {
|
||||
clearTimeout(timeoutId)
|
||||
if (signal && abortCleanup) signal.removeEventListener('abort', abortCleanup)
|
||||
reject(err)
|
||||
},
|
||||
)
|
||||
})
|
||||
}
|
||||
|
||||
const closeActiveContentBlock = async function* () {
|
||||
if (!hasEmittedContentStart) return
|
||||
@@ -685,7 +761,7 @@ async function* openaiStreamToAnthropic(
|
||||
|
||||
try {
|
||||
while (true) {
|
||||
const { done, value } = await reader.read()
|
||||
const { done, value } = await readWithTimeout()
|
||||
if (done) break
|
||||
|
||||
buffer += decoder.decode(value, { stream: true })
|
||||
@@ -1045,13 +1121,13 @@ class OpenAIShimMessages {
|
||||
const isResponsesStream = response.url?.includes('/responses')
|
||||
return new OpenAIShimStream(
|
||||
(request.transport === 'codex_responses' || isResponsesStream)
|
||||
? codexStreamToAnthropic(response, request.resolvedModel)
|
||||
: openaiStreamToAnthropic(response, request.resolvedModel),
|
||||
? codexStreamToAnthropic(response, request.resolvedModel, options?.signal)
|
||||
: openaiStreamToAnthropic(response, request.resolvedModel, options?.signal),
|
||||
)
|
||||
}
|
||||
|
||||
if (request.transport === 'codex_responses') {
|
||||
const data = await collectCodexCompletedResponse(response)
|
||||
const data = await collectCodexCompletedResponse(response, options?.signal)
|
||||
return convertCodexResponseToAnthropicMessage(
|
||||
data,
|
||||
request.resolvedModel,
|
||||
@@ -1114,7 +1190,6 @@ class OpenAIShimMessages {
|
||||
const githubEndpointType = getGithubEndpointType(request.baseUrl)
|
||||
const isGithubMode = isGithubModelsMode()
|
||||
const isGithubWithCodexTransport = isGithubMode && request.transport === 'codex_responses'
|
||||
const isGithubCopilotEndpoint = isGithubMode && githubEndpointType === 'copilot'
|
||||
|
||||
if (isGithubWithCodexTransport) {
|
||||
const apiKey = this.providerOverride?.apiKey ?? process.env.OPENAI_API_KEY ?? ''
|
||||
@@ -1141,11 +1216,26 @@ class OpenAIShimMessages {
|
||||
}
|
||||
|
||||
if (request.transport === 'codex_responses' && !isGithubMode) {
|
||||
const credentials = resolveCodexApiCredentials()
|
||||
const refreshResult = await refreshCodexAccessTokenIfNeeded().catch(
|
||||
async error => {
|
||||
logForDebugging(
|
||||
`[codex] access token refresh failed before request: ${error instanceof Error ? error.message : String(error)}`,
|
||||
{ level: 'warn' },
|
||||
)
|
||||
return {
|
||||
refreshed: false,
|
||||
credentials: await readCodexCredentialsAsync(),
|
||||
}
|
||||
},
|
||||
)
|
||||
const credentials = resolveRuntimeCodexCredentials({
|
||||
storedCredentials: refreshResult.credentials,
|
||||
})
|
||||
if (!credentials.apiKey) {
|
||||
const oauthHint = isBareMode() ? '' : ', choose Codex OAuth in /provider'
|
||||
const authHint = credentials.authPath
|
||||
? ` or place a Codex auth.json at ${credentials.authPath}`
|
||||
: ''
|
||||
? `${oauthHint} or place a Codex auth.json at ${credentials.authPath}`
|
||||
: oauthHint
|
||||
const safeModel =
|
||||
redactSecretValueForDisplay(request.requestedModel, process.env as SecretValueSource) ??
|
||||
'the requested model'
|
||||
@@ -1155,7 +1245,7 @@ class OpenAIShimMessages {
|
||||
}
|
||||
if (!credentials.accountId) {
|
||||
throw new Error(
|
||||
'Codex auth is missing chatgpt_account_id. Re-login with the Codex CLI or set CHATGPT_ACCOUNT_ID/CODEX_ACCOUNT_ID.',
|
||||
'Codex auth is missing chatgpt_account_id. Re-login with Codex OAuth, the Codex CLI, or set CHATGPT_ACCOUNT_ID/CODEX_ACCOUNT_ID.',
|
||||
)
|
||||
}
|
||||
|
||||
@@ -1216,18 +1306,20 @@ class OpenAIShimMessages {
|
||||
|
||||
const isGithub = isGithubModelsMode()
|
||||
const isMistral = isMistralMode()
|
||||
const isLocal = isLocalProviderUrl(request.baseUrl)
|
||||
|
||||
const githubEndpointType = getGithubEndpointType(request.baseUrl)
|
||||
const isGithubCopilot = isGithub && githubEndpointType === 'copilot'
|
||||
const isGithubModels = isGithub && (githubEndpointType === 'models' || githubEndpointType === 'custom')
|
||||
|
||||
if ((isGithub || isMistral) && body.max_completion_tokens !== undefined) {
|
||||
if ((isGithub || isMistral || isLocal) && body.max_completion_tokens !== undefined) {
|
||||
body.max_tokens = body.max_completion_tokens
|
||||
delete body.max_completion_tokens
|
||||
}
|
||||
|
||||
// mistral also doesn't recognize body.store
|
||||
if (isMistral) {
|
||||
// mistral and gemini don't recognize body.store — Gemini returns 400
|
||||
// "Invalid JSON payload received. Unknown name 'store': Cannot find field."
|
||||
if (isMistral || isGeminiMode()) {
|
||||
delete body.store
|
||||
}
|
||||
|
||||
|
||||
225
src/services/api/providerConfig.codexSecureStorage.test.ts
Normal file
225
src/services/api/providerConfig.codexSecureStorage.test.ts
Normal file
@@ -0,0 +1,225 @@
|
||||
import { afterEach, describe, expect, mock, test } from 'bun:test'
|
||||
import { mkdirSync, mkdtempSync, rmSync, writeFileSync } from 'node:fs'
|
||||
import { tmpdir } from 'node:os'
|
||||
import { join } from 'node:path'
|
||||
import * as realOs from 'node:os'
|
||||
|
||||
function makeJwt(payload: Record<string, unknown>): string {
|
||||
const header = Buffer.from(JSON.stringify({ alg: 'none', typ: 'JWT' }))
|
||||
.toString('base64url')
|
||||
const body = Buffer.from(JSON.stringify(payload)).toString('base64url')
|
||||
return `${header}.${body}.signature`
|
||||
}
|
||||
|
||||
describe('resolveCodexApiCredentials with secure storage', () => {
|
||||
afterEach(() => {
|
||||
mock.restore()
|
||||
})
|
||||
|
||||
test('loads Codex credentials from OpenClaude secure storage', async () => {
|
||||
mock.module('../../utils/codexCredentials.js', () => ({
|
||||
isCodexRefreshFailureCoolingDown: () => false,
|
||||
readCodexCredentials: () => ({
|
||||
apiKey: 'codex-api-key-token',
|
||||
accessToken: 'header.payload.signature',
|
||||
accountId: 'acct_secure',
|
||||
}),
|
||||
}))
|
||||
|
||||
// @ts-expect-error cache-busting query string for Bun module mocks
|
||||
const { resolveCodexApiCredentials } = await import(
|
||||
'./providerConfig.js?codex-secure-storage'
|
||||
)
|
||||
|
||||
const credentials = resolveCodexApiCredentials({} as NodeJS.ProcessEnv)
|
||||
expect(credentials.apiKey).toBe('codex-api-key-token')
|
||||
expect(credentials.accountId).toBe('acct_secure')
|
||||
expect(credentials.source).toBe('secure-storage')
|
||||
})
|
||||
|
||||
test('prefers explicit env credentials over secure storage', async () => {
|
||||
mock.module('../../utils/codexCredentials.js', () => ({
|
||||
isCodexRefreshFailureCoolingDown: () => false,
|
||||
readCodexCredentials: () => ({
|
||||
accessToken: 'stored-token',
|
||||
accountId: 'acct_stored',
|
||||
}),
|
||||
}))
|
||||
|
||||
// @ts-expect-error cache-busting query string for Bun module mocks
|
||||
const { resolveCodexApiCredentials } = await import(
|
||||
'./providerConfig.js?codex-env-precedence'
|
||||
)
|
||||
|
||||
const credentials = resolveCodexApiCredentials({
|
||||
CODEX_API_KEY: 'env-token',
|
||||
CHATGPT_ACCOUNT_ID: 'acct_env',
|
||||
} as NodeJS.ProcessEnv)
|
||||
|
||||
expect(credentials.apiKey).toBe('env-token')
|
||||
expect(credentials.accountId).toBe('acct_env')
|
||||
expect(credentials.source).toBe('env')
|
||||
})
|
||||
|
||||
test('parses nested chatgpt_account_id from a CODEX_API_KEY JWT', async () => {
|
||||
mock.module('../../utils/codexCredentials.js', () => ({
|
||||
isCodexRefreshFailureCoolingDown: () => false,
|
||||
readCodexCredentials: () => undefined,
|
||||
}))
|
||||
|
||||
// @ts-expect-error cache-busting query string for Bun module mocks
|
||||
const { resolveCodexApiCredentials } = await import(
|
||||
'./providerConfig.js?codex-env-nested-account'
|
||||
)
|
||||
|
||||
const credentials = resolveCodexApiCredentials({
|
||||
CODEX_API_KEY: makeJwt({
|
||||
'https://api.openai.com/auth': {
|
||||
chatgpt_account_id: 'acct_nested_env',
|
||||
},
|
||||
}),
|
||||
} as NodeJS.ProcessEnv)
|
||||
|
||||
expect(credentials.accountId).toBe('acct_nested_env')
|
||||
expect(credentials.source).toBe('env')
|
||||
})
|
||||
|
||||
test('parses nested chatgpt_account_id from auth.json tokens', async () => {
|
||||
mock.module('../../utils/codexCredentials.js', () => ({
|
||||
isCodexRefreshFailureCoolingDown: () => false,
|
||||
readCodexCredentials: () => undefined,
|
||||
}))
|
||||
|
||||
const tempDir = mkdtempSync(join(tmpdir(), 'openclaude-codex-auth-'))
|
||||
const authPath = join(tempDir, 'auth.json')
|
||||
|
||||
writeFileSync(
|
||||
authPath,
|
||||
JSON.stringify({
|
||||
openai_api_key: makeJwt({
|
||||
'https://api.openai.com/auth': {
|
||||
chatgpt_account_id: 'acct_nested_auth_json',
|
||||
},
|
||||
}),
|
||||
}),
|
||||
'utf8',
|
||||
)
|
||||
|
||||
try {
|
||||
// @ts-expect-error cache-busting query string for Bun module mocks
|
||||
const { resolveCodexApiCredentials } = await import(
|
||||
'./providerConfig.js?codex-auth-json-nested-account'
|
||||
)
|
||||
|
||||
const credentials = resolveCodexApiCredentials({
|
||||
CODEX_AUTH_JSON_PATH: authPath,
|
||||
} as NodeJS.ProcessEnv)
|
||||
|
||||
expect(credentials.accountId).toBe('acct_nested_auth_json')
|
||||
expect(credentials.source).toBe('auth.json')
|
||||
} finally {
|
||||
rmSync(tempDir, { force: true, recursive: true })
|
||||
}
|
||||
})
|
||||
|
||||
test('does not read default auth.json when secure storage already has Codex credentials', async () => {
|
||||
mock.module('../../utils/codexCredentials.js', () => ({
|
||||
isCodexRefreshFailureCoolingDown: () => false,
|
||||
readCodexCredentials: () => ({
|
||||
apiKey: 'codex-api-key-token',
|
||||
accessToken: 'header.payload.signature',
|
||||
accountId: 'acct_secure',
|
||||
}),
|
||||
}))
|
||||
|
||||
// @ts-expect-error cache-busting query string for Bun module mocks
|
||||
const { resolveCodexApiCredentials } = await import(
|
||||
'./providerConfig.js?codex-secure-storage-no-auth-io'
|
||||
)
|
||||
|
||||
const credentials = resolveCodexApiCredentials({} as NodeJS.ProcessEnv)
|
||||
expect(credentials.apiKey).toBe('codex-api-key-token')
|
||||
expect(credentials.accountId).toBe('acct_secure')
|
||||
expect(credentials.source).toBe('secure-storage')
|
||||
})
|
||||
|
||||
test('falls back to the default auth.json when stored Codex refresh is cooling down', async () => {
|
||||
const tempHomeDir = mkdtempSync(join(tmpdir(), 'openclaude-codex-home-'))
|
||||
const authJson = JSON.stringify({
|
||||
openai_api_key: makeJwt({
|
||||
'https://api.openai.com/auth': {
|
||||
chatgpt_account_id: 'acct_auth_json',
|
||||
},
|
||||
}),
|
||||
})
|
||||
mkdirSync(join(tempHomeDir, '.codex'), { recursive: true })
|
||||
writeFileSync(join(tempHomeDir, '.codex', 'auth.json'), authJson, 'utf8')
|
||||
|
||||
mock.module('node:os', () => ({
|
||||
...realOs,
|
||||
homedir: () => tempHomeDir,
|
||||
}))
|
||||
|
||||
mock.module('../../utils/codexCredentials.js', () => ({
|
||||
isCodexRefreshFailureCoolingDown: () => true,
|
||||
readCodexCredentials: () => ({
|
||||
accessToken: 'stored-token',
|
||||
refreshToken: 'refresh-stored',
|
||||
accountId: 'acct_stored',
|
||||
lastRefreshFailureAt: Date.now(),
|
||||
}),
|
||||
}))
|
||||
|
||||
// @ts-expect-error cache-busting query string for Bun module mocks
|
||||
const { resolveCodexApiCredentials } = await import(
|
||||
'./providerConfig.js?codex-refresh-cooldown-fallback'
|
||||
)
|
||||
|
||||
try {
|
||||
const credentials = resolveCodexApiCredentials({} as NodeJS.ProcessEnv)
|
||||
expect(credentials.source).toBe('auth.json')
|
||||
expect(credentials.accountId).toBe('acct_auth_json')
|
||||
expect(credentials.apiKey).not.toBe('stored-token')
|
||||
} finally {
|
||||
rmSync(tempHomeDir, { force: true, recursive: true })
|
||||
}
|
||||
})
|
||||
|
||||
test('preserves the stored account id when auth.json fallback lacks one', async () => {
|
||||
const tempHomeDir = mkdtempSync(join(tmpdir(), 'openclaude-codex-home-'))
|
||||
const authJson = JSON.stringify({
|
||||
openai_api_key: 'auth-json-access-token',
|
||||
})
|
||||
mkdirSync(join(tempHomeDir, '.codex'), { recursive: true })
|
||||
writeFileSync(join(tempHomeDir, '.codex', 'auth.json'), authJson, 'utf8')
|
||||
|
||||
mock.module('node:os', () => ({
|
||||
...realOs,
|
||||
homedir: () => tempHomeDir,
|
||||
}))
|
||||
|
||||
mock.module('../../utils/codexCredentials.js', () => ({
|
||||
isCodexRefreshFailureCoolingDown: () => true,
|
||||
readCodexCredentials: () => ({
|
||||
accessToken: 'stored-token',
|
||||
refreshToken: 'refresh-stored',
|
||||
accountId: 'acct_stored',
|
||||
lastRefreshFailureAt: Date.now(),
|
||||
}),
|
||||
}))
|
||||
|
||||
// @ts-expect-error cache-busting query string for Bun module mocks
|
||||
const { resolveCodexApiCredentials } = await import(
|
||||
'./providerConfig.js?codex-refresh-cooldown-account-id-fallback'
|
||||
)
|
||||
|
||||
try {
|
||||
const credentials = resolveCodexApiCredentials({} as NodeJS.ProcessEnv)
|
||||
expect(credentials.source).toBe('auth.json')
|
||||
expect(credentials.apiKey).toBe('auth-json-access-token')
|
||||
expect(credentials.accountId).toBe('acct_stored')
|
||||
} finally {
|
||||
rmSync(tempHomeDir, { force: true, recursive: true })
|
||||
}
|
||||
})
|
||||
})
|
||||
107
src/services/api/providerConfig.runtimeCodexCredentials.test.ts
Normal file
107
src/services/api/providerConfig.runtimeCodexCredentials.test.ts
Normal file
@@ -0,0 +1,107 @@
|
||||
import { afterEach, expect, mock, test } from 'bun:test'
|
||||
import { mkdtempSync, rmSync, writeFileSync } from 'node:fs'
|
||||
import { tmpdir } from 'node:os'
|
||||
import { join } from 'node:path'
|
||||
|
||||
import { resolveRuntimeCodexCredentials } from './providerConfig.js'
|
||||
|
||||
afterEach(() => {
|
||||
mock.restore()
|
||||
})
|
||||
|
||||
function makeJwt(payload: Record<string, unknown>): string {
|
||||
const header = Buffer.from(JSON.stringify({ alg: 'none', typ: 'JWT' }))
|
||||
.toString('base64url')
|
||||
const body = Buffer.from(JSON.stringify(payload)).toString('base64url')
|
||||
return `${header}.${body}.signature`
|
||||
}
|
||||
|
||||
test('runtime credential resolution honors explicit auth.json over stored secure-storage tokens', () => {
|
||||
const tempDir = mkdtempSync(join(tmpdir(), 'openclaude-codex-explicit-auth-'))
|
||||
const authPath = join(tempDir, 'auth.json')
|
||||
|
||||
writeFileSync(
|
||||
authPath,
|
||||
JSON.stringify({
|
||||
openai_api_key: makeJwt({
|
||||
'https://api.openai.com/auth': {
|
||||
chatgpt_account_id: 'acct_explicit_auth_json',
|
||||
},
|
||||
}),
|
||||
}),
|
||||
'utf8',
|
||||
)
|
||||
|
||||
try {
|
||||
const credentials = resolveRuntimeCodexCredentials({
|
||||
env: {
|
||||
CODEX_AUTH_JSON_PATH: authPath,
|
||||
} as NodeJS.ProcessEnv,
|
||||
storedCredentials: {
|
||||
apiKey: 'stored-api-key',
|
||||
accessToken: 'stored-access-token',
|
||||
accountId: 'acct_stored',
|
||||
},
|
||||
})
|
||||
|
||||
expect(credentials.source).toBe('auth.json')
|
||||
expect(credentials.accountId).toBe('acct_explicit_auth_json')
|
||||
expect(credentials.apiKey).not.toBe('stored-api-key')
|
||||
} finally {
|
||||
rmSync(tempDir, { force: true, recursive: true })
|
||||
}
|
||||
})
|
||||
|
||||
test('runtime credential resolution preserves an explicit auth.json path even when it is missing', () => {
|
||||
const tempDir = mkdtempSync(join(tmpdir(), 'openclaude-codex-missing-auth-'))
|
||||
const authPath = join(tempDir, 'missing-auth.json')
|
||||
|
||||
try {
|
||||
const credentials = resolveRuntimeCodexCredentials({
|
||||
env: {
|
||||
CODEX_AUTH_JSON_PATH: authPath,
|
||||
} as NodeJS.ProcessEnv,
|
||||
storedCredentials: {
|
||||
apiKey: 'stored-api-key',
|
||||
accessToken: 'stored-access-token',
|
||||
accountId: 'acct_stored',
|
||||
},
|
||||
})
|
||||
|
||||
expect(credentials.source).toBe('none')
|
||||
expect(credentials.authPath).toBe(authPath)
|
||||
expect(credentials.apiKey).toBe('')
|
||||
} finally {
|
||||
rmSync(tempDir, { force: true, recursive: true })
|
||||
}
|
||||
})
|
||||
|
||||
test('runtime credential resolution avoids sync secure-storage reads when async credentials are provided', async () => {
|
||||
let syncReadCalled = false
|
||||
|
||||
mock.module('../../utils/codexCredentials.js', () => ({
|
||||
isCodexRefreshFailureCoolingDown: () => false,
|
||||
readCodexCredentials: () => {
|
||||
syncReadCalled = true
|
||||
throw new Error('sync secure-storage read should not run in runtime resolution')
|
||||
},
|
||||
}))
|
||||
|
||||
// @ts-expect-error cache-busting query string for Bun module mocks
|
||||
const { resolveRuntimeCodexCredentials } = await import(
|
||||
'./providerConfig.js?runtime-no-sync-secure-storage'
|
||||
)
|
||||
|
||||
const credentials = resolveRuntimeCodexCredentials({
|
||||
env: {} as NodeJS.ProcessEnv,
|
||||
storedCredentials: {
|
||||
accessToken: 'stored-access-token',
|
||||
accountId: 'acct_stored',
|
||||
},
|
||||
})
|
||||
|
||||
expect(syncReadCalled).toBe(false)
|
||||
expect(credentials.source).toBe('secure-storage')
|
||||
expect(credentials.apiKey).toBe('stored-access-token')
|
||||
expect(credentials.accountId).toBe('acct_stored')
|
||||
})
|
||||
@@ -3,7 +3,16 @@ import { isIP } from 'node:net'
|
||||
import { homedir } from 'node:os'
|
||||
import { join } from 'node:path'
|
||||
|
||||
import {
|
||||
isCodexRefreshFailureCoolingDown,
|
||||
readCodexCredentials,
|
||||
type CodexCredentialBlob,
|
||||
} from '../../utils/codexCredentials.js'
|
||||
import { isEnvTruthy } from '../../utils/envUtils.js'
|
||||
import {
|
||||
asTrimmedString,
|
||||
parseChatgptAccountId,
|
||||
} from './codexOAuthShared.js'
|
||||
|
||||
export const DEFAULT_OPENAI_BASE_URL = 'https://api.openai.com/v1'
|
||||
export const DEFAULT_CODEX_BASE_URL = 'https://chatgpt.com/backend-api/codex'
|
||||
@@ -60,6 +69,8 @@ const CODEX_ALIAS_MODELS: Record<
|
||||
type CodexAlias = keyof typeof CODEX_ALIAS_MODELS
|
||||
type ReasoningEffort = 'low' | 'medium' | 'high' | 'xhigh'
|
||||
|
||||
const OPENAI_CODEX_SHORTCUT_ALIASES = new Set(['codexplan', 'codexspark'])
|
||||
|
||||
export type ProviderTransport = 'chat_completions' | 'codex_responses'
|
||||
|
||||
export type ResolvedProviderRequest = {
|
||||
@@ -76,7 +87,7 @@ export type ResolvedCodexCredentials = {
|
||||
apiKey: string
|
||||
accountId?: string
|
||||
authPath?: string
|
||||
source: 'env' | 'auth.json' | 'none'
|
||||
source: 'env' | 'secure-storage' | 'auth.json' | 'none'
|
||||
}
|
||||
|
||||
type ModelDescriptor = {
|
||||
@@ -112,12 +123,6 @@ function isPrivateIpv6Address(hostname: string): boolean {
|
||||
return (prefix & 0xfe00) === 0xfc00 || (prefix & 0xffc0) === 0xfe80
|
||||
}
|
||||
|
||||
function asTrimmedString(value: unknown): string | undefined {
|
||||
if (typeof value !== 'string') return undefined
|
||||
const trimmed = value.trim()
|
||||
return trimmed ? trimmed : undefined
|
||||
}
|
||||
|
||||
// Reads an env-var-style string intended as a URL or path, rejecting both
|
||||
// empty strings and the literal string "undefined" that Windows shells can
|
||||
// write when a variable is unset-then-referenced without quotes (issue #336).
|
||||
@@ -149,23 +154,6 @@ function readNestedString(
|
||||
return undefined
|
||||
}
|
||||
|
||||
function decodeJwtPayload(token: string): Record<string, unknown> | undefined {
|
||||
const parts = token.split('.')
|
||||
if (parts.length < 2) return undefined
|
||||
|
||||
try {
|
||||
const normalized = parts[1].replace(/-/g, '+').replace(/_/g, '/')
|
||||
const padded = normalized + '='.repeat((4 - (normalized.length % 4)) % 4)
|
||||
const json = Buffer.from(padded, 'base64').toString('utf8')
|
||||
const parsed = JSON.parse(json)
|
||||
return parsed && typeof parsed === 'object'
|
||||
? (parsed as Record<string, unknown>)
|
||||
: undefined
|
||||
} catch {
|
||||
return undefined
|
||||
}
|
||||
}
|
||||
|
||||
function parseReasoningEffort(value: string | undefined): ReasoningEffort | undefined {
|
||||
if (!value) return undefined
|
||||
const normalized = value.trim().toLowerCase()
|
||||
@@ -220,6 +208,12 @@ export function isCodexAlias(model: string): boolean {
|
||||
return base in CODEX_ALIAS_MODELS
|
||||
}
|
||||
|
||||
function isOpenAICodexShortcutAlias(model: string): boolean {
|
||||
const normalized = model.trim().toLowerCase()
|
||||
const base = normalized.split('?', 1)[0] ?? normalized
|
||||
return OPENAI_CODEX_SHORTCUT_ALIASES.has(base)
|
||||
}
|
||||
|
||||
export function shouldUseCodexTransport(
|
||||
model: string,
|
||||
baseUrl: string | undefined,
|
||||
@@ -367,13 +361,41 @@ export function resolveProviderRequest(options?: {
|
||||
options?.fallbackModel?.trim() ||
|
||||
(isGithubMode ? 'github:copilot' : 'gpt-4o')
|
||||
const descriptor = parseModelDescriptor(requestedModel)
|
||||
const rawBaseUrl =
|
||||
asEnvUrl(options?.baseUrl) ??
|
||||
const explicitBaseUrl = asEnvUrl(options?.baseUrl)
|
||||
const envBaseUrlRaw =
|
||||
explicitBaseUrl ??
|
||||
asEnvUrl(
|
||||
isMistralMode ? (process.env.MISTRAL_BASE_URL ?? DEFAULT_MISTRAL_BASE_URL) : process.env.OPENAI_BASE_URL,
|
||||
isMistralMode
|
||||
? (process.env.MISTRAL_BASE_URL ?? DEFAULT_MISTRAL_BASE_URL)
|
||||
: process.env.OPENAI_BASE_URL
|
||||
) ??
|
||||
asEnvUrl(process.env.OPENAI_API_BASE)
|
||||
|
||||
const isCodexModelForGithub = isGithubMode && isCodexAlias(requestedModel)
|
||||
const envBaseUrl =
|
||||
isCodexModelForGithub && envBaseUrlRaw && getGithubEndpointType(envBaseUrlRaw) === 'custom'
|
||||
? undefined
|
||||
: envBaseUrlRaw
|
||||
|
||||
const rawBaseUrl = explicitBaseUrl ?? envBaseUrl
|
||||
|
||||
const shellModel = process.env.OPENAI_MODEL?.trim() ?? ''
|
||||
const envIsCodexShortcut = isOpenAICodexShortcutAlias(shellModel)
|
||||
const envResolvedCodexModel = envIsCodexShortcut
|
||||
? parseModelDescriptor(shellModel).baseModel
|
||||
: null
|
||||
const requestedMatchesEnvCodexShortcut =
|
||||
Boolean(options?.model) &&
|
||||
Boolean(envResolvedCodexModel) &&
|
||||
descriptor.baseModel === envResolvedCodexModel
|
||||
const isCodexAliasModel =
|
||||
isOpenAICodexShortcutAlias(requestedModel) || requestedMatchesEnvCodexShortcut
|
||||
const hasUserSetBaseUrl = rawBaseUrl && rawBaseUrl !== DEFAULT_OPENAI_BASE_URL
|
||||
const finalBaseUrl =
|
||||
!isGithubMode && isCodexAliasModel && !hasUserSetBaseUrl
|
||||
? DEFAULT_CODEX_BASE_URL
|
||||
: rawBaseUrl
|
||||
|
||||
const githubEndpointType = isGithubMode
|
||||
? getGithubEndpointType(rawBaseUrl)
|
||||
: 'custom'
|
||||
@@ -386,7 +408,7 @@ export function resolveProviderRequest(options?: {
|
||||
: requestedModel
|
||||
|
||||
const transport: ProviderTransport =
|
||||
shouldUseCodexTransport(requestedModel, rawBaseUrl) ||
|
||||
shouldUseCodexTransport(requestedModel, finalBaseUrl) ||
|
||||
(isGithubCopilot && shouldUseGithubResponsesApi(githubResolvedModel))
|
||||
? 'codex_responses'
|
||||
: 'chat_completions'
|
||||
@@ -410,7 +432,7 @@ export function resolveProviderRequest(options?: {
|
||||
requestedModel,
|
||||
resolvedModel,
|
||||
baseUrl:
|
||||
(rawBaseUrl ??
|
||||
(finalBaseUrl ??
|
||||
(isGithubCopilot && transport === 'codex_responses'
|
||||
? GITHUB_COPILOT_BASE_URL
|
||||
: (isGithubMode
|
||||
@@ -458,18 +480,6 @@ export function resolveCodexAuthPath(
|
||||
return join(homedir(), '.codex', 'auth.json')
|
||||
}
|
||||
|
||||
export function parseChatgptAccountId(
|
||||
token: string | undefined,
|
||||
): string | undefined {
|
||||
if (!token) return undefined
|
||||
const payload = decodeJwtPayload(token)
|
||||
const fromClaim = asTrimmedString(
|
||||
payload?.['https://api.openai.com/auth.chatgpt_account_id'],
|
||||
)
|
||||
if (fromClaim) return fromClaim
|
||||
return asTrimmedString(payload?.chatgpt_account_id)
|
||||
}
|
||||
|
||||
function loadCodexAuthJson(
|
||||
authPath: string,
|
||||
): Record<string, unknown> | undefined {
|
||||
@@ -485,8 +495,97 @@ function loadCodexAuthJson(
|
||||
}
|
||||
}
|
||||
|
||||
export function resolveCodexApiCredentials(
|
||||
env: NodeJS.ProcessEnv = process.env,
|
||||
function resolveCodexAuthJsonCredentials(options: {
|
||||
authJson: Record<string, unknown> | undefined
|
||||
authPath: string
|
||||
envAccountId?: string
|
||||
missingSource?: ResolvedCodexCredentials['source']
|
||||
}): ResolvedCodexCredentials {
|
||||
const { authJson, authPath, envAccountId } = options
|
||||
|
||||
if (!authJson) {
|
||||
return {
|
||||
apiKey: '',
|
||||
authPath,
|
||||
source: options.missingSource ?? 'none',
|
||||
}
|
||||
}
|
||||
|
||||
const apiKey = readNestedString(authJson, [
|
||||
['openai_api_key'],
|
||||
['openaiApiKey'],
|
||||
['access_token'],
|
||||
['accessToken'],
|
||||
['tokens', 'access_token'],
|
||||
['tokens', 'accessToken'],
|
||||
['auth', 'access_token'],
|
||||
['auth', 'accessToken'],
|
||||
['token', 'access_token'],
|
||||
['token', 'accessToken'],
|
||||
])
|
||||
// OIDC identity tokens can carry the ChatGPT account id, but they are not
|
||||
// valid bearer credentials for Codex API requests.
|
||||
const idToken = readNestedString(authJson, [
|
||||
['id_token'],
|
||||
['idToken'],
|
||||
['tokens', 'id_token'],
|
||||
['tokens', 'idToken'],
|
||||
])
|
||||
const accountId =
|
||||
envAccountId ??
|
||||
readNestedString(authJson, [
|
||||
['account_id'],
|
||||
['accountId'],
|
||||
['tokens', 'account_id'],
|
||||
['tokens', 'accountId'],
|
||||
['auth', 'account_id'],
|
||||
['auth', 'accountId'],
|
||||
]) ??
|
||||
parseChatgptAccountId(apiKey) ??
|
||||
parseChatgptAccountId(idToken)
|
||||
|
||||
if (!apiKey) {
|
||||
return {
|
||||
apiKey: '',
|
||||
accountId,
|
||||
authPath,
|
||||
source: options.missingSource ?? 'none',
|
||||
}
|
||||
}
|
||||
|
||||
return {
|
||||
apiKey,
|
||||
accountId,
|
||||
authPath,
|
||||
source: 'auth.json',
|
||||
}
|
||||
}
|
||||
|
||||
export function resolveStoredCodexCredentials(options: {
|
||||
storedCredentials: Pick<
|
||||
CodexCredentialBlob,
|
||||
'apiKey' | 'accessToken' | 'idToken' | 'accountId'
|
||||
>
|
||||
envAccountId?: string
|
||||
}): ResolvedCodexCredentials {
|
||||
const { storedCredentials, envAccountId } = options
|
||||
|
||||
return {
|
||||
apiKey: storedCredentials.apiKey ?? storedCredentials.accessToken,
|
||||
accountId:
|
||||
envAccountId ??
|
||||
storedCredentials.accountId ??
|
||||
parseChatgptAccountId(storedCredentials.idToken) ??
|
||||
parseChatgptAccountId(storedCredentials.accessToken),
|
||||
source: 'secure-storage',
|
||||
}
|
||||
}
|
||||
|
||||
function resolveEnvOrAuthJsonCodexCredentials(
|
||||
env: NodeJS.ProcessEnv,
|
||||
options?: {
|
||||
explicitAuthPathOnly?: boolean
|
||||
},
|
||||
): ResolvedCodexCredentials {
|
||||
const envApiKey = asTrimmedString(env.CODEX_API_KEY)
|
||||
const envAccountId =
|
||||
@@ -501,55 +600,127 @@ export function resolveCodexApiCredentials(
|
||||
}
|
||||
}
|
||||
|
||||
const explicitAuthPathConfigured = Boolean(
|
||||
asTrimmedString(env.CODEX_AUTH_JSON_PATH) ?? asTrimmedString(env.CODEX_HOME),
|
||||
)
|
||||
|
||||
if (!explicitAuthPathConfigured && options?.explicitAuthPathOnly) {
|
||||
return {
|
||||
apiKey: '',
|
||||
accountId: envAccountId,
|
||||
source: 'none',
|
||||
}
|
||||
}
|
||||
|
||||
const authPath = resolveCodexAuthPath(env)
|
||||
const authJson = loadCodexAuthJson(authPath)
|
||||
if (!authJson) {
|
||||
return {
|
||||
apiKey: '',
|
||||
authPath,
|
||||
source: 'none',
|
||||
}
|
||||
}
|
||||
|
||||
const apiKey = readNestedString(authJson, [
|
||||
['access_token'],
|
||||
['accessToken'],
|
||||
['tokens', 'access_token'],
|
||||
['tokens', 'accessToken'],
|
||||
['auth', 'access_token'],
|
||||
['auth', 'accessToken'],
|
||||
['token', 'access_token'],
|
||||
['token', 'accessToken'],
|
||||
['tokens', 'id_token'],
|
||||
['tokens', 'idToken'],
|
||||
])
|
||||
const accountId =
|
||||
envAccountId ??
|
||||
readNestedString(authJson, [
|
||||
['account_id'],
|
||||
['accountId'],
|
||||
['tokens', 'account_id'],
|
||||
['tokens', 'accountId'],
|
||||
['auth', 'account_id'],
|
||||
['auth', 'accountId'],
|
||||
]) ??
|
||||
parseChatgptAccountId(apiKey)
|
||||
|
||||
if (!apiKey) {
|
||||
return {
|
||||
apiKey: '',
|
||||
accountId,
|
||||
authPath,
|
||||
source: 'none',
|
||||
}
|
||||
}
|
||||
|
||||
return {
|
||||
apiKey,
|
||||
accountId,
|
||||
return resolveCodexAuthJsonCredentials({
|
||||
authJson,
|
||||
authPath,
|
||||
source: 'auth.json',
|
||||
envAccountId,
|
||||
})
|
||||
}
|
||||
|
||||
export function resolveRuntimeCodexCredentials(options?: {
|
||||
env?: NodeJS.ProcessEnv
|
||||
storedCredentials?: Pick<
|
||||
CodexCredentialBlob,
|
||||
'apiKey' | 'accessToken' | 'idToken' | 'accountId'
|
||||
>
|
||||
}): ResolvedCodexCredentials {
|
||||
const env = options?.env ?? process.env
|
||||
const explicitCredentials = resolveEnvOrAuthJsonCodexCredentials(env, {
|
||||
explicitAuthPathOnly: true,
|
||||
})
|
||||
const explicitAuthPathConfigured = Boolean(
|
||||
asTrimmedString(env.CODEX_AUTH_JSON_PATH) ?? asTrimmedString(env.CODEX_HOME),
|
||||
)
|
||||
const hasStoredCredentialsOption = Boolean(
|
||||
options &&
|
||||
Object.prototype.hasOwnProperty.call(options, 'storedCredentials'),
|
||||
)
|
||||
|
||||
if (
|
||||
explicitAuthPathConfigured ||
|
||||
explicitCredentials.source === 'env' ||
|
||||
explicitCredentials.source === 'auth.json'
|
||||
) {
|
||||
return explicitCredentials
|
||||
}
|
||||
|
||||
if (options?.storedCredentials?.accessToken) {
|
||||
return resolveStoredCodexCredentials({
|
||||
storedCredentials: options.storedCredentials,
|
||||
envAccountId:
|
||||
asTrimmedString(env.CODEX_ACCOUNT_ID) ??
|
||||
asTrimmedString(env.CHATGPT_ACCOUNT_ID),
|
||||
})
|
||||
}
|
||||
|
||||
if (hasStoredCredentialsOption) {
|
||||
return resolveEnvOrAuthJsonCodexCredentials(env)
|
||||
}
|
||||
|
||||
return resolveCodexApiCredentials(env)
|
||||
}
|
||||
|
||||
export function resolveCodexApiCredentials(
|
||||
env: NodeJS.ProcessEnv = process.env,
|
||||
): ResolvedCodexCredentials {
|
||||
const envAccountId =
|
||||
asTrimmedString(env.CODEX_ACCOUNT_ID) ??
|
||||
asTrimmedString(env.CHATGPT_ACCOUNT_ID)
|
||||
const envOrExplicitAuthJsonCredentials = resolveEnvOrAuthJsonCodexCredentials(
|
||||
env,
|
||||
{
|
||||
explicitAuthPathOnly: true,
|
||||
},
|
||||
)
|
||||
|
||||
if (
|
||||
envOrExplicitAuthJsonCredentials.source === 'env' ||
|
||||
envOrExplicitAuthJsonCredentials.source === 'auth.json' ||
|
||||
envOrExplicitAuthJsonCredentials.authPath
|
||||
) {
|
||||
return envOrExplicitAuthJsonCredentials
|
||||
}
|
||||
|
||||
const storedCredentials = readCodexCredentials()
|
||||
if (storedCredentials?.accessToken) {
|
||||
const resolvedStoredCredentials = resolveStoredCodexCredentials({
|
||||
storedCredentials,
|
||||
envAccountId,
|
||||
})
|
||||
|
||||
const shouldCheckDefaultAuthJson =
|
||||
!resolvedStoredCredentials.accountId ||
|
||||
isCodexRefreshFailureCoolingDown(storedCredentials)
|
||||
|
||||
if (!shouldCheckDefaultAuthJson) {
|
||||
return resolvedStoredCredentials
|
||||
}
|
||||
|
||||
const authPath = resolveCodexAuthPath(env)
|
||||
const authJson = loadCodexAuthJson(authPath)
|
||||
const resolvedAuthJsonCredentials = resolveCodexAuthJsonCredentials({
|
||||
authJson,
|
||||
authPath,
|
||||
envAccountId,
|
||||
})
|
||||
|
||||
if (resolvedAuthJsonCredentials.apiKey) {
|
||||
return {
|
||||
...resolvedAuthJsonCredentials,
|
||||
accountId:
|
||||
resolvedAuthJsonCredentials.accountId ??
|
||||
resolvedStoredCredentials.accountId,
|
||||
}
|
||||
}
|
||||
|
||||
return resolvedStoredCredentials
|
||||
}
|
||||
|
||||
return resolveEnvOrAuthJsonCodexCredentials(env)
|
||||
}
|
||||
|
||||
export function getReasoningEffortForModel(model: string): ReasoningEffort | undefined {
|
||||
@@ -559,3 +730,18 @@ export function getReasoningEffortForModel(model: string): ReasoningEffort | und
|
||||
const aliasConfig = CODEX_ALIAS_MODELS[alias]
|
||||
return aliasConfig?.reasoningEffort
|
||||
}
|
||||
|
||||
export function supportsCodexReasoningEffort(model: string): boolean {
|
||||
const normalized = model.trim().toLowerCase()
|
||||
const base = normalized.split('?', 1)[0] ?? normalized
|
||||
|
||||
if (base === 'gpt-5.3-codex-spark' || base === 'codexspark') {
|
||||
return false
|
||||
}
|
||||
|
||||
if (getReasoningEffortForModel(base) !== undefined) {
|
||||
return true
|
||||
}
|
||||
|
||||
return /^gpt-5(?:[.-]|$)/.test(base)
|
||||
}
|
||||
|
||||
46
src/services/compact/autoCompact.test.ts
Normal file
46
src/services/compact/autoCompact.test.ts
Normal file
@@ -0,0 +1,46 @@
|
||||
import { describe, expect, test } from 'bun:test'
|
||||
import {
|
||||
getEffectiveContextWindowSize,
|
||||
getAutoCompactThreshold,
|
||||
} from './autoCompact.ts'
|
||||
|
||||
describe('getEffectiveContextWindowSize', () => {
|
||||
test('returns positive value for known models with large context windows', () => {
|
||||
// claude-sonnet-4 has 200k context
|
||||
const effective = getEffectiveContextWindowSize('claude-sonnet-4')
|
||||
expect(effective).toBeGreaterThan(0)
|
||||
})
|
||||
|
||||
test('never returns negative even for unknown 3P models (issue #635)', () => {
|
||||
// Previously, unknown 3P models got 8k context → effective context was
|
||||
// 8k minus 20k summary reservation = -12k, causing infinite auto-compact.
|
||||
// Now the fallback is 128k and there's a floor, so effective is always
|
||||
// at least reservedTokensForSummary + buffer.
|
||||
process.env.CLAUDE_CODE_USE_OPENAI = '1'
|
||||
try {
|
||||
const effective = getEffectiveContextWindowSize('some-unknown-3p-model')
|
||||
expect(effective).toBeGreaterThan(0)
|
||||
// Must be at least summary reservation (20k) + buffer (13k) = 33k
|
||||
expect(effective).toBeGreaterThanOrEqual(33_000)
|
||||
} finally {
|
||||
delete process.env.CLAUDE_CODE_USE_OPENAI
|
||||
}
|
||||
})
|
||||
})
|
||||
|
||||
describe('getAutoCompactThreshold', () => {
|
||||
test('returns positive threshold for known models', () => {
|
||||
const threshold = getAutoCompactThreshold('claude-sonnet-4')
|
||||
expect(threshold).toBeGreaterThan(0)
|
||||
})
|
||||
|
||||
test('never returns negative threshold even for unknown 3P models (issue #635)', () => {
|
||||
process.env.CLAUDE_CODE_USE_OPENAI = '1'
|
||||
try {
|
||||
const threshold = getAutoCompactThreshold('some-unknown-3p-model')
|
||||
expect(threshold).toBeGreaterThan(0)
|
||||
} finally {
|
||||
delete process.env.CLAUDE_CODE_USE_OPENAI
|
||||
}
|
||||
})
|
||||
})
|
||||
@@ -45,7 +45,12 @@ export function getEffectiveContextWindowSize(model: string): number {
|
||||
}
|
||||
}
|
||||
|
||||
return contextWindow - reservedTokensForSummary
|
||||
// Floor: effective context must be at least the summary reservation plus a
|
||||
// usable buffer. If it goes lower, the auto-compact threshold becomes
|
||||
// negative and fires on every message (issue #635).
|
||||
const autocompactBuffer = 13_000 // must match AUTOCOMPACT_BUFFER_TOKENS
|
||||
const effectiveContext = contextWindow - reservedTokensForSummary
|
||||
return Math.max(effectiveContext, reservedTokensForSummary + autocompactBuffer)
|
||||
}
|
||||
|
||||
export type AutoCompactTrackingState = {
|
||||
|
||||
@@ -206,9 +206,12 @@ export function isMcpSessionExpiredError(error: Error): boolean {
|
||||
}
|
||||
|
||||
/**
|
||||
* Default timeout for MCP tool calls (effectively infinite - ~27.8 hours).
|
||||
* Default timeout for MCP tool calls (5 minutes — reasonable for most tools).
|
||||
* Use MCP_TOOL_TIMEOUT env var to override per-server.
|
||||
* The previous default of ~27.8 hours effectively meant no timeout, causing
|
||||
* tools to hang indefinitely on unresponsive servers.
|
||||
*/
|
||||
const DEFAULT_MCP_TOOL_TIMEOUT_MS = 100_000_000
|
||||
const DEFAULT_MCP_TOOL_TIMEOUT_MS = 300_000
|
||||
|
||||
/**
|
||||
* Cap on MCP tool descriptions and server instructions sent to the model.
|
||||
@@ -1764,10 +1767,32 @@ export const fetchToolsForClient = memoizeWithLRU(
|
||||
return []
|
||||
}
|
||||
|
||||
const result = (await client.client.request(
|
||||
{ method: 'tools/list' },
|
||||
ListToolsResultSchema,
|
||||
)) as ListToolsResult
|
||||
// Retry tool list fetch up to 2 times on transient failures.
|
||||
// Without retry, a single timeout during tools/list makes all MCP tools
|
||||
// silently disappear from the model's context until the next reconnect.
|
||||
let result: ListToolsResult | undefined
|
||||
let lastError: unknown
|
||||
for (let attempt = 0; attempt < 3; attempt++) {
|
||||
try {
|
||||
result = (await client.client.request(
|
||||
{ method: 'tools/list' },
|
||||
ListToolsResultSchema,
|
||||
)) as ListToolsResult
|
||||
break
|
||||
} catch (err) {
|
||||
lastError = err
|
||||
if (attempt < 2) {
|
||||
logMCPDebug(
|
||||
client.name,
|
||||
`tools/list failed (attempt ${attempt + 1}/3): ${errorMessage(err)}. Retrying...`,
|
||||
)
|
||||
await sleep(1000 * (attempt + 1))
|
||||
}
|
||||
}
|
||||
}
|
||||
if (!result) {
|
||||
throw lastError ?? new Error('tools/list failed after 3 attempts')
|
||||
}
|
||||
|
||||
// Sanitize tool data from MCP server
|
||||
const toolsToProcess = recursivelySanitizeUnicode(result.tools)
|
||||
@@ -2864,6 +2889,11 @@ export async function callMCPToolWithUrlElicitationRetry({
|
||||
}): Promise<MCPToolCallResult> {
|
||||
const MAX_URL_ELICITATION_RETRIES = 3
|
||||
for (let attempt = 0; ; attempt++) {
|
||||
// Check abort signal before each attempt — without this, a cancelled
|
||||
// elicitation retry loop continues spinning until MAX retries
|
||||
if (signal.aborted) {
|
||||
throw new Error('Tool call aborted during URL elicitation')
|
||||
}
|
||||
try {
|
||||
return await callToolFn({
|
||||
client: connectedClient,
|
||||
@@ -3156,9 +3186,12 @@ async function callMCPTool({
|
||||
errorDetails = String(result.error)
|
||||
}
|
||||
logMCPError(name, errorDetails)
|
||||
// Include server and tool name in telemetry for debugging, but keep
|
||||
// the human-readable message unchanged to avoid breaking error consumers
|
||||
// that parse the message string.
|
||||
throw new McpToolCallError_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS(
|
||||
errorDetails,
|
||||
'MCP tool returned error',
|
||||
`MCP tool [${name}] ${tool}: ${errorDetails}`,
|
||||
'_meta' in result && result._meta ? { _meta: result._meta } : undefined,
|
||||
)
|
||||
}
|
||||
|
||||
155
src/services/oauth/auth-code-listener.analytics.test.ts
Normal file
155
src/services/oauth/auth-code-listener.analytics.test.ts
Normal file
@@ -0,0 +1,155 @@
|
||||
import { afterEach, expect, mock, test } from 'bun:test'
|
||||
|
||||
afterEach(() => {
|
||||
mock.restore()
|
||||
})
|
||||
|
||||
test('custom error responses log the error redirect analytics event', async () => {
|
||||
const events: Array<{
|
||||
name: string
|
||||
metadata: Record<string, boolean | number | undefined>
|
||||
}> = []
|
||||
|
||||
mock.module('src/services/analytics/index.js', () => ({
|
||||
logEvent: (
|
||||
name: string,
|
||||
metadata: Record<string, boolean | number | undefined>,
|
||||
) => {
|
||||
events.push({ name, metadata })
|
||||
},
|
||||
}))
|
||||
|
||||
const { AuthCodeListener } = await import(
|
||||
`./auth-code-listener.js?ts=${Date.now()}-${Math.random()}`
|
||||
)
|
||||
const listener = new AuthCodeListener('/callback')
|
||||
const response = {
|
||||
writeHead: () => {},
|
||||
end: () => {},
|
||||
}
|
||||
|
||||
;(listener as any).pendingResponse = response
|
||||
|
||||
listener.handleErrorRedirect(res => {
|
||||
res.writeHead(400, {
|
||||
'Content-Type': 'text/plain; charset=utf-8',
|
||||
})
|
||||
res.end('cancelled')
|
||||
})
|
||||
|
||||
expect(events).toEqual([
|
||||
{
|
||||
name: 'tengu_oauth_automatic_redirect_error',
|
||||
metadata: { custom_handler: true },
|
||||
},
|
||||
])
|
||||
})
|
||||
|
||||
test('custom handlers that do not end the response are closed automatically and still log analytics', async () => {
|
||||
const events: Array<{
|
||||
name: string
|
||||
metadata: Record<string, boolean | number | undefined>
|
||||
}> = []
|
||||
const response = {
|
||||
destroyed: false,
|
||||
headersSent: false,
|
||||
writableEnded: false,
|
||||
writeHead: () => {
|
||||
response.headersSent = true
|
||||
},
|
||||
end: () => {
|
||||
response.writableEnded = true
|
||||
},
|
||||
}
|
||||
|
||||
mock.module('src/services/analytics/index.js', () => ({
|
||||
logEvent: (
|
||||
name: string,
|
||||
metadata: Record<string, boolean | number | undefined>,
|
||||
) => {
|
||||
events.push({ name, metadata })
|
||||
},
|
||||
}))
|
||||
|
||||
mock.module('../../utils/log.js', () => ({
|
||||
logError: () => {},
|
||||
}))
|
||||
|
||||
const { AuthCodeListener } = await import(
|
||||
`./auth-code-listener.js?ts=${Date.now()}-${Math.random()}`
|
||||
)
|
||||
const listener = new AuthCodeListener('/callback')
|
||||
|
||||
;(listener as any).pendingResponse = response
|
||||
|
||||
listener.handleErrorRedirect(res => {
|
||||
res.writeHead(400, {
|
||||
'Content-Type': 'text/plain; charset=utf-8',
|
||||
})
|
||||
})
|
||||
|
||||
expect(response.writableEnded).toBe(true)
|
||||
expect((listener as any).pendingResponse).toBeNull()
|
||||
expect(events).toEqual([
|
||||
{
|
||||
name: 'tengu_oauth_automatic_redirect_error',
|
||||
metadata: { custom_handler: true },
|
||||
},
|
||||
])
|
||||
})
|
||||
|
||||
test('custom handlers that throw are logged, converted to a fallback response, and do not log analytics', async () => {
|
||||
const events: Array<{
|
||||
name: string
|
||||
metadata: Record<string, boolean | number | undefined>
|
||||
}> = []
|
||||
const loggedErrors: unknown[] = []
|
||||
const response = {
|
||||
destroyed: false,
|
||||
headersSent: false,
|
||||
writableEnded: false,
|
||||
statusCode: 0,
|
||||
body: '',
|
||||
writeHead: (statusCode: number) => {
|
||||
response.headersSent = true
|
||||
response.statusCode = statusCode
|
||||
},
|
||||
end: (body = '') => {
|
||||
response.writableEnded = true
|
||||
response.body = body
|
||||
},
|
||||
}
|
||||
|
||||
mock.module('src/services/analytics/index.js', () => ({
|
||||
logEvent: (
|
||||
name: string,
|
||||
metadata: Record<string, boolean | number | undefined>,
|
||||
) => {
|
||||
events.push({ name, metadata })
|
||||
},
|
||||
}))
|
||||
|
||||
mock.module('../../utils/log.js', () => ({
|
||||
logError: (error: unknown) => {
|
||||
loggedErrors.push(error)
|
||||
},
|
||||
}))
|
||||
|
||||
const { AuthCodeListener } = await import(
|
||||
`./auth-code-listener.js?ts=${Date.now()}-${Math.random()}`
|
||||
)
|
||||
const listener = new AuthCodeListener('/callback')
|
||||
|
||||
;(listener as any).pendingResponse = response
|
||||
|
||||
listener.handleErrorRedirect(() => {
|
||||
throw new Error('handler exploded')
|
||||
})
|
||||
|
||||
expect(response.statusCode).toBe(500)
|
||||
expect(response.body).toBe('Authentication redirect failed')
|
||||
expect(response.writableEnded).toBe(true)
|
||||
expect((listener as any).pendingResponse).toBeNull()
|
||||
expect(loggedErrors).toHaveLength(1)
|
||||
expect(events).toEqual([])
|
||||
})
|
||||
31
src/services/oauth/auth-code-listener.test.ts
Normal file
31
src/services/oauth/auth-code-listener.test.ts
Normal file
@@ -0,0 +1,31 @@
|
||||
import { afterEach, expect, test } from 'bun:test'
|
||||
|
||||
import { AuthCodeListener } from './auth-code-listener.js'
|
||||
|
||||
const listeners: AuthCodeListener[] = []
|
||||
|
||||
afterEach(() => {
|
||||
while (listeners.length > 0) {
|
||||
listeners.pop()?.close()
|
||||
}
|
||||
})
|
||||
|
||||
test('cancelPendingAuthorization rejects a pending OAuth wait', async () => {
|
||||
const listener = new AuthCodeListener('/callback')
|
||||
listeners.push(listener)
|
||||
|
||||
await listener.start()
|
||||
|
||||
const pendingAuthorization = listener.waitForAuthorization(
|
||||
'state-test',
|
||||
async () => {},
|
||||
)
|
||||
|
||||
listener.cancelPendingAuthorization(
|
||||
new Error('Codex OAuth flow was cancelled.'),
|
||||
)
|
||||
|
||||
await expect(pendingAuthorization).rejects.toThrow(
|
||||
'Codex OAuth flow was cancelled.',
|
||||
)
|
||||
})
|
||||
@@ -71,6 +71,42 @@ export class AuthCodeListener {
|
||||
})
|
||||
}
|
||||
|
||||
private respondToPendingRequest(options: {
|
||||
handler: (res: ServerResponse) => void
|
||||
analyticsEvent:
|
||||
| 'tengu_oauth_automatic_redirect'
|
||||
| 'tengu_oauth_automatic_redirect_error'
|
||||
analyticsMetadata?: Record<string, boolean>
|
||||
}): void {
|
||||
if (!this.pendingResponse) return
|
||||
|
||||
const response = this.pendingResponse
|
||||
try {
|
||||
options.handler(response)
|
||||
|
||||
if (!response.writableEnded && !response.destroyed) {
|
||||
response.end()
|
||||
}
|
||||
|
||||
logEvent(options.analyticsEvent, options.analyticsMetadata ?? {})
|
||||
} catch (error) {
|
||||
logError(error)
|
||||
|
||||
if (!response.headersSent && !response.destroyed) {
|
||||
response.writeHead(500, {
|
||||
'Content-Type': 'text/plain; charset=utf-8',
|
||||
})
|
||||
}
|
||||
if (!response.writableEnded && !response.destroyed) {
|
||||
response.end('Authentication redirect failed')
|
||||
}
|
||||
} finally {
|
||||
if (this.pendingResponse === response) {
|
||||
this.pendingResponse = null
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Completes the OAuth flow by redirecting the user's browser to a success page.
|
||||
* Different success pages are shown based on the granted scopes.
|
||||
@@ -85,9 +121,13 @@ export class AuthCodeListener {
|
||||
|
||||
// If custom handler provided, use it instead of default redirect
|
||||
if (customHandler) {
|
||||
customHandler(this.pendingResponse, scopes)
|
||||
this.pendingResponse = null
|
||||
logEvent('tengu_oauth_automatic_redirect', { custom_handler: true })
|
||||
this.respondToPendingRequest({
|
||||
handler: res => {
|
||||
customHandler(res, scopes)
|
||||
},
|
||||
analyticsEvent: 'tengu_oauth_automatic_redirect',
|
||||
analyticsMetadata: { custom_handler: true },
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
@@ -97,29 +137,48 @@ export class AuthCodeListener {
|
||||
: getOauthConfig().CONSOLE_SUCCESS_URL
|
||||
|
||||
// Send browser to success page
|
||||
this.pendingResponse.writeHead(302, { Location: successUrl })
|
||||
this.pendingResponse.end()
|
||||
this.pendingResponse = null
|
||||
|
||||
logEvent('tengu_oauth_automatic_redirect', {})
|
||||
this.respondToPendingRequest({
|
||||
handler: res => {
|
||||
res.writeHead(302, { Location: successUrl })
|
||||
res.end()
|
||||
},
|
||||
analyticsEvent: 'tengu_oauth_automatic_redirect',
|
||||
})
|
||||
}
|
||||
|
||||
/**
|
||||
* Handles error case by sending a redirect to the appropriate success page with an error indicator,
|
||||
* ensuring the browser flow is completed properly.
|
||||
*/
|
||||
handleErrorRedirect(): void {
|
||||
handleErrorRedirect(customHandler?: (res: ServerResponse) => void): void {
|
||||
if (!this.pendingResponse) return
|
||||
|
||||
if (customHandler) {
|
||||
this.respondToPendingRequest({
|
||||
handler: customHandler,
|
||||
analyticsEvent: 'tengu_oauth_automatic_redirect_error',
|
||||
analyticsMetadata: { custom_handler: true },
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
// TODO: swap to a different url once we have an error page
|
||||
const errorUrl = getOauthConfig().CLAUDEAI_SUCCESS_URL
|
||||
|
||||
// Send browser to error page
|
||||
this.pendingResponse.writeHead(302, { Location: errorUrl })
|
||||
this.pendingResponse.end()
|
||||
this.pendingResponse = null
|
||||
this.respondToPendingRequest({
|
||||
handler: res => {
|
||||
res.writeHead(302, { Location: errorUrl })
|
||||
res.end()
|
||||
},
|
||||
analyticsEvent: 'tengu_oauth_automatic_redirect_error',
|
||||
})
|
||||
}
|
||||
|
||||
logEvent('tengu_oauth_automatic_redirect_error', {})
|
||||
cancelPendingAuthorization(
|
||||
error: Error = new Error('OAuth authorization was cancelled.'),
|
||||
): void {
|
||||
this.reject(error)
|
||||
this.close()
|
||||
}
|
||||
|
||||
private startLocalListener(onReady: () => Promise<void>): void {
|
||||
@@ -176,8 +235,7 @@ export class AuthCodeListener {
|
||||
|
||||
private handleError(err: Error): void {
|
||||
logError(err)
|
||||
this.close()
|
||||
this.reject(err)
|
||||
this.cancelPendingAuthorization(err)
|
||||
}
|
||||
|
||||
private resolve(authorizationCode: string): void {
|
||||
@@ -185,6 +243,7 @@ export class AuthCodeListener {
|
||||
this.promiseResolver(authorizationCode)
|
||||
this.promiseResolver = null
|
||||
this.promiseRejecter = null
|
||||
this.expectedState = null
|
||||
}
|
||||
}
|
||||
|
||||
@@ -193,6 +252,7 @@ export class AuthCodeListener {
|
||||
this.promiseRejecter(error)
|
||||
this.promiseResolver = null
|
||||
this.promiseRejecter = null
|
||||
this.expectedState = null
|
||||
}
|
||||
}
|
||||
|
||||
@@ -207,5 +267,8 @@ export class AuthCodeListener {
|
||||
this.localServer.removeAllListeners()
|
||||
this.localServer.close()
|
||||
}
|
||||
|
||||
this.expectedState = null
|
||||
this.port = 0
|
||||
}
|
||||
}
|
||||
|
||||
@@ -109,7 +109,6 @@ const externalTips: Tip[] = [
|
||||
`Use Plan Mode to prepare for a complex request before making changes. Press ${getShortcutDisplay('chat:cycleMode', 'Chat', 'shift+tab')} twice to enable.`,
|
||||
cooldownSessions: 5,
|
||||
isRelevant: async () => {
|
||||
if (process.env.USER_TYPE === 'ant') return false
|
||||
const config = getGlobalConfig()
|
||||
// Show to users who haven't used plan mode recently (7+ days)
|
||||
const daysSinceLastUse = config.lastPlanModeUse
|
||||
@@ -401,9 +400,7 @@ const externalTips: Tip[] = [
|
||||
{
|
||||
id: 'shift-tab',
|
||||
content: async () =>
|
||||
process.env.USER_TYPE === 'ant'
|
||||
? `Hit ${getShortcutDisplay('chat:cycleMode', 'Chat', 'shift+tab')} to cycle between default mode and auto mode`
|
||||
: `Hit ${getShortcutDisplay('chat:cycleMode', 'Chat', 'shift+tab')} to cycle between default mode, auto-accept edit mode, and plan mode`,
|
||||
`Hit ${getShortcutDisplay('chat:cycleMode', 'Chat', 'shift+tab')} to cycle between default mode, auto-accept edit mode, and plan mode`,
|
||||
cooldownSessions: 10,
|
||||
isRelevant: async () => true,
|
||||
},
|
||||
@@ -476,7 +473,6 @@ const externalTips: Tip[] = [
|
||||
`Your default model setting is Opus Plan Mode. Press ${getShortcutDisplay('chat:cycleMode', 'Chat', 'shift+tab')} twice to activate Plan Mode and plan with Claude Opus.`,
|
||||
cooldownSessions: 2,
|
||||
async isRelevant() {
|
||||
if (process.env.USER_TYPE === 'ant') return false
|
||||
const config = getGlobalConfig()
|
||||
const modelSetting = getUserSpecifiedModelSetting()
|
||||
const hasOpusPlanMode = modelSetting === 'opusplan'
|
||||
@@ -624,33 +620,12 @@ const externalTips: Tip[] = [
|
||||
content: async () => 'Use /feedback to help us improve!',
|
||||
cooldownSessions: 15,
|
||||
async isRelevant() {
|
||||
if (process.env.USER_TYPE === 'ant') {
|
||||
return false
|
||||
}
|
||||
const config = getGlobalConfig()
|
||||
return config.numStartups > 5
|
||||
},
|
||||
},
|
||||
]
|
||||
const internalOnlyTips: Tip[] =
|
||||
process.env.USER_TYPE === 'ant'
|
||||
? [
|
||||
{
|
||||
id: 'important-claudemd',
|
||||
content: async () =>
|
||||
'[internal] Use "IMPORTANT:" prefix for must-follow CLAUDE.md rules',
|
||||
cooldownSessions: 30,
|
||||
isRelevant: async () => true,
|
||||
},
|
||||
{
|
||||
id: 'skillify',
|
||||
content: async () =>
|
||||
'[internal] Use /skillify to turn repeatable recurring workflows into reusable project skills',
|
||||
cooldownSessions: 15,
|
||||
isRelevant: async () => true,
|
||||
},
|
||||
]
|
||||
: []
|
||||
const internalOnlyTips: Tip[] = []
|
||||
|
||||
function getCustomTips(): Tip[] {
|
||||
const settings = getInitialSettings()
|
||||
|
||||
@@ -4,6 +4,7 @@ import { registerBatchSkill } from './batch.js'
|
||||
import { registerClaudeInChromeSkill } from './claudeInChrome.js'
|
||||
import { registerDebugSkill } from './debug.js'
|
||||
import { registerKeybindingsSkill } from './keybindings.js'
|
||||
import { registerLoopSkill } from './loop.js'
|
||||
import { registerSimplifySkill } from './simplify.js'
|
||||
import { registerUpdateConfigSkill } from './updateConfig.js'
|
||||
|
||||
@@ -34,15 +35,10 @@ export function initBundledSkills(): void {
|
||||
/* eslint-enable @typescript-eslint/no-require-imports */
|
||||
registerHunterSkill()
|
||||
}
|
||||
if (feature('AGENT_TRIGGERS')) {
|
||||
/* eslint-disable @typescript-eslint/no-require-imports */
|
||||
const { registerLoopSkill } = require('./loop.js')
|
||||
/* eslint-enable @typescript-eslint/no-require-imports */
|
||||
// /loop's isEnabled delegates to isKairosCronEnabled() — same lazy
|
||||
// per-invocation pattern as the cron tools. Registered unconditionally;
|
||||
// the skill's own isEnabled callback decides visibility.
|
||||
registerLoopSkill()
|
||||
}
|
||||
// /loop's isEnabled delegates to isKairosCronEnabled() — registered
|
||||
// unconditionally so the static import is bundled; visibility is gated
|
||||
// at runtime by the isEnabled callback.
|
||||
registerLoopSkill()
|
||||
if (feature('AGENT_TRIGGERS_REMOTE')) {
|
||||
/* eslint-disable @typescript-eslint/no-require-imports */
|
||||
const {
|
||||
|
||||
125
src/skills/bundled/loop.test.ts
Normal file
125
src/skills/bundled/loop.test.ts
Normal file
@@ -0,0 +1,125 @@
|
||||
import { afterEach, expect, test } from 'bun:test'
|
||||
|
||||
import { clearBundledSkills, getBundledSkills } from '../bundledSkills.js'
|
||||
import { registerLoopSkill } from './loop.js'
|
||||
|
||||
afterEach(() => {
|
||||
clearBundledSkills()
|
||||
})
|
||||
|
||||
test('bare /loop returns dynamic maintenance instructions', async () => {
|
||||
registerLoopSkill()
|
||||
|
||||
const skill = getBundledSkills().find(command => command.name === 'loop')
|
||||
expect(skill).toBeDefined()
|
||||
expect(skill?.type).toBe('prompt')
|
||||
|
||||
const blocks = await skill!.getPromptForCommand('', {} as never)
|
||||
const text = (blocks[0] as { text: string }).text
|
||||
|
||||
expect(text).toContain('# /loop — dynamic rescheduling')
|
||||
expect(text).toContain('If .claude/loop.md exists, read it and use it.')
|
||||
expect(text).toContain('continue any unfinished work from the conversation')
|
||||
expect(text).toContain('Set the scheduled prompt to this exact text so the next iteration stays in dynamic mode:')
|
||||
expect(text).toContain('/loop')
|
||||
})
|
||||
|
||||
test('prompt-only /loop returns dynamic rescheduling instructions', async () => {
|
||||
registerLoopSkill()
|
||||
|
||||
const skill = getBundledSkills().find(command => command.name === 'loop')
|
||||
const blocks = await skill!.getPromptForCommand('check the deploy', {} as never)
|
||||
const text = (blocks[0] as { text: string }).text
|
||||
|
||||
expect(text).toContain('# /loop — dynamic rescheduling')
|
||||
expect(text).toContain('check the deploy')
|
||||
expect(text).toContain('choose the next delay dynamically between 1 minute and 1 hour')
|
||||
expect(text).toContain('/loop check the deploy')
|
||||
})
|
||||
|
||||
test('interval /loop returns fixed recurring instructions', async () => {
|
||||
registerLoopSkill()
|
||||
|
||||
const skill = getBundledSkills().find(command => command.name === 'loop')
|
||||
const blocks = await skill!.getPromptForCommand('5m check the deploy', {} as never)
|
||||
const text = (blocks[0] as { text: string }).text
|
||||
|
||||
expect(text).toContain('# /loop — fixed recurring interval')
|
||||
expect(text).toContain('Requested interval:')
|
||||
expect(text).toContain('5m')
|
||||
expect(text).toContain('Call CronCreate')
|
||||
expect(text).toContain('recurring: true')
|
||||
expect(text).toContain('Immediately execute the effective prompt now')
|
||||
})
|
||||
|
||||
test('interval-only /loop becomes fixed maintenance mode', async () => {
|
||||
registerLoopSkill()
|
||||
|
||||
const skill = getBundledSkills().find(command => command.name === 'loop')
|
||||
const blocks = await skill!.getPromptForCommand('15m', {} as never)
|
||||
const text = (blocks[0] as { text: string }).text
|
||||
|
||||
expect(text).toContain('# /loop — fixed recurring interval')
|
||||
expect(text).toContain('15m')
|
||||
expect(text).toContain('This is a maintenance loop with no explicit prompt.')
|
||||
expect(text).toContain('Scheduled maintenance loop iteration.')
|
||||
})
|
||||
|
||||
test('trailing every clause parses interval and prompt', async () => {
|
||||
registerLoopSkill()
|
||||
|
||||
const skill = getBundledSkills().find(command => command.name === 'loop')
|
||||
const blocks = await skill!.getPromptForCommand('check the deploy every 20m', {} as never)
|
||||
const text = (blocks[0] as { text: string }).text
|
||||
|
||||
expect(text).toContain('# /loop — fixed recurring interval')
|
||||
expect(text).toContain('20m')
|
||||
expect(text).toContain('check the deploy')
|
||||
})
|
||||
|
||||
test('trailing every clause with word unit parses correctly', async () => {
|
||||
registerLoopSkill()
|
||||
|
||||
const skill = getBundledSkills().find(command => command.name === 'loop')
|
||||
const blocks = await skill!.getPromptForCommand('run tests every 5 minutes', {} as never)
|
||||
const text = (blocks[0] as { text: string }).text
|
||||
|
||||
expect(text).toContain('# /loop — fixed recurring interval')
|
||||
expect(text).toContain('5m')
|
||||
expect(text).toContain('run tests')
|
||||
})
|
||||
|
||||
test('"check every PR" is not treated as an interval', async () => {
|
||||
registerLoopSkill()
|
||||
|
||||
const skill = getBundledSkills().find(command => command.name === 'loop')
|
||||
const blocks = await skill!.getPromptForCommand('check every PR', {} as never)
|
||||
const text = (blocks[0] as { text: string }).text
|
||||
|
||||
expect(text).toContain('# /loop — dynamic rescheduling')
|
||||
expect(text).toContain('check every PR')
|
||||
})
|
||||
|
||||
test('human-readable hour unit parses correctly', async () => {
|
||||
registerLoopSkill()
|
||||
|
||||
const skill = getBundledSkills().find(command => command.name === 'loop')
|
||||
const blocks = await skill!.getPromptForCommand('2h check logs', {} as never)
|
||||
const text = (blocks[0] as { text: string }).text
|
||||
|
||||
expect(text).toContain('# /loop — fixed recurring interval')
|
||||
expect(text).toContain('2h')
|
||||
expect(text).toContain('check logs')
|
||||
})
|
||||
|
||||
test('prompt delimiters are present and unambiguous', async () => {
|
||||
registerLoopSkill()
|
||||
|
||||
const skill = getBundledSkills().find(command => command.name === 'loop')
|
||||
const blocks = await skill!.getPromptForCommand('5m say hi', {} as never)
|
||||
const text = (blocks[0] as { text: string }).text
|
||||
|
||||
expect(text).toContain('--- BEGIN PROMPT ---')
|
||||
expect(text).toContain('say hi')
|
||||
expect(text).toContain('--- END PROMPT ---')
|
||||
})
|
||||
@@ -6,87 +6,218 @@ import {
|
||||
} from '../../tools/ScheduleCronTool/prompt.js'
|
||||
import { registerBundledSkill } from '../bundledSkills.js'
|
||||
|
||||
const DEFAULT_INTERVAL = '10m'
|
||||
type LoopMode =
|
||||
| 'dynamic-prompt'
|
||||
| 'dynamic-maintenance'
|
||||
| 'fixed-prompt'
|
||||
| 'fixed-maintenance'
|
||||
|
||||
const USAGE_MESSAGE = `Usage: /loop [interval] <prompt>
|
||||
type ParsedLoopArgs = {
|
||||
mode: LoopMode
|
||||
interval?: string
|
||||
prompt?: string
|
||||
}
|
||||
|
||||
Run a prompt or slash command on a recurring interval.
|
||||
const DYNAMIC_MIN_DELAY = '1 minute'
|
||||
const DYNAMIC_MAX_DELAY = '1 hour'
|
||||
|
||||
Intervals: Ns, Nm, Nh, Nd (e.g. 5m, 30m, 2h, 1d). Minimum granularity is 1 minute.
|
||||
If no interval is specified, defaults to ${DEFAULT_INTERVAL}.
|
||||
const MAINTENANCE_PROMPT = `Scheduled maintenance loop iteration.
|
||||
|
||||
Examples:
|
||||
/loop 5m /babysit-prs
|
||||
/loop 30m check the deploy
|
||||
/loop 1h /standup 1
|
||||
/loop check the deploy (defaults to ${DEFAULT_INTERVAL})
|
||||
/loop check the deploy every 20m`
|
||||
If .claude/loop.md exists, read it and follow it.
|
||||
Otherwise, if ~/.claude/loop.md exists, read it and follow it.
|
||||
Otherwise:
|
||||
- continue any unfinished work from the conversation
|
||||
- tend to the current branch's pull request: review comments, failed CI runs, merge conflicts
|
||||
- run cleanup passes such as bug hunts or simplification when nothing else is pending
|
||||
|
||||
function buildPrompt(args: string): string {
|
||||
return `# /loop — schedule a recurring prompt
|
||||
Do not start new initiatives outside that scope.
|
||||
Irreversible actions such as pushing or deleting only proceed when they continue something the transcript already authorized.`
|
||||
|
||||
Parse the input below into \`[interval] <prompt…>\` and schedule it with ${CRON_CREATE_TOOL_NAME}.
|
||||
function normalizeIntervalUnit(rawUnit: string): 's' | 'm' | 'h' | 'd' | null {
|
||||
const unit = rawUnit.toLowerCase()
|
||||
if (['s', 'sec', 'secs', 'second', 'seconds'].includes(unit)) return 's'
|
||||
if (['m', 'min', 'mins', 'minute', 'minutes'].includes(unit)) return 'm'
|
||||
if (['h', 'hr', 'hrs', 'hour', 'hours'].includes(unit)) return 'h'
|
||||
if (['d', 'day', 'days'].includes(unit)) return 'd'
|
||||
return null
|
||||
}
|
||||
|
||||
## Parsing (in priority order)
|
||||
function parseIntervalToken(token: string): string | null {
|
||||
const match = token.trim().match(/^(\d+)\s*([a-zA-Z]+)$/)
|
||||
if (!match) return null
|
||||
const value = Number.parseInt(match[1]!, 10)
|
||||
if (!Number.isFinite(value) || value < 1) return null
|
||||
const unit = normalizeIntervalUnit(match[2]!)
|
||||
if (!unit) return null
|
||||
return `${value}${unit}`
|
||||
}
|
||||
|
||||
1. **Leading token**: if the first whitespace-delimited token matches \`^\\d+[smhd]$\` (e.g. \`5m\`, \`2h\`), that's the interval; the rest is the prompt.
|
||||
2. **Trailing "every" clause**: otherwise, if the input ends with \`every <N><unit>\` or \`every <N> <unit-word>\` (e.g. \`every 20m\`, \`every 5 minutes\`, \`every 2 hours\`), extract that as the interval and strip it from the prompt. Only match when what follows "every" is a time expression — \`check every PR\` has no interval.
|
||||
3. **Default**: otherwise, interval is \`${DEFAULT_INTERVAL}\` and the entire input is the prompt.
|
||||
function parseTrailingEveryClause(input: string): {
|
||||
prompt: string
|
||||
interval: string
|
||||
} | null {
|
||||
const match = input.match(/^(.*?)(?:\s+every\s+)(\d+)\s*([a-zA-Z]+)\s*$/i)
|
||||
if (!match) return null
|
||||
const interval = parseIntervalToken(`${match[2]!}${match[3]!}`)
|
||||
if (!interval) return null
|
||||
return {
|
||||
prompt: match[1]!.trim(),
|
||||
interval,
|
||||
}
|
||||
}
|
||||
|
||||
If the resulting prompt is empty, show usage \`/loop [interval] <prompt>\` and stop — do not call ${CRON_CREATE_TOOL_NAME}.
|
||||
function parseLoopArgs(args: string): ParsedLoopArgs {
|
||||
const trimmed = args.trim()
|
||||
if (!trimmed) return { mode: 'dynamic-maintenance' }
|
||||
|
||||
Examples:
|
||||
- \`5m /babysit-prs\` → interval \`5m\`, prompt \`/babysit-prs\` (rule 1)
|
||||
- \`check the deploy every 20m\` → interval \`20m\`, prompt \`check the deploy\` (rule 2)
|
||||
- \`run tests every 5 minutes\` → interval \`5m\`, prompt \`run tests\` (rule 2)
|
||||
- \`check the deploy\` → interval \`${DEFAULT_INTERVAL}\`, prompt \`check the deploy\` (rule 3)
|
||||
- \`check every PR\` → interval \`${DEFAULT_INTERVAL}\`, prompt \`check every PR\` (rule 3 — "every" not followed by time)
|
||||
- \`5m\` → empty prompt → show usage
|
||||
const bareInterval = parseIntervalToken(trimmed)
|
||||
if (bareInterval) {
|
||||
return { mode: 'fixed-maintenance', interval: bareInterval }
|
||||
}
|
||||
|
||||
## Interval → cron
|
||||
const [firstToken, ...restTokens] = trimmed.split(/\s+/)
|
||||
const leadingInterval = parseIntervalToken(firstToken ?? '')
|
||||
if (leadingInterval) {
|
||||
const prompt = restTokens.join(' ').trim()
|
||||
if (!prompt) return { mode: 'fixed-maintenance', interval: leadingInterval }
|
||||
return {
|
||||
mode: 'fixed-prompt',
|
||||
interval: leadingInterval,
|
||||
prompt,
|
||||
}
|
||||
}
|
||||
|
||||
Supported suffixes: \`s\` (seconds, rounded up to nearest minute, min 1), \`m\` (minutes), \`h\` (hours), \`d\` (days). Convert:
|
||||
const trailingEvery = parseTrailingEveryClause(trimmed)
|
||||
if (trailingEvery) {
|
||||
if (!trailingEvery.prompt) {
|
||||
return {
|
||||
mode: 'fixed-maintenance',
|
||||
interval: trailingEvery.interval,
|
||||
}
|
||||
}
|
||||
return {
|
||||
mode: 'fixed-prompt',
|
||||
interval: trailingEvery.interval,
|
||||
prompt: trailingEvery.prompt,
|
||||
}
|
||||
}
|
||||
|
||||
| Interval pattern | Cron expression | Notes |
|
||||
|-----------------------|---------------------|------------------------------------------|
|
||||
| \`Nm\` where N ≤ 59 | \`*/N * * * *\` | every N minutes |
|
||||
| \`Nm\` where N ≥ 60 | \`0 */H * * *\` | round to hours (H = N/60, must divide 24)|
|
||||
| \`Nh\` where N ≤ 23 | \`0 */N * * *\` | every N hours |
|
||||
| \`Nd\` | \`0 0 */N * *\` | every N days at midnight local |
|
||||
| \`Ns\` | treat as \`ceil(N/60)m\` | cron minimum granularity is 1 minute |
|
||||
return {
|
||||
mode: 'dynamic-prompt',
|
||||
prompt: trimmed,
|
||||
}
|
||||
}
|
||||
|
||||
**If the interval doesn't cleanly divide its unit** (e.g. \`7m\` → \`*/7 * * * *\` gives uneven gaps at :56→:00; \`90m\` → 1.5h which cron can't express), pick the nearest clean interval and tell the user what you rounded to before scheduling.
|
||||
function buildFixedPrompt(parsed: ParsedLoopArgs): string {
|
||||
const targetInstructions = parsed.prompt
|
||||
? `Use this prompt verbatim for both the immediate run and the recurring scheduled task:
|
||||
|
||||
## Action
|
||||
--- BEGIN PROMPT ---
|
||||
${parsed.prompt}
|
||||
--- END PROMPT ---
|
||||
`
|
||||
: `This is a maintenance loop with no explicit prompt.
|
||||
|
||||
1. Call ${CRON_CREATE_TOOL_NAME} with:
|
||||
- \`cron\`: the expression from the table above
|
||||
- \`prompt\`: the parsed prompt from above, verbatim (slash commands are passed through unchanged)
|
||||
- \`recurring\`: \`true\`
|
||||
2. Briefly confirm: what's scheduled, the cron expression, the human-readable cadence, that recurring tasks auto-expire after ${DEFAULT_MAX_AGE_DAYS} days, and that they can cancel sooner with ${CRON_DELETE_TOOL_NAME} (include the job ID).
|
||||
3. **Then immediately execute the parsed prompt now** — don't wait for the first cron fire. If it's a slash command, invoke it via the Skill tool; otherwise act on it directly.
|
||||
For the recurring scheduled task, use this exact maintenance prompt body:
|
||||
|
||||
## Input
|
||||
--- BEGIN MAINTENANCE PROMPT ---
|
||||
${MAINTENANCE_PROMPT}
|
||||
--- END MAINTENANCE PROMPT ---
|
||||
`
|
||||
|
||||
${args}`
|
||||
return `# /loop — fixed recurring interval
|
||||
|
||||
The user invoked /loop with a fixed interval.
|
||||
|
||||
Requested interval: ${parsed.interval}
|
||||
|
||||
${targetInstructions}
|
||||
## Instructions
|
||||
|
||||
1. Convert the requested interval to a recurring cron expression.
|
||||
- Supported suffixes: s, m, h, d.
|
||||
- Seconds must be rounded up to the nearest minute because cron has minute granularity.
|
||||
- If the requested interval does not map cleanly to cron cadence, choose the nearest clean recurring interval and tell the user what you picked.
|
||||
2. Call ${CRON_CREATE_TOOL_NAME} with:
|
||||
- the recurring cron expression
|
||||
- the effective prompt body above
|
||||
- recurring: true
|
||||
- durable: false
|
||||
3. Briefly confirm what was scheduled, the cron expression, the human cadence, that recurring tasks auto-expire after ${DEFAULT_MAX_AGE_DAYS} days, and that the user can cancel sooner with ${CRON_DELETE_TOOL_NAME} using the returned job ID.
|
||||
4. Immediately execute the effective prompt now — do not wait for the first cron fire.
|
||||
- If the effective prompt starts with a slash command, invoke it via the Skill tool.
|
||||
- Otherwise, act on it directly.
|
||||
`
|
||||
}
|
||||
|
||||
function buildDynamicPrompt(parsed: ParsedLoopArgs): string {
|
||||
const effectivePromptInstructions = parsed.prompt
|
||||
? `Use this prompt verbatim as the effective prompt for this iteration:
|
||||
|
||||
--- BEGIN PROMPT ---
|
||||
${parsed.prompt}
|
||||
--- END PROMPT ---
|
||||
`
|
||||
: `This is a maintenance loop with no explicit prompt.
|
||||
|
||||
Determine the effective prompt in this order:
|
||||
1. If .claude/loop.md exists, read it and use it.
|
||||
2. Otherwise, if ~/.claude/loop.md exists, read it and use it.
|
||||
3. Otherwise, use this built-in maintenance prompt:
|
||||
|
||||
--- BEGIN MAINTENANCE PROMPT ---
|
||||
${MAINTENANCE_PROMPT}
|
||||
--- END MAINTENANCE PROMPT ---
|
||||
`
|
||||
|
||||
const reschedulePrompt = parsed.prompt ? `/loop ${parsed.prompt}` : '/loop'
|
||||
|
||||
return `# /loop — dynamic rescheduling
|
||||
|
||||
The user invoked /loop without a fixed interval.
|
||||
|
||||
${effectivePromptInstructions}
|
||||
## Instructions
|
||||
|
||||
1. Execute the effective prompt now.
|
||||
- If it starts with a slash command, invoke it via the Skill tool.
|
||||
- Otherwise, act on it directly.
|
||||
2. After the work finishes, choose the next delay dynamically between ${DYNAMIC_MIN_DELAY} and ${DYNAMIC_MAX_DELAY}.
|
||||
- Use shorter delays while active work is progressing or likely to change soon.
|
||||
- Use longer delays when the situation is quiet or stable.
|
||||
3. Briefly tell the user the chosen delay and the reason.
|
||||
4. Schedule exactly one session-only follow-up run with ${CRON_CREATE_TOOL_NAME}.
|
||||
- Use recurring: false.
|
||||
- Use durable: false.
|
||||
- Pin the cron expression to a specific future local-time minute that matches the chosen delay.
|
||||
- Set the scheduled prompt to this exact text so the next iteration stays in dynamic mode:
|
||||
|
||||
--- BEGIN SCHEDULED PROMPT ---
|
||||
${reschedulePrompt}
|
||||
--- END SCHEDULED PROMPT ---
|
||||
|
||||
5. Confirm the next run time and the returned job ID.
|
||||
6. Do not create a recurring cron for this mode.
|
||||
`
|
||||
}
|
||||
|
||||
export function registerLoopSkill(): void {
|
||||
registerBundledSkill({
|
||||
name: 'loop',
|
||||
description:
|
||||
'Run a prompt or slash command on a recurring interval (e.g. /loop 5m /foo, defaults to 10m)',
|
||||
'Run a prompt on a fixed interval or dynamically reschedule it, including bare maintenance-mode loops.',
|
||||
whenToUse:
|
||||
'When the user wants to set up a recurring task, poll for status, or run something repeatedly on an interval (e.g. "check the deploy every 5 minutes", "keep running /babysit-prs"). Do NOT invoke for one-off tasks.',
|
||||
argumentHint: '[interval] <prompt>',
|
||||
'When the user wants to poll for status, babysit a workflow, run recurring maintenance, or keep re-running a prompt within the current session.',
|
||||
argumentHint: '[interval] [prompt]',
|
||||
userInvocable: true,
|
||||
isEnabled: isKairosCronEnabled,
|
||||
async getPromptForCommand(args) {
|
||||
const trimmed = args.trim()
|
||||
if (!trimmed) {
|
||||
return [{ type: 'text', text: USAGE_MESSAGE }]
|
||||
}
|
||||
return [{ type: 'text', text: buildPrompt(trimmed) }]
|
||||
const parsed = parseLoopArgs(args)
|
||||
const text =
|
||||
parsed.mode === 'fixed-prompt' || parsed.mode === 'fixed-maintenance'
|
||||
? buildFixedPrompt(parsed)
|
||||
: buildDynamicPrompt(parsed)
|
||||
return [{ type: 'text', text }]
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
102
src/tasks/MonitorMcpTask/MonitorMcpTask.ts
Normal file
102
src/tasks/MonitorMcpTask/MonitorMcpTask.ts
Normal file
@@ -0,0 +1,102 @@
|
||||
// MonitorMcpTask — task registry entry for the 'monitor_mcp' type.
|
||||
//
|
||||
// Architecture: MonitorTool spawns shell processes as LocalShellTask
|
||||
// (type: 'local_bash', kind: 'monitor'). The 'monitor_mcp' type exists
|
||||
// in TaskType for forward-compatibility with MCP-based monitoring (not
|
||||
// yet implemented). This module satisfies the import from tasks.ts and
|
||||
// provides killMonitorMcpTasksForAgent for agent-scoped cleanup of
|
||||
// monitor-kind shell tasks.
|
||||
|
||||
import type { AppState } from '../../state/AppState.js'
|
||||
import type { SetAppState, Task, TaskStateBase } from '../../Task.js'
|
||||
import type { AgentId } from '../../types/ids.js'
|
||||
import { logForDebugging } from '../../utils/debug.js'
|
||||
import { dequeueAllMatching } from '../../utils/messageQueueManager.js'
|
||||
import { evictTaskOutput } from '../../utils/task/diskOutput.js'
|
||||
import { updateTaskState } from '../../utils/task/framework.js'
|
||||
import { isLocalShellTask } from '../LocalShellTask/guards.js'
|
||||
import { killTask } from '../LocalShellTask/killShellTasks.js'
|
||||
|
||||
export type MonitorMcpTaskState = TaskStateBase & {
|
||||
type: 'monitor_mcp'
|
||||
agentId?: AgentId
|
||||
}
|
||||
|
||||
function isMonitorMcpTask(task: unknown): task is MonitorMcpTaskState {
|
||||
return (
|
||||
typeof task === 'object' &&
|
||||
task !== null &&
|
||||
'type' in task &&
|
||||
task.type === 'monitor_mcp'
|
||||
)
|
||||
}
|
||||
|
||||
export const MonitorMcpTask: Task = {
|
||||
name: 'MonitorMcpTask',
|
||||
type: 'monitor_mcp',
|
||||
async kill(taskId, setAppState) {
|
||||
updateTaskState<MonitorMcpTaskState>(taskId, setAppState, task => {
|
||||
if (task.status !== 'running') {
|
||||
return task
|
||||
}
|
||||
|
||||
return {
|
||||
...task,
|
||||
status: 'killed',
|
||||
notified: true,
|
||||
endTime: Date.now(),
|
||||
}
|
||||
})
|
||||
void evictTaskOutput(taskId)
|
||||
},
|
||||
}
|
||||
|
||||
/**
|
||||
* Kill all monitor tasks owned by a given agent.
|
||||
*
|
||||
* MonitorTool spawns tasks as local_bash with kind='monitor'. When an agent
|
||||
* exits, killShellTasksForAgent already handles those. This function provides
|
||||
* additional cleanup for any monitor_mcp-typed tasks and also kills any
|
||||
* local_bash tasks with kind='monitor' that might have been missed (belt and
|
||||
* suspenders). Finally, it purges queued notifications for the dead agent.
|
||||
*/
|
||||
export function killMonitorMcpTasksForAgent(
|
||||
agentId: AgentId,
|
||||
getAppState: () => AppState,
|
||||
setAppState: SetAppState,
|
||||
): void {
|
||||
const tasks = getAppState().tasks ?? {}
|
||||
|
||||
for (const [taskId, task] of Object.entries(tasks)) {
|
||||
// Kill monitor_mcp tasks for this agent
|
||||
if (
|
||||
isMonitorMcpTask(task) &&
|
||||
task.agentId === agentId &&
|
||||
task.status === 'running'
|
||||
) {
|
||||
logForDebugging(
|
||||
`killMonitorMcpTasksForAgent: killing monitor_mcp task ${taskId} (agent ${agentId} exiting)`,
|
||||
)
|
||||
void MonitorMcpTask.kill(taskId, setAppState)
|
||||
}
|
||||
|
||||
// Also kill local_bash tasks with kind='monitor' for this agent
|
||||
// (killShellTasksForAgent already does this, but being explicit
|
||||
// guards against ordering issues)
|
||||
if (
|
||||
isLocalShellTask(task) &&
|
||||
task.kind === 'monitor' &&
|
||||
task.agentId === agentId &&
|
||||
task.status === 'running'
|
||||
) {
|
||||
logForDebugging(
|
||||
`killMonitorMcpTasksForAgent: killing monitor shell task ${taskId} (agent ${agentId} exiting)`,
|
||||
)
|
||||
killTask(taskId, setAppState)
|
||||
}
|
||||
}
|
||||
|
||||
// Purge any queued notifications addressed to this agent — its query loop
|
||||
// has exited and won't drain them.
|
||||
dequeueAllMatching(cmd => cmd.agentId === agentId)
|
||||
}
|
||||
29
src/tools.ts
29
src/tools.ts
@@ -12,27 +12,18 @@ import { WebFetchTool } from './tools/WebFetchTool/WebFetchTool.js'
|
||||
import { TaskStopTool } from './tools/TaskStopTool/TaskStopTool.js'
|
||||
import { BriefTool } from './tools/BriefTool/BriefTool.js'
|
||||
// Dead code elimination: conditional import for internal-only tools
|
||||
/* eslint-disable custom-rules/no-process-env-top-level, @typescript-eslint/no-require-imports */
|
||||
const REPLTool =
|
||||
process.env.USER_TYPE === 'ant'
|
||||
? require('./tools/REPLTool/REPLTool.js').REPLTool
|
||||
: null
|
||||
const SuggestBackgroundPRTool =
|
||||
process.env.USER_TYPE === 'ant'
|
||||
? require('./tools/SuggestBackgroundPRTool/SuggestBackgroundPRTool.js')
|
||||
.SuggestBackgroundPRTool
|
||||
: null
|
||||
/* eslint-disable @typescript-eslint/no-require-imports */
|
||||
const REPLTool = null
|
||||
const SuggestBackgroundPRTool = null
|
||||
// SleepTool is only pulled in when a build-time feature flag enables it;
// the conditional require lets dead-code elimination drop the module
// entirely from builds where neither flag is set (see the conditional-import
// pattern used elsewhere in this file).
const SleepTool =
  feature('PROACTIVE') || feature('KAIROS')
    ? require('./tools/SleepTool/SleepTool.js').SleepTool
    : null
|
||||
const cronTools = feature('AGENT_TRIGGERS')
|
||||
? [
|
||||
require('./tools/ScheduleCronTool/CronCreateTool.js').CronCreateTool,
|
||||
require('./tools/ScheduleCronTool/CronDeleteTool.js').CronDeleteTool,
|
||||
require('./tools/ScheduleCronTool/CronListTool.js').CronListTool,
|
||||
]
|
||||
: []
|
||||
const cronTools = [
|
||||
require('./tools/ScheduleCronTool/CronCreateTool.js').CronCreateTool,
|
||||
require('./tools/ScheduleCronTool/CronDeleteTool.js').CronDeleteTool,
|
||||
require('./tools/ScheduleCronTool/CronListTool.js').CronListTool,
|
||||
]
|
||||
// Remote-trigger support is gated at build time; the conditional require
// keeps the module out of bundles where the flag is off.
const RemoteTriggerTool = feature('AGENT_TRIGGERS_REMOTE')
  ? require('./tools/RemoteTriggerTool/RemoteTriggerTool.js').RemoteTriggerTool
  : null
|
||||
@@ -57,7 +48,6 @@ import { TodoWriteTool } from './tools/TodoWriteTool/TodoWriteTool.js'
|
||||
import { ExitPlanModeV2Tool } from './tools/ExitPlanModeTool/ExitPlanModeV2Tool.js'
|
||||
import { TestingPermissionTool } from './tools/testing/TestingPermissionTool.js'
|
||||
import { GrepTool } from './tools/GrepTool/GrepTool.js'
|
||||
import { TungstenTool } from './tools/TungstenTool/TungstenTool.js'
|
||||
// Lazy require to break circular dependency: tools.ts -> TeamCreateTool/TeamDeleteTool -> ... -> tools.ts
|
||||
/* eslint-disable @typescript-eslint/no-require-imports */
|
||||
const getTeamCreateTool = () =>
|
||||
@@ -78,7 +68,6 @@ import { ToolSearchTool } from './tools/ToolSearchTool/ToolSearchTool.js'
|
||||
import { EnterPlanModeTool } from './tools/EnterPlanModeTool/EnterPlanModeTool.js'
|
||||
import { EnterWorktreeTool } from './tools/EnterWorktreeTool/EnterWorktreeTool.js'
|
||||
import { ExitWorktreeTool } from './tools/ExitWorktreeTool/ExitWorktreeTool.js'
|
||||
import { ConfigTool } from './tools/ConfigTool/ConfigTool.js'
|
||||
import { TaskCreateTool } from './tools/TaskCreateTool/TaskCreateTool.js'
|
||||
import { TaskGetTool } from './tools/TaskGetTool/TaskGetTool.js'
|
||||
import { TaskUpdateTool } from './tools/TaskUpdateTool/TaskUpdateTool.js'
|
||||
@@ -211,8 +200,6 @@ export function getAllBaseTools(): Tools {
|
||||
AskUserQuestionTool,
|
||||
SkillTool,
|
||||
EnterPlanModeTool,
|
||||
...(process.env.USER_TYPE === 'ant' ? [ConfigTool] : []),
|
||||
...(process.env.USER_TYPE === 'ant' ? [TungstenTool] : []),
|
||||
...(SuggestBackgroundPRTool ? [SuggestBackgroundPRTool] : []),
|
||||
...(WebBrowserTool ? [WebBrowserTool] : []),
|
||||
...(isTodoV2Enabled()
|
||||
|
||||
@@ -1042,10 +1042,12 @@ export const AgentTool = buildTool({
|
||||
});
|
||||
} finally {
|
||||
stopBackgroundedSummarization?.();
|
||||
clearInvokedSkillsForAgent(syncAgentId);
|
||||
clearDumpState(syncAgentId);
|
||||
// Note: worktree cleanup is done before enqueueAgentNotification
|
||||
// in both try and catch paths so we can include worktree info
|
||||
// Defensive cleanup: wrap each call so one failure doesn't
|
||||
// prevent the other from running. Without this, if
|
||||
// clearInvokedSkillsForAgent throws, clearDumpState is
|
||||
// skipped and dump state leaks.
|
||||
try { clearInvokedSkillsForAgent(syncAgentId); } catch { /* cleanup best-effort */ }
|
||||
try { clearDumpState(syncAgentId); } catch { /* cleanup best-effort */ }
|
||||
}
|
||||
});
|
||||
|
||||
|
||||
@@ -73,9 +73,8 @@ export const EXPLORE_AGENT: BuiltInAgentDefinition = {
|
||||
],
|
||||
source: 'built-in',
|
||||
baseDir: 'built-in',
|
||||
// Ants get inherit to use the main agent's model; external users get haiku for speed
|
||||
// Note: For ants, getAgentModel() checks tengu_explore_agent GrowthBook flag at runtime
|
||||
model: process.env.USER_TYPE === 'ant' ? 'inherit' : 'haiku',
|
||||
// Use haiku for speed — explore is a fast read-only search agent
|
||||
model: 'haiku',
|
||||
// Explore is a fast read-only search agent — it doesn't need commit/PR/lint
|
||||
// rules from CLAUDE.md. The main agent has full context and interprets results.
|
||||
omitClaudeMd: true,
|
||||
|
||||
@@ -15,7 +15,6 @@ import {
|
||||
DOCKER_READ_ONLY_COMMANDS,
|
||||
EXTERNAL_READONLY_COMMANDS,
|
||||
type FlagArgType,
|
||||
GH_READ_ONLY_COMMANDS,
|
||||
GIT_READ_ONLY_COMMANDS,
|
||||
PYRIGHT_READ_ONLY_COMMANDS,
|
||||
RIPGREP_READ_ONLY_COMMANDS,
|
||||
@@ -1136,68 +1135,6 @@ const COMMAND_ALLOWLIST: Record<string, CommandConfig> = {
|
||||
...DOCKER_READ_ONLY_COMMANDS,
|
||||
}
|
||||
|
||||
// gh commands are internal-only since they make network requests, which goes against
// the read-only validation principle of no network access
//
// Commands listed here are merged into the allowlist only for internal
// ('ant') builds — see getCommandAllowlist() below. Flag values appear to
// describe the argument a flag takes ('none' = bare switch, 'number'/'string'
// = typed value) — NOTE(review): confirm against CommandConfig's contract.
const ANT_ONLY_COMMAND_ALLOWLIST: Record<string, CommandConfig> = {
  // All gh read-only commands from shared validation map
  ...GH_READ_ONLY_COMMANDS,
  // aki — internal knowledge-base search CLI.
  // Network read-only (same policy as gh). --audit-csv omitted: writes to disk.
  aki: {
    safeFlags: {
      // Help / search-mode switches
      '-h': 'none',
      '--help': 'none',
      '-k': 'none',
      '--keyword': 'none',
      '-s': 'none',
      '--semantic': 'none',
      '--no-adaptive': 'none',
      // Pagination
      '-n': 'number',
      '--limit': 'number',
      '-o': 'number',
      '--offset': 'number',
      // Result filtering (source, date range, location)
      '--source': 'string',
      '--exclude-source': 'string',
      '-a': 'string',
      '--after': 'string',
      '-b': 'string',
      '--before': 'string',
      '--collection': 'string',
      '--drive': 'string',
      '--folder': 'string',
      '--descendants': 'none',
      '-m': 'string',
      '--meta': 'string',
      // Ranking tuning
      '-t': 'string',
      '--threshold': 'string',
      '--kw-weight': 'string',
      '--sem-weight': 'string',
      // Output shaping
      '-j': 'none',
      '--json': 'none',
      '-c': 'none',
      '--chunk': 'none',
      '--preview': 'none',
      '-d': 'none',
      '--full-doc': 'none',
      '-v': 'none',
      '--verbose': 'none',
      '--stats': 'none',
      '-S': 'number',
      '--summarize': 'number',
      '--explain': 'none',
      '--examine': 'string',
      '--url': 'string',
      // Multi-turn query options
      '--multi-turn': 'number',
      '--multi-turn-model': 'string',
      '--multi-turn-context': 'string',
      '--no-rerank': 'none',
      // Read-only audit / endpoint selection (--audit-csv excluded above:
      // it writes to disk)
      '--audit': 'none',
      '--local': 'none',
      '--staging': 'none',
    },
  },
}
|
||||
|
||||
function getCommandAllowlist(): Record<string, CommandConfig> {
|
||||
let allowlist: Record<string, CommandConfig> = COMMAND_ALLOWLIST
|
||||
// On Windows, xargs can be used as a data-to-code bridge: if a file contains
|
||||
@@ -1208,9 +1145,6 @@ function getCommandAllowlist(): Record<string, CommandConfig> {
|
||||
const { xargs: _, ...rest } = allowlist
|
||||
allowlist = rest
|
||||
}
|
||||
if (process.env.USER_TYPE === 'ant') {
|
||||
return { ...allowlist, ...ANT_ONLY_COMMAND_ALLOWLIST }
|
||||
}
|
||||
return allowlist
|
||||
}
|
||||
|
||||
|
||||
@@ -19,34 +19,43 @@ type SandboxInput = {
|
||||
// It is not a security bug to be able to bypass excludedCommands — the sandbox permission
|
||||
// system (which prompts users) is the actual security control.
|
||||
function containsExcludedCommand(command: string): boolean {
|
||||
// Check dynamic config for disabled commands and substrings (only for ants)
|
||||
if (process.env.USER_TYPE === 'ant') {
|
||||
const disabledCommands = getFeatureValue_CACHED_MAY_BE_STALE<{
|
||||
commands: string[]
|
||||
substrings: string[]
|
||||
}>('tengu_sandbox_disabled_commands', { commands: [], substrings: [] })
|
||||
// Check dynamic config for disabled commands and substrings
|
||||
const raw = getFeatureValue_CACHED_MAY_BE_STALE<{
|
||||
commands: string[]
|
||||
substrings: string[]
|
||||
}>('tengu_sandbox_disabled_commands', { commands: [], substrings: [] })
|
||||
|
||||
// Check if command contains any disabled substrings
|
||||
for (const substring of disabledCommands.substrings) {
|
||||
if (command.includes(substring)) {
|
||||
const disabledCommands =
|
||||
typeof raw === 'object' && raw !== null
|
||||
? raw
|
||||
: { commands: [], substrings: [] }
|
||||
const substrings = Array.isArray(disabledCommands.substrings)
|
||||
? disabledCommands.substrings
|
||||
: []
|
||||
const commands = Array.isArray(disabledCommands.commands)
|
||||
? disabledCommands.commands
|
||||
: []
|
||||
|
||||
// Check if command contains any disabled substrings
|
||||
for (const substring of substrings) {
|
||||
if (command.includes(substring)) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
// Check if command starts with any disabled commands
|
||||
try {
|
||||
const commandParts = splitCommand_DEPRECATED(command)
|
||||
for (const part of commandParts) {
|
||||
const baseCommand = part.trim().split(' ')[0]
|
||||
if (baseCommand && commands.includes(baseCommand)) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
// Check if command starts with any disabled commands
|
||||
try {
|
||||
const commandParts = splitCommand_DEPRECATED(command)
|
||||
for (const part of commandParts) {
|
||||
const baseCommand = part.trim().split(' ')[0]
|
||||
if (baseCommand && disabledCommands.commands.includes(baseCommand)) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
} catch {
|
||||
// If we can't parse the command (e.g., malformed bash syntax),
|
||||
// treat it as not excluded to allow other validation checks to handle it
|
||||
// This prevents crashes when rendering tool use messages
|
||||
}
|
||||
} catch {
|
||||
// If we can't parse the command (e.g., malformed bash syntax),
|
||||
// treat it as not excluded to allow other validation checks to handle it
|
||||
// This prevents crashes when rendering tool use messages
|
||||
}
|
||||
|
||||
// Check user-configured excluded commands from settings
|
||||
|
||||
@@ -98,73 +98,6 @@ User: "What files handle routing?"
|
||||
`
|
||||
}
|
||||
|
||||
function getEnterPlanModeToolPromptAnt(): string {
|
||||
// When interview phase is enabled, omit the "What Happens" section —
|
||||
// detailed workflow instructions arrive via the plan_mode attachment (messages.ts).
|
||||
const whatHappens = isPlanModeInterviewPhaseEnabled()
|
||||
? ''
|
||||
: WHAT_HAPPENS_SECTION
|
||||
|
||||
return `Use this tool when a task has genuine ambiguity about the right approach and getting user input before coding would prevent significant rework. This tool transitions you into plan mode where you can explore the codebase and design an implementation approach for user approval.
|
||||
|
||||
## When to Use This Tool
|
||||
|
||||
Plan mode is valuable when the implementation approach is genuinely unclear. Use it when:
|
||||
|
||||
1. **Significant Architectural Ambiguity**: Multiple reasonable approaches exist and the choice meaningfully affects the codebase
|
||||
- Example: "Add caching to the API" - Redis vs in-memory vs file-based
|
||||
- Example: "Add real-time updates" - WebSockets vs SSE vs polling
|
||||
|
||||
2. **Unclear Requirements**: You need to explore and clarify before you can make progress
|
||||
- Example: "Make the app faster" - need to profile and identify bottlenecks
|
||||
- Example: "Refactor this module" - need to understand what the target architecture should be
|
||||
|
||||
3. **High-Impact Restructuring**: The task will significantly restructure existing code and getting buy-in first reduces risk
|
||||
- Example: "Redesign the authentication system"
|
||||
- Example: "Migrate from one state management approach to another"
|
||||
|
||||
## When NOT to Use This Tool
|
||||
|
||||
Skip plan mode when you can reasonably infer the right approach:
|
||||
- The task is straightforward even if it touches multiple files
|
||||
- The user's request is specific enough that the implementation path is clear
|
||||
- You're adding a feature with an obvious implementation pattern (e.g., adding a button, a new endpoint following existing conventions)
|
||||
- Bug fixes where the fix is clear once you understand the bug
|
||||
- Research/exploration tasks (use the Agent tool instead)
|
||||
- The user says something like "can we work on X" or "let's do X" — just get started
|
||||
|
||||
When in doubt, prefer starting work and using ${ASK_USER_QUESTION_TOOL_NAME} for specific questions over entering a full planning phase.
|
||||
|
||||
${whatHappens}## Examples
|
||||
|
||||
### GOOD - Use EnterPlanMode:
|
||||
User: "Add user authentication to the app"
|
||||
- Genuinely ambiguous: session vs JWT, where to store tokens, middleware structure
|
||||
|
||||
User: "Redesign the data pipeline"
|
||||
- Major restructuring where the wrong approach wastes significant effort
|
||||
|
||||
### BAD - Don't use EnterPlanMode:
|
||||
User: "Add a delete button to the user profile"
|
||||
- Implementation path is clear; just do it
|
||||
|
||||
User: "Can we work on the search feature?"
|
||||
- User wants to get started, not plan
|
||||
|
||||
User: "Update the error handling in the API"
|
||||
- Start working; ask specific questions if needed
|
||||
|
||||
User: "Fix the typo in the README"
|
||||
- Straightforward, no planning needed
|
||||
|
||||
## Important Notes
|
||||
|
||||
- This tool REQUIRES user approval - they must consent to entering plan mode
|
||||
`
|
||||
}
|
||||
|
||||
export function getEnterPlanModeToolPrompt(): string {
|
||||
return process.env.USER_TYPE === 'ant'
|
||||
? getEnterPlanModeToolPromptAnt()
|
||||
: getEnterPlanModeToolPromptExternal()
|
||||
return getEnterPlanModeToolPromptExternal()
|
||||
}
|
||||
|
||||
195
src/tools/MonitorTool/MonitorTool.ts
Normal file
195
src/tools/MonitorTool/MonitorTool.ts
Normal file
@@ -0,0 +1,195 @@
|
||||
import type { ToolResultBlockParam } from '@anthropic-ai/sdk/resources/index.mjs'
|
||||
import React from 'react'
|
||||
import { z } from 'zod/v4'
|
||||
import { buildTool, type ToolDef } from '../../Tool.js'
|
||||
import { spawnShellTask } from '../../tasks/LocalShellTask/LocalShellTask.js'
|
||||
import { lazySchema } from '../../utils/lazySchema.js'
|
||||
import { exec } from '../../utils/Shell.js'
|
||||
import { getTaskOutputPath } from '../../utils/task/diskOutput.js'
|
||||
import {
|
||||
bashToolHasPermission,
|
||||
matchWildcardPattern,
|
||||
permissionRuleExtractPrefix,
|
||||
} from '../BashTool/bashPermissions.js'
|
||||
import { parseForSecurity } from '../../utils/bash/ast.js'
|
||||
|
||||
// User-facing / registry name of the Monitor tool.
export const MONITOR_TOOL_NAME = 'Monitor'

// Upper bound passed to Shell.exec for a monitored command's lifetime.
const MONITOR_TIMEOUT_MS = 30 * 60 * 1000 // 30 minutes
||||
|
||||
const inputSchema = lazySchema(() =>
|
||||
z.strictObject({
|
||||
command: z
|
||||
.string()
|
||||
.describe('The shell command to run and monitor'),
|
||||
description: z
|
||||
.string()
|
||||
.describe(
|
||||
'Clear, concise description of what this command does in active voice.',
|
||||
),
|
||||
}),
|
||||
)
|
||||
type InputSchema = ReturnType<typeof inputSchema>
|
||||
|
||||
const outputSchema = lazySchema(() =>
|
||||
z.object({
|
||||
taskId: z
|
||||
.string()
|
||||
.describe('The ID of the background monitor task'),
|
||||
outputFile: z
|
||||
.string()
|
||||
.describe('Path to the file where output is being written'),
|
||||
}),
|
||||
)
|
||||
type OutputSchema = ReturnType<typeof outputSchema>
|
||||
|
||||
type Output = z.infer<OutputSchema>
|
||||
|
||||
// MonitorTool: runs a shell command as a background task (kind='monitor')
// and streams its stdout back line-by-line as notifications (~1s polling,
// per the prompt/result text below).
export const MonitorTool = buildTool({
  name: MONITOR_TOOL_NAME,
  searchHint: 'stream shell output as notifications',
  maxResultSizeChars: 10_000,
  strict: true,

  // Each monitor is an independent background task, so concurrent tool
  // calls are safe.
  isConcurrencySafe() {
    return true
  },

  // The raw command string is what the auto permission classifier sees.
  toAutoClassifierInput(input) {
    return input.command
  },

  // Builds a predicate deciding whether a saved permission-rule pattern
  // covers this command. Non-'simple' parses match every pattern (returns
  // () => true), deferring the decision to the interactive permission flow.
  // For simple parses, each subcommand is matched against the rule's
  // extracted prefix (exact or "prefix <args>") or, when the rule has no
  // prefix, against the wildcard pattern.
  async preparePermissionMatcher({ command }) {
    const parsed = await parseForSecurity(command)
    if (parsed.kind !== 'simple') {
      return () => true
    }
    const subcommands = parsed.commands.map(c => c.argv.join(' '))
    return (pattern: string) => {
      const prefix = permissionRuleExtractPrefix(pattern)
      return subcommands.some(cmd => {
        if (prefix !== null) {
          return cmd === prefix || cmd.startsWith(`${prefix} `)
        }
        return matchWildcardPattern(pattern, cmd)
      })
    }
  },

  async checkPermissions(input, context) {
    // Delegate to the bash permission system — Monitor runs shell commands
    // just like Bash does, so the same permission rules apply.
    return bashToolHasPermission({ command: input.command }, context)
  },

  // Human-readable one-liner; falls back to a generic label when the model
  // supplied no description.
  async description(input) {
    return input.description || 'Monitor shell command'
  },

  async prompt() {
    return `Execute a shell command in the background and stream its stdout line-by-line as notifications. Each polling interval (~1s), new output lines are delivered to you. Use this for monitoring logs, watching build output, or observing long-running processes. For one-shot "wait until done" commands, prefer Bash with run_in_background instead.`
  },

  // Getters force evaluation of the lazy schemas on first access.
  get inputSchema(): InputSchema {
    return inputSchema()
  },

  get outputSchema(): OutputSchema {
    return outputSchema()
  },

  userFacingName() {
    return 'Monitor'
  },

  // Short summary for tool-use display: prefer the description, else the
  // raw command, else null.
  getToolUseSummary(input) {
    if (!input?.description) {
      return input?.command ?? null
    }
    return input.description
  },

  // Spinner/status text while the tool call is in flight.
  getActivityDescription(input) {
    if (!input?.description) {
      return 'Starting monitor'
    }
    return `Monitoring ${input.description}`
  },

  // Rendered line for the tool-use message; input may be partial while the
  // model is still streaming arguments, hence the ?? fallbacks.
  renderToolUseMessage(
    input: Partial<z.infer<InputSchema>>,
  ): React.ReactNode {
    const cmd = input.command ?? ''
    const desc = input.description ?? ''
    if (desc && cmd) {
      return `${desc}: ${cmd}`
    }
    return cmd || desc || ''
  },

  renderToolResultMessage(
    output: Output,
  ): React.ReactNode {
    return `Monitor started (task ${output.taskId})`
  },

  // Text returned to the model: includes the task id, the on-disk output
  // path, and instructions to stop via TaskStop.
  mapToolResultToToolResultBlockParam(
    output: Output,
    toolUseID: string,
  ): ToolResultBlockParam {
    const outputPath = output.outputFile
    return {
      tool_use_id: toolUseID,
      type: 'tool_result',
      content: `Monitor task started with ID: ${output.taskId}. Output is being streamed to: ${outputPath}. You will receive notifications as new output lines appear (~1s polling). Use TaskStop to end monitoring when done.`,
    }
  },

  // Starts the command and registers it as a background monitor task.
  // Returns immediately with the task id and output-file path rather than
  // waiting for the command to finish.
  async call(input, toolUseContext) {
    const { command, description } = input
    const { abortController, setAppState } = toolUseContext

    // Create the shell command — uses the same Shell.exec() as BashTool.
    // This is intentionally a shell execution (not execFile) because
    // MonitorTool needs full shell features (pipes, redirects, etc.)
    // just like BashTool does.
    const shellCommand = await exec(
      command,
      abortController.signal,
      'bash',
      { timeout: MONITOR_TIMEOUT_MS },
    )

    // Spawn as a background task with kind='monitor' — identical to
    // BashTool's run_in_background path but always monitor-flavored.
    const handle = await spawnShellTask(
      {
        command,
        description: description || command,
        shellCommand,
        toolUseId: toolUseContext.toolUseId,
        agentId: toolUseContext.agentId,
        kind: 'monitor',
      },
      {
        abortController,
        // Deliberately throws: this spawn context has no synchronous app
        // state to read. Any code path that calls getAppState here is a bug.
        getAppState: () => {
          throw new Error(
            'getAppState not available in MonitorTool spawn context',
          )
        },
        setAppState: toolUseContext.setAppStateForTasks ?? setAppState,
      },
    )

    const taskId = handle.taskId
    const outputFile = getTaskOutputPath(taskId)

    return {
      data: {
        taskId,
        outputFile,
      },
    }
  },
} satisfies ToolDef<InputSchema, Output>)
|
||||
@@ -9,39 +9,35 @@ export const DEFAULT_MAX_AGE_DAYS =
|
||||
DEFAULT_CRON_JITTER_CONFIG.recurringMaxAgeMs / (24 * 60 * 60 * 1000)
|
||||
|
||||
/**
|
||||
* Unified gate for the cron scheduling system. Combines the build-time
|
||||
* `feature('AGENT_TRIGGERS')` flag (dead code elimination) with the runtime
|
||||
* `tengu_kairos_cron` GrowthBook gate on a 5-minute refresh window.
|
||||
* Unified gate for the cron scheduling system.
|
||||
*
|
||||
* AGENT_TRIGGERS is independently shippable from KAIROS — the cron module
|
||||
* graph (cronScheduler/cronTasks/cronTasksLock/cron.ts + the three tools +
|
||||
* /loop skill) has zero imports into src/assistant/ and no feature('KAIROS')
|
||||
* calls. The REPL.tsx kairosEnabled read is safe:
|
||||
* kairosEnabled is unconditionally in AppStateStore with default false, so
|
||||
* when KAIROS is off the scheduler just gets assistantMode: false.
|
||||
* Open builds (USER_TYPE !== 'ant') enable cron unconditionally — the
|
||||
* cron tools and /loop skill are registered without the AGENT_TRIGGERS
|
||||
* build flag, so this gate is the sole runtime switch. Set the env var
|
||||
* `CLAUDE_CODE_DISABLE_CRON=1` to turn it off locally.
|
||||
*
|
||||
* Anthropic-internal (ant) builds additionally consult the
|
||||
* `tengu_kairos_cron` GrowthBook gate on a 5-minute refresh window,
|
||||
* serving as a fleet-wide kill switch.
|
||||
*
|
||||
* Called from Tool.isEnabled() (lazy, post-init) and inside useEffect /
|
||||
* imperative setup, never at module scope — so the disk cache has had a
|
||||
* chance to populate.
|
||||
*
|
||||
* The default is `true` — /loop is GA (announced in changelog). GrowthBook
|
||||
* is disabled for Bedrock/Vertex/Foundry and when DISABLE_TELEMETRY /
|
||||
* CLAUDE_CODE_DISABLE_NONESSENTIAL_TRAFFIC are set; a `false` default would
|
||||
* break /loop for those users (GH #31759). The GB gate now serves purely as
|
||||
* a fleet-wide kill switch — flipping it to `false` stops already-running
|
||||
* schedulers on their next isKilled poll tick, not just new ones.
|
||||
*
|
||||
* `CLAUDE_CODE_DISABLE_CRON` is a local override that wins over GB.
|
||||
*/
|
||||
export function isKairosCronEnabled(): boolean {
|
||||
return feature('AGENT_TRIGGERS')
|
||||
? !isEnvTruthy(process.env.CLAUDE_CODE_DISABLE_CRON) &&
|
||||
getFeatureValue_CACHED_WITH_REFRESH(
|
||||
'tengu_kairos_cron',
|
||||
true,
|
||||
KAIROS_CRON_REFRESH_MS,
|
||||
)
|
||||
: false
|
||||
if (isEnvTruthy(process.env.CLAUDE_CODE_DISABLE_CRON)) return false
|
||||
|
||||
// OpenClaude open builds do not rely on Anthropic's internal runtime gates.
|
||||
// Expose cron support by default unless explicitly disabled.
|
||||
if (process.env.USER_TYPE !== 'ant') return true
|
||||
|
||||
return getFeatureValue_CACHED_WITH_REFRESH(
|
||||
'tengu_kairos_cron',
|
||||
true,
|
||||
KAIROS_CRON_REFRESH_MS,
|
||||
)
|
||||
}
|
||||
|
||||
/**
|
||||
|
||||
@@ -819,7 +819,25 @@ export const SendMessageTool: Tool<InputSchema, SendMessageToolOutput> =
|
||||
},
|
||||
}
|
||||
}
|
||||
// task exists but stopped — auto-resume
|
||||
// task exists but stopped — auto-resume.
|
||||
// Guard against race: two concurrent SendMessage calls to the same
|
||||
// stopped agent could both trigger resumeAgentBackground(), causing
|
||||
// duplicate task registration. Check status again after acquiring
|
||||
// the task reference (the first resume changes status to 'running').
|
||||
const freshTask = context.getAppState().tasks[agentId]
|
||||
if (isLocalAgentTask(freshTask) && freshTask.status === 'running') {
|
||||
queuePendingMessage(
|
||||
agentId,
|
||||
input.message,
|
||||
context.setAppStateForTasks ?? context.setAppState,
|
||||
)
|
||||
return {
|
||||
data: {
|
||||
success: true,
|
||||
message: `Message queued for delivery to ${input.to} at its next tool round (was concurrently resumed).`,
|
||||
},
|
||||
}
|
||||
}
|
||||
try {
|
||||
const result = await resumeAgentBackground({
|
||||
agentId,
|
||||
|
||||
@@ -125,7 +125,7 @@ function makeToolSchema(input: Input): BetaWebSearchTool20250305 {
|
||||
name: 'web_search',
|
||||
allowed_domains: input.allowed_domains,
|
||||
blocked_domains: input.blocked_domains,
|
||||
max_uses: 8, // Hardcoded to 8 searches maximum
|
||||
max_uses: 15, // Allow up to 15 searches per query for better coverage
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -19,7 +19,7 @@ export const bingProvider: SearchProvider = {
|
||||
|
||||
const url = new URL('https://api.bing.microsoft.com/v7.0/search')
|
||||
url.searchParams.set('q', input.query)
|
||||
url.searchParams.set('count', '10')
|
||||
url.searchParams.set('count', '15')
|
||||
|
||||
const res = await fetch(url.toString(), {
|
||||
headers: { 'Ocp-Apim-Subscription-Key': process.env.BING_API_KEY! },
|
||||
|
||||
@@ -19,7 +19,7 @@ export const exaProvider: SearchProvider = {
|
||||
|
||||
const body: Record<string, any> = {
|
||||
query: input.query,
|
||||
numResults: 10,
|
||||
numResults: 15,
|
||||
type: 'auto',
|
||||
}
|
||||
|
||||
|
||||
@@ -21,7 +21,7 @@ export const firecrawlProvider: SearchProvider = {
|
||||
query = `${query} ${exclusions}`
|
||||
}
|
||||
|
||||
const data = await app.search(query, { limit: 10 })
|
||||
const data = await app.search(query, { limit: 15 })
|
||||
|
||||
const hits = applyDomainFilters(
|
||||
(data.web ?? []).map((r: { url: string; title?: string; description?: string }) => ({
|
||||
|
||||
@@ -19,6 +19,7 @@ export const jinaProvider: SearchProvider = {
|
||||
|
||||
const url = new URL('https://s.jina.ai/')
|
||||
url.searchParams.set('q', input.query)
|
||||
url.searchParams.set('count', '10')
|
||||
|
||||
const res = await fetch(url.toString(), {
|
||||
headers: {
|
||||
|
||||
@@ -26,6 +26,7 @@ export const linkupProvider: SearchProvider = {
|
||||
body: JSON.stringify({
|
||||
q: input.query,
|
||||
search_type: 'standard',
|
||||
depth: 'standard',
|
||||
}),
|
||||
signal,
|
||||
})
|
||||
|
||||
@@ -20,6 +20,7 @@ export const mojeekProvider: SearchProvider = {
|
||||
const url = new URL('https://www.mojeek.com/search')
|
||||
url.searchParams.set('q', input.query)
|
||||
url.searchParams.set('fmt', 'json')
|
||||
url.searchParams.set('t', '10')
|
||||
|
||||
const headers: Record<string, string> = {
|
||||
'Accept': 'application/json',
|
||||
|
||||
@@ -25,7 +25,7 @@ export const tavilyProvider: SearchProvider = {
|
||||
},
|
||||
body: JSON.stringify({
|
||||
query: input.query,
|
||||
max_results: 10,
|
||||
max_results: 15,
|
||||
include_answer: false,
|
||||
}),
|
||||
signal,
|
||||
|
||||
@@ -19,6 +19,7 @@ export const youProvider: SearchProvider = {
|
||||
|
||||
const url = new URL('https://api.ydc-index.io/v1/search')
|
||||
url.searchParams.set('query', input.query)
|
||||
url.searchParams.set('num_web_results', '10')
|
||||
|
||||
const res = await fetch(url.toString(), {
|
||||
headers: { 'X-API-Key': process.env.YOU_API_KEY! },
|
||||
|
||||
607
src/utils/codexCredentials.test.ts
Normal file
607
src/utils/codexCredentials.test.ts
Normal file
@@ -0,0 +1,607 @@
|
||||
/**
|
||||
* These tests avoid static imports so Bun can mock secureStorage before
|
||||
* codexCredentials is first loaded.
|
||||
*/
|
||||
import { afterEach, describe, expect, mock, test } from 'bun:test'
|
||||
|
||||
function makeJwt(payload: Record<string, unknown>): string {
|
||||
const header = Buffer.from(JSON.stringify({ alg: 'none', typ: 'JWT' }))
|
||||
.toString('base64url')
|
||||
const body = Buffer.from(JSON.stringify(payload)).toString('base64url')
|
||||
return `${header}.${body}.signature`
|
||||
}
|
||||
|
||||
describe('codexCredentials', () => {
|
||||
const originalSimple = process.env.CLAUDE_CODE_SIMPLE
|
||||
const originalCodeKey = process.env.CODEX_API_KEY
|
||||
const originalFetch = globalThis.fetch
|
||||
|
||||
afterEach(() => {
|
||||
mock.restore()
|
||||
globalThis.fetch = originalFetch
|
||||
|
||||
if (originalSimple === undefined) {
|
||||
delete process.env.CLAUDE_CODE_SIMPLE
|
||||
} else {
|
||||
process.env.CLAUDE_CODE_SIMPLE = originalSimple
|
||||
}
|
||||
|
||||
if (originalCodeKey === undefined) {
|
||||
delete process.env.CODEX_API_KEY
|
||||
} else {
|
||||
process.env.CODEX_API_KEY = originalCodeKey
|
||||
}
|
||||
})
|
||||
|
||||
test('save returns failure in bare mode', async () => {
|
||||
process.env.CLAUDE_CODE_SIMPLE = '1'
|
||||
|
||||
// @ts-expect-error cache-busting query string for Bun module mocks
|
||||
const { saveCodexCredentials } = await import(
|
||||
'./codexCredentials.js?save-bare-mode'
|
||||
)
|
||||
|
||||
const result = saveCodexCredentials({
|
||||
accessToken: 'token',
|
||||
accountId: 'acct_123',
|
||||
})
|
||||
|
||||
expect(result.success).toBe(false)
|
||||
expect(result.warning).toContain('Bare mode')
|
||||
})
|
||||
|
||||
test('saveCodexCredentials refuses plaintext fallback when native secure storage is unavailable', async () => {
|
||||
delete process.env.CLAUDE_CODE_SIMPLE
|
||||
|
||||
mock.module('./secureStorage/index.js', () => ({
|
||||
getSecureStorage: (options?: { allowPlainTextFallback?: boolean }) => {
|
||||
expect(options?.allowPlainTextFallback).toBe(false)
|
||||
return {
|
||||
read: () => null,
|
||||
readAsync: async () => null,
|
||||
update: () => ({
|
||||
success: false,
|
||||
warning:
|
||||
'Secure storage is unavailable on this platform without plaintext fallback.',
|
||||
}),
|
||||
delete: () => true,
|
||||
}
|
||||
},
|
||||
}))
|
||||
|
||||
// @ts-expect-error cache-busting query string for Bun module mocks
|
||||
const { saveCodexCredentials } = await import(
|
||||
'./codexCredentials.js?save-no-plaintext-fallback'
|
||||
)
|
||||
|
||||
const result = saveCodexCredentials({
|
||||
accessToken: 'token',
|
||||
accountId: 'acct_123',
|
||||
})
|
||||
|
||||
expect(result.success).toBe(false)
|
||||
expect(result.warning).toContain('without plaintext fallback')
|
||||
})
|
||||
|
||||
test('refreshCodexAccessTokenIfNeeded refreshes expired stored credentials', async () => {
|
||||
delete process.env.CLAUDE_CODE_SIMPLE
|
||||
delete process.env.CODEX_API_KEY
|
||||
|
||||
const expiredToken = makeJwt({
|
||||
exp: Math.floor((Date.now() - 60_000) / 1000),
|
||||
chatgpt_account_id: 'acct_old',
|
||||
})
|
||||
const freshAccessToken = makeJwt({
|
||||
exp: Math.floor((Date.now() + 3_600_000) / 1000),
|
||||
chatgpt_account_id: 'acct_new',
|
||||
})
|
||||
const freshIdToken = makeJwt({
|
||||
exp: Math.floor((Date.now() + 3_600_000) / 1000),
|
||||
'https://api.openai.com/auth': {
|
||||
chatgpt_account_id: 'acct_new',
|
||||
},
|
||||
})
|
||||
|
||||
let storageState: Record<string, unknown> = {
|
||||
codex: {
|
||||
accessToken: expiredToken,
|
||||
refreshToken: 'refresh-old',
|
||||
accountId: 'acct_old',
|
||||
},
|
||||
}
|
||||
|
||||
mock.module('./secureStorage/index.js', () => ({
|
||||
getSecureStorage: () => ({
|
||||
read: () => storageState,
|
||||
readAsync: async () => storageState,
|
||||
update: (next: Record<string, unknown>) => {
|
||||
storageState = next
|
||||
return { success: true }
|
||||
},
|
||||
}),
|
||||
}))
|
||||
|
||||
globalThis.fetch = mock(
|
||||
async (_input, init) => {
|
||||
const bodyText =
|
||||
typeof init?.body === 'string'
|
||||
? init.body
|
||||
: init?.body instanceof URLSearchParams
|
||||
? init.body.toString()
|
||||
: ''
|
||||
|
||||
if (
|
||||
bodyText.includes('grant_type=refresh_token') ||
|
||||
bodyText.includes('"grant_type":"refresh_token"')
|
||||
) {
|
||||
return new Response(
|
||||
JSON.stringify({
|
||||
access_token: freshAccessToken,
|
||||
refresh_token: 'refresh-new',
|
||||
id_token: freshIdToken,
|
||||
}),
|
||||
{
|
||||
status: 200,
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
},
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
return new Response(
|
||||
JSON.stringify({
|
||||
access_token: 'codex-api-key-token',
|
||||
}),
|
||||
{
|
||||
status: 200,
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
},
|
||||
},
|
||||
)
|
||||
},
|
||||
) as unknown as typeof fetch
|
||||
|
||||
// @ts-expect-error cache-busting query string for Bun module mocks
|
||||
const { refreshCodexAccessTokenIfNeeded, readCodexCredentials } =
|
||||
await import('./codexCredentials.js?refresh-success')
|
||||
|
||||
const result = await refreshCodexAccessTokenIfNeeded()
|
||||
expect(result.refreshed).toBe(true)
|
||||
|
||||
const stored = readCodexCredentials()
|
||||
expect(stored?.accessToken).toBe(freshAccessToken)
|
||||
expect(stored?.apiKey).toBe('codex-api-key-token')
|
||||
expect(stored?.refreshToken).toBe('refresh-new')
|
||||
expect(stored?.accountId).toBe('acct_new')
|
||||
})
|
||||
|
||||
// After a failed refresh (400 invalid_grant) the failure timestamp is
// persisted and the next call within the cooldown window returns the old
// credentials without hitting the network again.
test('refreshCodexAccessTokenIfNeeded backs off after a failed refresh attempt', async () => {
  delete process.env.CLAUDE_CODE_SIMPLE
  delete process.env.CODEX_API_KEY

  const expiredToken = makeJwt({
    exp: Math.floor((Date.now() - 60_000) / 1000),
    chatgpt_account_id: 'acct_old',
  })

  let storageState: Record<string, unknown> = {
    codex: {
      accessToken: expiredToken,
      refreshToken: 'refresh-old',
      accountId: 'acct_old',
    },
  }

  // In-memory stand-in for secure storage; update() swaps the whole state.
  mock.module('./secureStorage/index.js', () => ({
    getSecureStorage: () => ({
      read: () => storageState,
      readAsync: async () => storageState,
      update: (next: Record<string, unknown>) => {
        storageState = next
        return { success: true }
      },
    }),
  }))

  let refreshAttempts = 0
  globalThis.fetch = mock(async () => {
    refreshAttempts += 1
    return new Response(
      JSON.stringify({
        error: {
          code: 'invalid_grant',
          message: 'refresh token expired',
        },
      }),
      {
        status: 400,
        headers: {
          'Content-Type': 'application/json',
        },
      },
    )
  }) as unknown as typeof fetch

  // @ts-expect-error cache-busting query string for Bun module mocks
  const { refreshCodexAccessTokenIfNeeded, readCodexCredentials } =
    await import('./codexCredentials.js?refresh-cooldown')

  await expect(refreshCodexAccessTokenIfNeeded()).rejects.toThrow(
    'Codex token refresh failed (invalid_grant): refresh token expired',
  )

  const afterFailure = readCodexCredentials()
  expect(typeof afterFailure?.lastRefreshFailureAt).toBe('number')

  // Second call lands inside the cooldown: no refresh, no extra fetch.
  const secondAttempt = await refreshCodexAccessTokenIfNeeded()
  expect(secondAttempt.refreshed).toBe(false)
  expect(secondAttempt.credentials?.accessToken).toBe(expiredToken)
  expect(refreshAttempts).toBe(1)
})

// A successful token refresh whose follow-up id-token -> api-key exchange
// fails must not keep the previously stored (now stale) api key around.
test('refreshCodexAccessTokenIfNeeded drops a stale api key when id-token exchange fails', async () => {
  delete process.env.CLAUDE_CODE_SIMPLE
  delete process.env.CODEX_API_KEY

  const expiredToken = makeJwt({
    exp: Math.floor((Date.now() - 60_000) / 1000),
    chatgpt_account_id: 'acct_old',
  })
  const freshAccessToken = makeJwt({
    exp: Math.floor((Date.now() + 3_600_000) / 1000),
    chatgpt_account_id: 'acct_new',
  })
  const freshIdToken = makeJwt({
    exp: Math.floor((Date.now() + 3_600_000) / 1000),
    'https://api.openai.com/auth': {
      chatgpt_account_id: 'acct_new',
    },
  })

  let storageState: Record<string, unknown> = {
    codex: {
      apiKey: 'stale-api-key',
      accessToken: expiredToken,
      refreshToken: 'refresh-old',
      accountId: 'acct_old',
    },
  }

  mock.module('./secureStorage/index.js', () => ({
    getSecureStorage: () => ({
      read: () => storageState,
      readAsync: async () => storageState,
      update: (next: Record<string, unknown>) => {
        storageState = next
        return { success: true }
      },
    }),
  }))

  // Refresh-token grant succeeds; any other request (the api-key exchange)
  // fails with a 500 so the exchange path has to degrade gracefully.
  globalThis.fetch = mock(
    async (_input, init) => {
      const bodyText =
        typeof init?.body === 'string'
          ? init.body
          : init?.body instanceof URLSearchParams
            ? init.body.toString()
            : ''

      if (bodyText.includes('grant_type=refresh_token')) {
        return new Response(
          JSON.stringify({
            access_token: freshAccessToken,
            refresh_token: 'refresh-new',
            id_token: freshIdToken,
          }),
          {
            status: 200,
            headers: {
              'Content-Type': 'application/json',
            },
          },
        )
      }

      return new Response('exchange failed', {
        status: 500,
      })
    },
  ) as unknown as typeof fetch

  // @ts-expect-error cache-busting query string for Bun module mocks
  const { refreshCodexAccessTokenIfNeeded, readCodexCredentials } =
    await import('./codexCredentials.js?refresh-drop-stale-api-key')

  const result = await refreshCodexAccessTokenIfNeeded()
  expect(result.refreshed).toBe(true)

  const stored = readCodexCredentials()
  expect(stored?.accessToken).toBe(freshAccessToken)
  expect(stored?.apiKey).toBeUndefined()
  expect(stored?.refreshToken).toBe('refresh-new')
  expect(stored?.accountId).toBe('acct_new')
})
|
||||
|
||||
// Two overlapping calls must share one in-flight refresh: the fetch mock is
// gated on a manually released promise so both callers are pending at once,
// and only a single refresh-token request may be observed.
test('refreshCodexAccessTokenIfNeeded deduplicates concurrent refresh attempts', async () => {
  delete process.env.CLAUDE_CODE_SIMPLE
  delete process.env.CODEX_API_KEY

  const expiredToken = makeJwt({
    exp: Math.floor((Date.now() - 60_000) / 1000),
    chatgpt_account_id: 'acct_old',
  })
  const freshAccessToken = makeJwt({
    exp: Math.floor((Date.now() + 3_600_000) / 1000),
    chatgpt_account_id: 'acct_new',
  })
  const freshIdToken = makeJwt({
    exp: Math.floor((Date.now() + 3_600_000) / 1000),
    'https://api.openai.com/auth': {
      chatgpt_account_id: 'acct_new',
    },
  })

  let storageState: Record<string, unknown> = {
    codex: {
      accessToken: expiredToken,
      refreshToken: 'refresh-old',
      accountId: 'acct_old',
    },
  }

  mock.module('./secureStorage/index.js', () => ({
    getSecureStorage: () => ({
      read: () => storageState,
      readAsync: async () => storageState,
      update: (next: Record<string, unknown>) => {
        storageState = next
        return { success: true }
      },
    }),
  }))

  let refreshAttempts = 0
  // Gate the refresh response until the test releases it, keeping both
  // concurrent callers blocked on the same in-flight promise.
  let releaseRefresh: (() => void) | undefined
  const refreshGate = new Promise<void>(resolve => {
    releaseRefresh = resolve
  })

  globalThis.fetch = mock(async (_input, init) => {
    const bodyText =
      typeof init?.body === 'string'
        ? init.body
        : init?.body instanceof URLSearchParams
          ? init.body.toString()
          : ''

    if (bodyText.includes('grant_type=refresh_token')) {
      refreshAttempts += 1
      await refreshGate
      return new Response(
        JSON.stringify({
          access_token: freshAccessToken,
          refresh_token: 'refresh-new',
          id_token: freshIdToken,
        }),
        {
          status: 200,
          headers: {
            'Content-Type': 'application/json',
          },
        },
      )
    }

    // Non-refresh requests (the api-key exchange) succeed immediately.
    return new Response(
      JSON.stringify({
        access_token: 'codex-api-key-token',
      }),
      {
        status: 200,
        headers: {
          'Content-Type': 'application/json',
        },
      },
    )
  }) as unknown as typeof fetch

  // @ts-expect-error cache-busting query string for Bun module mocks
  const { refreshCodexAccessTokenIfNeeded } = await import(
    './codexCredentials.js?refresh-dedupe'
  )

  const firstRefresh = refreshCodexAccessTokenIfNeeded()
  const secondRefresh = refreshCodexAccessTokenIfNeeded()
  releaseRefresh?.()

  const [firstResult, secondResult] = await Promise.all([
    firstRefresh,
    secondRefresh,
  ])

  expect(refreshAttempts).toBe(1)
  expect(firstResult).toEqual(secondResult)
  expect(firstResult.refreshed).toBe(true)
  expect(firstResult.credentials?.accessToken).toBe(freshAccessToken)
})
|
||||
|
||||
// Saving fresh tokens without a profileId must keep the profileId that was
// already linked in storage (saveCodexCredentials merges with the previous
// blob).
test('saveCodexCredentials preserves an existing linked profile id', async () => {
  delete process.env.CLAUDE_CODE_SIMPLE

  let storageState: Record<string, unknown> = {
    codex: {
      accessToken: 'access-old',
      refreshToken: 'refresh-old',
      accountId: 'acct_old',
      profileId: 'profile_codex_oauth',
    },
  }

  mock.module('./secureStorage/index.js', () => ({
    getSecureStorage: () => ({
      read: () => storageState,
      readAsync: async () => storageState,
      update: (next: Record<string, unknown>) => {
        storageState = next
        return { success: true }
      },
    }),
  }))

  // @ts-expect-error cache-busting query string for Bun module mocks
  const { readCodexCredentials, saveCodexCredentials } = await import(
    './codexCredentials.js?preserve-profile-id'
  )

  const saved = saveCodexCredentials({
    accessToken: 'access-new',
    refreshToken: 'refresh-new',
    accountId: 'acct_new',
  })

  expect(saved.success).toBe(true)
  expect(readCodexCredentials()?.profileId).toBe('profile_codex_oauth')
})

// attachCodexProfileIdToStoredCredentials should write the given profile id
// onto credentials that were stored without one.
test('attachCodexProfileIdToStoredCredentials links the saved profile id', async () => {
  delete process.env.CLAUDE_CODE_SIMPLE

  let storageState: Record<string, unknown> = {
    codex: {
      accessToken: 'access-old',
      refreshToken: 'refresh-old',
      accountId: 'acct_old',
    },
  }

  mock.module('./secureStorage/index.js', () => ({
    getSecureStorage: () => ({
      read: () => storageState,
      readAsync: async () => storageState,
      update: (next: Record<string, unknown>) => {
        storageState = next
        return { success: true }
      },
    }),
  }))

  // @ts-expect-error cache-busting query string for Bun module mocks
  const {
    attachCodexProfileIdToStoredCredentials,
    readCodexCredentials,
  } = await import('./codexCredentials.js?attach-profile-id')

  const result =
    attachCodexProfileIdToStoredCredentials('profile_codex_oauth')

  expect(result.success).toBe(true)
  expect(readCodexCredentials()?.profileId).toBe('profile_codex_oauth')
})
|
||||
|
||||
// The refresh check path must use readAsync only: the sync read() mock
// throws, so the test fails if any synchronous storage read sneaks in.
test('refreshCodexAccessTokenIfNeeded uses async secure-storage reads in its request path', async () => {
  delete process.env.CLAUDE_CODE_SIMPLE
  delete process.env.CODEX_API_KEY

  const freshToken = makeJwt({
    exp: Math.floor((Date.now() + 3_600_000) / 1000),
    chatgpt_account_id: 'acct_async',
  })

  let storageState: Record<string, unknown> = {
    codex: {
      accessToken: freshToken,
      refreshToken: 'refresh-async',
      accountId: 'acct_async',
    },
  }

  mock.module('./secureStorage/index.js', () => ({
    getSecureStorage: () => ({
      read: () => {
        throw new Error(
          'sync storage read should not run during refresh checks',
        )
      },
      readAsync: async () => storageState,
      update: (next: Record<string, unknown>) => {
        storageState = next
        return { success: true }
      },
    }),
  }))

  // @ts-expect-error cache-busting query string for Bun module mocks
  const { refreshCodexAccessTokenIfNeeded } = await import(
    './codexCredentials.js?refresh-async-read'
  )

  // Token is still fresh, so no refresh happens — and no sync read either.
  const result = await refreshCodexAccessTokenIfNeeded()
  expect(result.refreshed).toBe(false)
  expect(result.credentials?.accessToken).toBe(freshToken)
})

// When storage.update() reports failure, the failed-refresh timestamp cannot
// be persisted; the module must still honor the cooldown from its in-memory
// fallback so it doesn't hammer the refresh endpoint.
test('refreshCodexAccessTokenIfNeeded keeps a cooldown in memory when secure storage cannot persist it', async () => {
  delete process.env.CLAUDE_CODE_SIMPLE
  delete process.env.CODEX_API_KEY

  const expiredToken = makeJwt({
    exp: Math.floor((Date.now() - 60_000) / 1000),
    chatgpt_account_id: 'acct_old',
  })

  const storageState: Record<string, unknown> = {
    codex: {
      accessToken: expiredToken,
      refreshToken: 'refresh-old',
      accountId: 'acct_old',
    },
  }

  mock.module('./secureStorage/index.js', () => ({
    getSecureStorage: () => ({
      read: () => storageState,
      readAsync: async () => storageState,
      update: () => ({
        success: false,
        warning: 'secure storage unavailable',
      }),
    }),
  }))

  let refreshAttempts = 0
  globalThis.fetch = mock(async () => {
    refreshAttempts += 1
    return new Response(
      JSON.stringify({
        error: {
          code: 'invalid_grant',
          message: 'refresh token expired',
        },
      }),
      {
        status: 400,
        headers: {
          'Content-Type': 'application/json',
        },
      },
    )
  }) as unknown as typeof fetch

  // @ts-expect-error cache-busting query string for Bun module mocks
  const { refreshCodexAccessTokenIfNeeded } = await import(
    './codexCredentials.js?refresh-memory-cooldown'
  )

  await expect(refreshCodexAccessTokenIfNeeded()).rejects.toThrow(
    'Codex token refresh failed (invalid_grant): refresh token expired',
  )

  const secondAttempt = await refreshCodexAccessTokenIfNeeded()
  expect(secondAttempt.refreshed).toBe(false)
  expect(secondAttempt.credentials?.accessToken).toBe(expiredToken)
  expect(refreshAttempts).toBe(1)
})
|
||||
})
|
||||
375
src/utils/codexCredentials.ts
Normal file
375
src/utils/codexCredentials.ts
Normal file
@@ -0,0 +1,375 @@
|
||||
import { isBareMode } from './envUtils.js'
|
||||
import { getSecureStorage } from './secureStorage/index.js'
|
||||
import {
|
||||
asTrimmedString,
|
||||
CODEX_REFRESH_URL,
|
||||
exchangeCodexIdTokenForApiKey,
|
||||
getCodexOAuthClientId,
|
||||
parseChatgptAccountId,
|
||||
decodeJwtPayload,
|
||||
} from '../services/api/codexOAuthShared.js'
|
||||
|
||||
// Key under which the Codex credential blob lives inside secure storage.
export const CODEX_STORAGE_KEY = 'codex' as const
// Refresh this long before the token's actual expiry.
const CODEX_TOKEN_REFRESH_SKEW_MS = 60_000
// After a failed refresh, don't retry for this long.
const CODEX_TOKEN_REFRESH_RETRY_COOLDOWN_MS = 60_000

// Shape of the Codex credentials persisted in secure storage.
export type CodexCredentialBlob = {
  apiKey?: string
  accessToken: string
  refreshToken?: string
  idToken?: string
  accountId?: string
  profileId?: string
  // Epoch ms of the last successful refresh/save.
  lastRefreshAt?: number
  // Epoch ms of the last failed refresh; drives the retry cooldown.
  lastRefreshFailureAt?: number
}

// Subset of the OAuth token endpoint's JSON response that we consume.
type CodexTokenRefreshResponse = {
  access_token?: string
  refresh_token?: string
  id_token?: string
}

// Shared promise so concurrent callers of refreshCodexAccessTokenIfNeeded
// piggyback on a single network refresh.
let inFlightCodexRefresh:
  | Promise<{
      refreshed: boolean
      credentials?: CodexCredentialBlob
    }>
  | null = null
// Fallback failure timestamp used when secure storage can't persist one.
let inMemoryLastRefreshFailureAt: number | null = null
|
||||
|
||||
function getCodexSecureStorage() {
|
||||
return getSecureStorage({ allowPlainTextFallback: false })
|
||||
}
|
||||
|
||||
function parseJwtExpiryMs(token: string | undefined): number | undefined {
|
||||
if (!token) return undefined
|
||||
const payload = decodeJwtPayload(token)
|
||||
const exp = payload?.exp
|
||||
if (typeof exp === 'number' && Number.isFinite(exp)) {
|
||||
return exp * 1000
|
||||
}
|
||||
return undefined
|
||||
}
|
||||
|
||||
function normalizeCodexCredentialBlob(
|
||||
value: unknown,
|
||||
): CodexCredentialBlob | undefined {
|
||||
if (!value || typeof value !== 'object') return undefined
|
||||
|
||||
const record = value as Record<string, unknown>
|
||||
const apiKey = asTrimmedString(record.apiKey)
|
||||
const accessToken = asTrimmedString(record.accessToken)
|
||||
if (!accessToken) return undefined
|
||||
|
||||
const refreshToken = asTrimmedString(record.refreshToken)
|
||||
const idToken = asTrimmedString(record.idToken)
|
||||
const accountId =
|
||||
asTrimmedString(record.accountId) ??
|
||||
parseChatgptAccountId(idToken) ??
|
||||
parseChatgptAccountId(accessToken)
|
||||
const profileId = asTrimmedString(record.profileId)
|
||||
|
||||
const lastRefreshAt =
|
||||
typeof record.lastRefreshAt === 'number' &&
|
||||
Number.isFinite(record.lastRefreshAt)
|
||||
? record.lastRefreshAt
|
||||
: undefined
|
||||
const lastRefreshFailureAt =
|
||||
typeof record.lastRefreshFailureAt === 'number' &&
|
||||
Number.isFinite(record.lastRefreshFailureAt)
|
||||
? record.lastRefreshFailureAt
|
||||
: undefined
|
||||
|
||||
return {
|
||||
apiKey,
|
||||
accessToken,
|
||||
refreshToken,
|
||||
idToken,
|
||||
accountId,
|
||||
profileId,
|
||||
lastRefreshAt,
|
||||
lastRefreshFailureAt,
|
||||
}
|
||||
}
|
||||
|
||||
function shouldRefreshCodexToken(blob: CodexCredentialBlob): boolean {
|
||||
const expiresAt =
|
||||
parseJwtExpiryMs(blob.accessToken) ?? parseJwtExpiryMs(blob.idToken)
|
||||
if (expiresAt === undefined) {
|
||||
return false
|
||||
}
|
||||
return expiresAt <= Date.now() + CODEX_TOKEN_REFRESH_SKEW_MS
|
||||
}
|
||||
|
||||
function isWithinRefreshFailureCooldown(
|
||||
blob: CodexCredentialBlob,
|
||||
now = Date.now(),
|
||||
): boolean {
|
||||
const lastRefreshFailureAt = Math.max(
|
||||
blob.lastRefreshFailureAt ?? 0,
|
||||
inMemoryLastRefreshFailureAt ?? 0,
|
||||
)
|
||||
|
||||
if (!lastRefreshFailureAt) {
|
||||
return false
|
||||
}
|
||||
|
||||
return (
|
||||
now - lastRefreshFailureAt < CODEX_TOKEN_REFRESH_RETRY_COOLDOWN_MS
|
||||
)
|
||||
}
|
||||
|
||||
function getRefreshErrorMessage(
|
||||
status: number,
|
||||
bodyText: string,
|
||||
): string {
|
||||
if (!bodyText.trim()) {
|
||||
return `Codex token refresh failed with status ${status}.`
|
||||
}
|
||||
|
||||
try {
|
||||
const parsed = JSON.parse(bodyText) as Record<string, unknown>
|
||||
const nestedError =
|
||||
parsed.error && typeof parsed.error === 'object'
|
||||
? (parsed.error as Record<string, unknown>)
|
||||
: undefined
|
||||
const code = asTrimmedString(nestedError?.code ?? parsed.code)
|
||||
const message =
|
||||
asTrimmedString(nestedError?.message ?? parsed.error_description) ??
|
||||
bodyText.trim()
|
||||
return code
|
||||
? `Codex token refresh failed (${code}): ${message}`
|
||||
: `Codex token refresh failed with status ${status}: ${message}`
|
||||
} catch {
|
||||
return `Codex token refresh failed with status ${status}: ${bodyText.trim()}`
|
||||
}
|
||||
}
|
||||
|
||||
export function readCodexCredentials(): CodexCredentialBlob | undefined {
|
||||
if (isBareMode()) return undefined
|
||||
|
||||
try {
|
||||
const data = getCodexSecureStorage().read()
|
||||
return normalizeCodexCredentialBlob(data?.codex)
|
||||
} catch {
|
||||
return undefined
|
||||
}
|
||||
}
|
||||
|
||||
export async function readCodexCredentialsAsync(): Promise<
|
||||
CodexCredentialBlob | undefined
|
||||
> {
|
||||
if (isBareMode()) return undefined
|
||||
|
||||
try {
|
||||
const data = await getCodexSecureStorage().readAsync()
|
||||
return normalizeCodexCredentialBlob(data?.codex)
|
||||
} catch {
|
||||
return undefined
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * Public wrapper around the refresh-failure cooldown check. Accepts any
 * object carrying lastRefreshFailureAt; no other blob fields are read by
 * the underlying helper, so the widening cast is safe.
 */
export function isCodexRefreshFailureCoolingDown(
  blob: Pick<CodexCredentialBlob, 'lastRefreshFailureAt'>,
  now = Date.now(),
): boolean {
  return isWithinRefreshFailureCooldown(
    blob as CodexCredentialBlob,
    now,
  )
}
|
||||
|
||||
export function saveCodexCredentials(
|
||||
credentials: CodexCredentialBlob,
|
||||
): { success: boolean; warning?: string } {
|
||||
if (isBareMode()) {
|
||||
return { success: false, warning: 'Bare mode: secure storage is disabled.' }
|
||||
}
|
||||
|
||||
const normalized = normalizeCodexCredentialBlob(credentials)
|
||||
if (!normalized) {
|
||||
return { success: false, warning: 'Codex credentials are incomplete.' }
|
||||
}
|
||||
|
||||
const secureStorage = getCodexSecureStorage()
|
||||
const previous = secureStorage.read() || {}
|
||||
const previousCodex = normalizeCodexCredentialBlob(previous[CODEX_STORAGE_KEY])
|
||||
const next = {
|
||||
...(previous as Record<string, unknown>),
|
||||
[CODEX_STORAGE_KEY]: {
|
||||
...normalized,
|
||||
profileId: normalized.profileId ?? previousCodex?.profileId,
|
||||
lastRefreshAt: normalized.lastRefreshAt ?? Date.now(),
|
||||
},
|
||||
}
|
||||
const result = secureStorage.update(next as typeof previous)
|
||||
if (result.success) {
|
||||
const storedCodex = normalizeCodexCredentialBlob(next[CODEX_STORAGE_KEY])
|
||||
inMemoryLastRefreshFailureAt = storedCodex?.lastRefreshFailureAt ?? null
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
export function attachCodexProfileIdToStoredCredentials(profileId: string): {
|
||||
success: boolean
|
||||
warning?: string
|
||||
} {
|
||||
if (isBareMode()) {
|
||||
return { success: false, warning: 'Bare mode: secure storage is disabled.' }
|
||||
}
|
||||
|
||||
const current = readCodexCredentials()
|
||||
if (!current) {
|
||||
return {
|
||||
success: false,
|
||||
warning: 'Codex credentials are not stored securely yet.',
|
||||
}
|
||||
}
|
||||
|
||||
return saveCodexCredentials({
|
||||
...current,
|
||||
profileId,
|
||||
})
|
||||
}
|
||||
|
||||
function persistCodexRefreshFailure(
|
||||
credentials: CodexCredentialBlob,
|
||||
occurredAt: number,
|
||||
): void {
|
||||
const result = saveCodexCredentials({
|
||||
...credentials,
|
||||
lastRefreshFailureAt: occurredAt,
|
||||
})
|
||||
if (!result.success) {
|
||||
inMemoryLastRefreshFailureAt = occurredAt
|
||||
}
|
||||
}
|
||||
|
||||
export function clearCodexCredentials(): {
|
||||
success: boolean
|
||||
warning?: string
|
||||
} {
|
||||
if (isBareMode()) {
|
||||
return { success: true }
|
||||
}
|
||||
|
||||
const secureStorage = getCodexSecureStorage()
|
||||
const previous = secureStorage.read() || {}
|
||||
const next = { ...(previous as Record<string, unknown>) }
|
||||
delete next[CODEX_STORAGE_KEY]
|
||||
const result = secureStorage.update(next as typeof previous)
|
||||
if (result.success) {
|
||||
inMemoryLastRefreshFailureAt = null
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
/**
 * Refreshes the stored Codex access token when it is (nearly) expired.
 *
 * Skips work entirely in bare mode, when CODEX_API_KEY is set, when no
 * credentials or refresh token are stored, when the token is still fresh
 * (unless `options.force`), or when a recent refresh failure put us in the
 * retry cooldown (also bypassed by `force`). Concurrent callers share one
 * in-flight refresh promise.
 *
 * Returns { refreshed, credentials? }; throws when the refresh request
 * itself fails or the new credentials cannot be saved (the failure time is
 * recorded first so the cooldown engages).
 */
export async function refreshCodexAccessTokenIfNeeded(options?: {
  force?: boolean
}): Promise<{
  refreshed: boolean
  credentials?: CodexCredentialBlob
}> {
  if (isBareMode()) {
    return { refreshed: false }
  }

  // An explicit API key takes precedence over OAuth tokens entirely.
  if (process.env.CODEX_API_KEY?.trim()) {
    return { refreshed: false }
  }

  const current = await readCodexCredentialsAsync()
  if (!current) {
    return { refreshed: false }
  }

  if (!current.refreshToken) {
    return { refreshed: false, credentials: current }
  }

  if (!options?.force && !shouldRefreshCodexToken(current)) {
    return { refreshed: false, credentials: current }
  }

  if (!options?.force && isWithinRefreshFailureCooldown(current)) {
    return { refreshed: false, credentials: current }
  }

  // Dedupe: a second caller arriving while a refresh is running awaits the
  // same promise. (No await between this check and the assignment below, so
  // the set is effectively atomic.)
  if (inFlightCodexRefresh) {
    return inFlightCodexRefresh
  }

  inFlightCodexRefresh = (async () => {
    const refreshAttemptedAt = Date.now()

    try {
      const body = new URLSearchParams({
        client_id: getCodexOAuthClientId(),
        grant_type: 'refresh_token',
        refresh_token: current.refreshToken,
      })

      const response = await fetch(CODEX_REFRESH_URL, {
        method: 'POST',
        headers: {
          'Content-Type': 'application/x-www-form-urlencoded',
        },
        body,
        signal: AbortSignal.timeout(15_000),
      })

      if (!response.ok) {
        const bodyText = await response.text().catch(() => '')
        throw new Error(getRefreshErrorMessage(response.status, bodyText))
      }

      const payload = (await response.json()) as CodexTokenRefreshResponse
      const accessToken = asTrimmedString(payload.access_token)
      if (!accessToken) {
        throw new Error(
          'Codex token refresh succeeded without a new access token.',
        )
      }

      // Note: next has no apiKey yet — a stale stored apiKey is dropped
      // unless the exchange below produces a fresh one.
      const next: CodexCredentialBlob = {
        accessToken,
        refreshToken:
          asTrimmedString(payload.refresh_token) ?? current.refreshToken,
        idToken: asTrimmedString(payload.id_token) ?? current.idToken,
        accountId:
          parseChatgptAccountId(payload.id_token) ??
          parseChatgptAccountId(payload.access_token) ??
          current.accountId,
        lastRefreshAt: Date.now(),
      }

      // Best-effort: exchange the id token for an API key; failures leave
      // apiKey undefined rather than aborting the refresh.
      const idTokenForExchange = next.idToken ?? current.idToken
      if (idTokenForExchange) {
        next.apiKey = await exchangeCodexIdTokenForApiKey(
          idTokenForExchange,
        ).catch(() => undefined)
      }

      const saveResult = saveCodexCredentials(next)
      if (!saveResult.success) {
        throw new Error(
          saveResult.warning ??
            'Codex token refresh succeeded but credentials could not be saved.',
        )
      }

      return {
        refreshed: true,
        credentials: next,
      }
    } catch (error) {
      // Record the failure (persisted or in-memory) before rethrowing so
      // the retry cooldown takes effect.
      persistCodexRefreshFailure(current, refreshAttemptedAt)
      throw error
    } finally {
      inFlightCodexRefresh = null
    }
  })()

  return inFlightCodexRefresh
}
|
||||
@@ -107,9 +107,23 @@ test('MiniMax-M2.7 uses explicit provider-specific context and output caps', ()
|
||||
expect(getMaxOutputTokensForModel('MiniMax-M2.7')).toBe(131_072)
|
||||
})
|
||||
|
||||
test('unknown openai-compatible models still use the conservative fallback window', () => {
|
||||
test('unknown openai-compatible models use the 128k fallback window (not 8k, see #635)', () => {
|
||||
process.env.CLAUDE_CODE_USE_OPENAI = '1'
|
||||
delete process.env.CLAUDE_CODE_MAX_OUTPUT_TOKENS
|
||||
|
||||
expect(getContextWindowForModel('some-unknown-3p-model')).toBe(8_000)
|
||||
expect(getContextWindowForModel('some-unknown-3p-model')).toBe(128_000)
|
||||
})
|
||||
|
||||
test('MiniMax-M2.5 and M2.1 use explicit provider-specific context and output caps', () => {
|
||||
process.env.CLAUDE_CODE_USE_OPENAI = '1'
|
||||
delete process.env.CLAUDE_CODE_MAX_OUTPUT_TOKENS
|
||||
|
||||
expect(getContextWindowForModel('MiniMax-M2.5')).toBe(204_800)
|
||||
expect(getContextWindowForModel('MiniMax-M2.5-highspeed')).toBe(204_800)
|
||||
expect(getContextWindowForModel('MiniMax-M2.1')).toBe(204_800)
|
||||
expect(getContextWindowForModel('MiniMax-M2.1-highspeed')).toBe(204_800)
|
||||
expect(getModelMaxOutputTokens('MiniMax-M2.5')).toEqual({
|
||||
default: 131_072,
|
||||
upperLimit: 131_072,
|
||||
})
|
||||
})
|
||||
|
||||
@@ -9,6 +9,11 @@ import { getOpenAIContextWindow, getOpenAIMaxOutputTokens } from './model/openai
|
||||
// Model context window size (200k tokens for all models right now)
|
||||
export const MODEL_CONTEXT_WINDOW_DEFAULT = 200_000
|
||||
|
||||
// Fallback context window for unknown 3P models. Must be large enough that
|
||||
// the effective context (this minus output token reservation) stays positive,
|
||||
// otherwise auto-compact fires on every message (issue #635).
|
||||
export const OPENAI_FALLBACK_CONTEXT_WINDOW = 128_000
|
||||
|
||||
// Maximum output tokens for compact operations
|
||||
export const COMPACT_MAX_OUTPUT_TOKENS = 20_000
|
||||
|
||||
@@ -73,8 +78,9 @@ export function getContextWindowForModel(
|
||||
}
|
||||
|
||||
// OpenAI-compatible provider — use known context windows for the model.
|
||||
// Unknown models get a conservative 8k default so auto-compact triggers
|
||||
// before hitting a hard context_window_exceeded error.
|
||||
// Unknown models get a conservative 128k default. This was previously 8k,
|
||||
// but that caused auto-compact to fire on every turn because the effective
|
||||
// context (8k minus output reservation) became negative (issue #635).
|
||||
const isOpenAIProvider =
|
||||
isEnvTruthy(process.env.CLAUDE_CODE_USE_OPENAI) ||
|
||||
isEnvTruthy(process.env.CLAUDE_CODE_USE_GEMINI) ||
|
||||
@@ -86,10 +92,10 @@ export function getContextWindowForModel(
|
||||
return openaiWindow
|
||||
}
|
||||
console.error(
|
||||
`[context] Warning: model "${model}" not in context window table — using conservative 8k default. ` +
|
||||
`[context] Warning: model "${model}" not in context window table — using conservative 128k default. ` +
|
||||
'Add it to src/utils/model/openaiContextWindows.ts for accurate compaction.',
|
||||
)
|
||||
return 8_000
|
||||
return OPENAI_FALLBACK_CONTEXT_WINDOW
|
||||
}
|
||||
|
||||
const cap = getModelCapability(model)
|
||||
|
||||
65
src/utils/effort.codex.test.ts
Normal file
65
src/utils/effort.codex.test.ts
Normal file
@@ -0,0 +1,65 @@
|
||||
import { afterEach, expect, mock, test } from 'bun:test'
|
||||
|
||||
// Restore all Bun module mocks after each test so cases stay isolated.
afterEach(() => {
  mock.restore()
})
|
||||
|
||||
/**
 * Imports a fresh copy of ./effort.js with its provider-related
 * dependencies mocked. The cache-busting query string forces Bun to
 * evaluate the module again so the mocks registered here take effect.
 */
async function importFreshEffortModule(options: {
  provider: 'codex' | 'openai'
  supportsCodexReasoningEffort: boolean
}) {
  mock.module('./model/providers.js', () => ({
    getAPIProvider: () => options.provider,
  }))
  mock.module('./model/modelSupportOverrides.js', () => ({
    get3PModelCapabilityOverride: () => undefined,
  }))
  mock.module('../services/api/providerConfig.js', () => ({
    supportsCodexReasoningEffort: () => options.supportsCodexReasoningEffort,
  }))

  return import(`./effort.js?ts=${Date.now()}-${Math.random()}`)
}
|
||||
|
||||
// On the Codex provider with reasoning-effort support, gpt-5.4 exposes the
// full OpenAI effort ladder.
test('gpt-5.4 on the ChatGPT Codex backend supports effort selection', async () => {
  const { getAvailableEffortLevels, modelSupportsEffort } =
    await importFreshEffortModule({
      provider: 'codex',
      supportsCodexReasoningEffort: true,
    })

  expect(modelSupportsEffort('gpt-5.4')).toBe(true)
  expect(getAvailableEffortLevels('gpt-5.4')).toEqual([
    'low',
    'medium',
    'high',
    'xhigh',
  ])
})

// The same model on the plain OpenAI provider keeps effort selection.
test('gpt-5.4 on the OpenAI provider still supports effort selection', async () => {
  const { getAvailableEffortLevels, modelSupportsEffort } =
    await importFreshEffortModule({
      provider: 'openai',
      supportsCodexReasoningEffort: true,
    })

  expect(modelSupportsEffort('gpt-5.4')).toBe(true)
  expect(getAvailableEffortLevels('gpt-5.4')).toEqual([
    'low',
    'medium',
    'high',
    'xhigh',
  ])
})

// Models without Codex reasoning-effort support expose no effort levels.
test('gpt-5.3-codex-spark stays without effort controls', async () => {
  const { getAvailableEffortLevels, modelSupportsEffort } =
    await importFreshEffortModule({
      provider: 'codex',
      supportsCodexReasoningEffort: false,
    })

  expect(modelSupportsEffort('gpt-5.3-codex-spark')).toBe(false)
  expect(getAvailableEffortLevels('gpt-5.3-codex-spark')).toEqual([])
})
|
||||
@@ -5,6 +5,7 @@ import { isProSubscriber, isMaxSubscriber, isTeamSubscriber } from './auth.js'
|
||||
import { getFeatureValue_CACHED_MAY_BE_STALE } from 'src/services/analytics/growthbook.js'
|
||||
import { getAPIProvider } from './model/providers.js'
|
||||
import { get3PModelCapabilityOverride } from './model/modelSupportOverrides.js'
|
||||
import { supportsCodexReasoningEffort } from '../services/api/providerConfig.js'
|
||||
import { isEnvTruthy } from './envUtils.js'
|
||||
import type { EffortLevel } from 'src/entrypoints/sdk/runtimeTypes.js'
|
||||
|
||||
@@ -37,6 +38,9 @@ export function modelSupportsEffort(model: string): boolean {
|
||||
if (supported3P !== undefined) {
|
||||
return supported3P
|
||||
}
|
||||
if (modelUsesOpenAIEffort(model) && supportsCodexReasoningEffort(model)) {
|
||||
return true
|
||||
}
|
||||
// Supported by a subset of Claude 4 models
|
||||
if (m.includes('opus-4-6') || m.includes('sonnet-4-6')) {
|
||||
return true
|
||||
@@ -86,6 +90,9 @@ export function modelUsesOpenAIEffort(model: string): boolean {
|
||||
}
|
||||
|
||||
export function getAvailableEffortLevels(model: string): EffortLevel[] | OpenAIEffortLevel[] {
|
||||
if (!modelSupportsEffort(model)) {
|
||||
return []
|
||||
}
|
||||
if (modelUsesOpenAIEffort(model)) {
|
||||
return [...OPENAI_EFFORT_LEVELS] as OpenAIEffortLevel[]
|
||||
}
|
||||
@@ -136,7 +143,7 @@ export function parseEffortValue(value: unknown): EffortValue | undefined {
|
||||
|
||||
/**
|
||||
* Numeric values are model-default only and not persisted.
|
||||
* 'max' is session-scoped for external users (ants can persist it).
|
||||
* 'max' can now be persisted by all users.
|
||||
* Write sites call this before saving to settings so the Zod schema
|
||||
* (which only accepts string levels) never rejects a write.
|
||||
*/
|
||||
@@ -146,15 +153,15 @@ export function toPersistableEffort(
|
||||
if (value === 'low' || value === 'medium' || value === 'high') {
|
||||
return value
|
||||
}
|
||||
if (value === 'max' && process.env.USER_TYPE === 'ant') {
|
||||
if (value === 'max') {
|
||||
return value
|
||||
}
|
||||
return undefined
|
||||
}
|
||||
|
||||
export function getInitialEffortSetting(): EffortLevel | undefined {
|
||||
// toPersistableEffort filters 'max' for non-ants on read, so a manually
|
||||
// edited settings.json doesn't leak session-scoped max into a fresh session.
|
||||
// toPersistableEffort validates 'max' on read, so a manually
|
||||
// edited settings.json with an invalid level doesn't leak into a fresh session.
|
||||
return toPersistableEffort(getInitialSettings().effortLevel)
|
||||
}
|
||||
|
||||
|
||||
@@ -107,15 +107,15 @@ export function _resetTmuxControlModeProbeForTesting(): void {
|
||||
|
||||
/**
|
||||
* Whether fullscreen (flicker-free) mode is enabled. Env var takes highest
|
||||
* precedence, then the `flickerFreeMode` config setting, then the internal-only
|
||||
* default. External users can enable via `/config` instead of setting the env.
|
||||
* precedence, then the `flickerFreeMode` config setting, then defaults to off.
|
||||
* Users can enable via `/config` instead of setting the env.
|
||||
*
|
||||
* Priority order:
|
||||
* CLAUDE_CODE_NO_FLICKER=0 → always off
|
||||
* CLAUDE_CODE_NO_FLICKER=1 → always on (overrides tmux -CC guard too)
|
||||
* tmux -CC detected → off (corrupts terminal state)
|
||||
* config flickerFreeMode → on/off per user preference
|
||||
* USER_TYPE=ant → on by default for internal users
|
||||
* default → off
|
||||
*/
|
||||
export function isFullscreenEnvEnabled(): boolean {
|
||||
// Explicit env opt-out always wins.
|
||||
@@ -137,7 +137,7 @@ export function isFullscreenEnvEnabled(): boolean {
|
||||
// `/config` without having to set an env var.
|
||||
const configValue = getGlobalConfig().flickerFreeMode
|
||||
if (configValue !== undefined) return configValue
|
||||
return process.env.USER_TYPE === 'ant'
|
||||
return false
|
||||
}
|
||||
|
||||
/**
|
||||
|
||||
@@ -104,9 +104,19 @@ const OPENAI_CONTEXT_WINDOWS: Record<string, number> = {
|
||||
'devstral-latest': 256_000,
|
||||
'ministral-3b-latest': 256_000,
|
||||
|
||||
// MiniMax
|
||||
// MiniMax (all M2.x variants share 204,800 context, 131,072 max output)
|
||||
'MiniMax-M2.7': 204_800,
|
||||
'MiniMax-M2.7-highspeed': 204_800,
|
||||
'MiniMax-M2.5': 204_800,
|
||||
'MiniMax-M2.5-highspeed': 204_800,
|
||||
'MiniMax-M2.1': 204_800,
|
||||
'MiniMax-M2.1-highspeed': 204_800,
|
||||
'minimax-m2.7': 204_800,
|
||||
'minimax-m2.7-highspeed': 204_800,
|
||||
'minimax-m2.5': 204_800,
|
||||
'minimax-m2.5-highspeed': 204_800,
|
||||
'minimax-m2.1': 204_800,
|
||||
'minimax-m2.1-highspeed': 204_800,
|
||||
|
||||
// Google (via OpenRouter)
|
||||
'google/gemini-2.0-flash':1_048_576,
|
||||
@@ -223,9 +233,19 @@ const OPENAI_MAX_OUTPUT_TOKENS: Record<string, number> = {
|
||||
'mistral-large-latest': 32_768,
|
||||
'mistral-small-latest': 32_768,
|
||||
|
||||
// MiniMax
|
||||
// MiniMax (all M2.x variants share 131,072 max output)
|
||||
'MiniMax-M2.7': 131_072,
|
||||
'MiniMax-M2.7-highspeed': 131_072,
|
||||
'MiniMax-M2.5': 131_072,
|
||||
'MiniMax-M2.5-highspeed': 131_072,
|
||||
'MiniMax-M2.1': 131_072,
|
||||
'MiniMax-M2.1-highspeed': 131_072,
|
||||
'minimax-m2.7': 131_072,
|
||||
'minimax-m2.7-highspeed': 131_072,
|
||||
'minimax-m2.5': 131_072,
|
||||
'minimax-m2.5-highspeed': 131_072,
|
||||
'minimax-m2.1': 131_072,
|
||||
'minimax-m2.1-highspeed': 131_072,
|
||||
|
||||
// Google (via OpenRouter)
|
||||
'google/gemini-2.0-flash': 8_192,
|
||||
|
||||
@@ -19,6 +19,7 @@ import {
|
||||
getSettings_DEPRECATED,
|
||||
getSettingsFilePathForSource,
|
||||
getUseAutoModeDuringPlan,
|
||||
hasAllowBypassPermissionsMode,
|
||||
hasAutoModeOptIn,
|
||||
} from '../settings/settings.js'
|
||||
import {
|
||||
@@ -936,9 +937,11 @@ export async function initializeToolPermissionContext({
|
||||
const settings = getSettings_DEPRECATED() || {}
|
||||
const settingsDisableBypassPermissionsMode =
|
||||
settings.permissions?.disableBypassPermissionsMode === 'disable'
|
||||
const settingsAllowBypassPermissionsMode = hasAllowBypassPermissionsMode()
|
||||
const isBypassPermissionsModeAvailable =
|
||||
(permissionMode === 'bypassPermissions' ||
|
||||
allowDangerouslySkipPermissions) &&
|
||||
allowDangerouslySkipPermissions ||
|
||||
settingsAllowBypassPermissionsMode) &&
|
||||
!growthBookDisableBypassPermissionsMode &&
|
||||
!settingsDisableBypassPermissionsMode
|
||||
|
||||
|
||||
@@ -45,12 +45,9 @@ export function getPlanModeV2ExploreAgentCount(): number {
|
||||
/**
|
||||
* Check if plan mode interview phase is enabled.
|
||||
*
|
||||
* Config: ant=always_on, external=tengu_plan_mode_interview_phase gate, envVar=true
|
||||
* Config: tengu_plan_mode_interview_phase gate, envVar=true
|
||||
*/
|
||||
export function isPlanModeInterviewPhaseEnabled(): boolean {
|
||||
// Always on for ants
|
||||
if (process.env.USER_TYPE === 'ant') return true
|
||||
|
||||
const env = process.env.CLAUDE_CODE_PLAN_MODE_INTERVIEW_PHASE
|
||||
if (isEnvTruthy(env)) return true
|
||||
if (isEnvDefinedFalsy(env)) return false
|
||||
|
||||
@@ -4,6 +4,7 @@ import { tmpdir } from 'node:os'
|
||||
import { join } from 'node:path'
|
||||
import test from 'node:test'
|
||||
|
||||
import { DEFAULT_CODEX_BASE_URL } from '../services/api/providerConfig.ts'
|
||||
import {
|
||||
buildStartupEnvFromProfile,
|
||||
buildAtomicChatProfileEnv,
|
||||
@@ -12,7 +13,9 @@ import {
|
||||
buildLaunchEnv,
|
||||
buildOllamaProfileEnv,
|
||||
buildOpenAIProfileEnv,
|
||||
clearPersistedCodexOAuthProfile,
|
||||
createProfileFile,
|
||||
isPersistedCodexOAuthProfile,
|
||||
maskSecretForDisplay,
|
||||
loadProfileFile,
|
||||
PROFILE_FILE_NAME,
|
||||
@@ -23,6 +26,13 @@ import {
|
||||
type ProfileFile,
|
||||
} from './providerProfile.ts'
|
||||
|
||||
function makeJwt(payload: Record<string, unknown>): string {
|
||||
const header = Buffer.from(JSON.stringify({ alg: 'none', typ: 'JWT' }))
|
||||
.toString('base64url')
|
||||
const body = Buffer.from(JSON.stringify(payload)).toString('base64url')
|
||||
return `${header}.${body}.signature`
|
||||
}
|
||||
|
||||
function profile(profile: ProfileFile['profile'], env: ProfileFile['env']): ProfileFile {
|
||||
return {
|
||||
profile,
|
||||
@@ -330,6 +340,7 @@ test('codex profiles accept explicit codex credentials', () => {
|
||||
assert.deepEqual(env, {
|
||||
OPENAI_BASE_URL: 'https://chatgpt.com/backend-api/codex',
|
||||
OPENAI_MODEL: 'codexspark',
|
||||
CODEX_CREDENTIAL_SOURCE: 'existing',
|
||||
CODEX_API_KEY: 'codex-live',
|
||||
CHATGPT_ACCOUNT_ID: 'acct_123',
|
||||
})
|
||||
@@ -417,6 +428,77 @@ test('saveProfileFile writes a profile that loadProfileFile can read back', () =
|
||||
}
|
||||
})
|
||||
|
||||
test('buildCodexProfileEnv tags OAuth-saved profiles so logout can remove them safely', () => {
|
||||
const env = buildCodexProfileEnv({
|
||||
model: 'codexplan',
|
||||
apiKey: makeJwt({
|
||||
'https://api.openai.com/auth': {
|
||||
chatgpt_account_id: 'acct_oauth',
|
||||
},
|
||||
}),
|
||||
credentialSource: 'oauth',
|
||||
processEnv: {},
|
||||
})
|
||||
|
||||
assert.deepEqual(env, {
|
||||
OPENAI_BASE_URL: DEFAULT_CODEX_BASE_URL,
|
||||
OPENAI_MODEL: 'codexplan',
|
||||
CODEX_CREDENTIAL_SOURCE: 'oauth',
|
||||
CODEX_API_KEY: makeJwt({
|
||||
'https://api.openai.com/auth': {
|
||||
chatgpt_account_id: 'acct_oauth',
|
||||
},
|
||||
}),
|
||||
CHATGPT_ACCOUNT_ID: 'acct_oauth',
|
||||
})
|
||||
})
|
||||
|
||||
test('clearPersistedCodexOAuthProfile removes only persisted Codex OAuth profiles', async () => {
|
||||
const cwd = mkdtempSync(join(tmpdir(), 'openclaude-codex-oauth-profile-'))
|
||||
|
||||
try {
|
||||
const providerProfileModule = await import(
|
||||
`./providerProfile.ts?ts=${Date.now()}-${Math.random()}`
|
||||
)
|
||||
const {
|
||||
PROFILE_FILE_NAME,
|
||||
clearPersistedCodexOAuthProfile,
|
||||
createProfileFile,
|
||||
isPersistedCodexOAuthProfile,
|
||||
loadProfileFile,
|
||||
saveProfileFile,
|
||||
} = providerProfileModule
|
||||
const oauthProfile = createProfileFile('codex', {
|
||||
OPENAI_MODEL: 'codexplan',
|
||||
OPENAI_BASE_URL: DEFAULT_CODEX_BASE_URL,
|
||||
CHATGPT_ACCOUNT_ID: 'acct_oauth',
|
||||
CODEX_CREDENTIAL_SOURCE: 'oauth',
|
||||
})
|
||||
saveProfileFile(oauthProfile, { cwd })
|
||||
|
||||
assert.equal(isPersistedCodexOAuthProfile(loadProfileFile({ cwd })), true)
|
||||
assert.equal(
|
||||
clearPersistedCodexOAuthProfile({ cwd }),
|
||||
join(cwd, PROFILE_FILE_NAME),
|
||||
)
|
||||
assert.equal(loadProfileFile({ cwd }), null)
|
||||
|
||||
const existingCredentialProfile = createProfileFile('codex', {
|
||||
OPENAI_MODEL: 'codexplan',
|
||||
OPENAI_BASE_URL: DEFAULT_CODEX_BASE_URL,
|
||||
CHATGPT_ACCOUNT_ID: 'acct_existing',
|
||||
CODEX_CREDENTIAL_SOURCE: 'existing',
|
||||
})
|
||||
saveProfileFile(existingCredentialProfile, { cwd })
|
||||
|
||||
assert.equal(isPersistedCodexOAuthProfile(loadProfileFile({ cwd })), false)
|
||||
assert.equal(clearPersistedCodexOAuthProfile({ cwd }), null)
|
||||
assert.deepEqual(loadProfileFile({ cwd }), existingCredentialProfile)
|
||||
} finally {
|
||||
rmSync(cwd, { recursive: true, force: true })
|
||||
}
|
||||
})
|
||||
|
||||
test('buildStartupEnvFromProfile applies persisted gemini settings when no provider is explicitly selected', async () => {
|
||||
const env = await buildStartupEnvFromProfile({
|
||||
persisted: profile('gemini', {
|
||||
|
||||
@@ -7,6 +7,7 @@ import {
|
||||
resolveCodexApiCredentials,
|
||||
resolveProviderRequest,
|
||||
} from '../services/api/providerConfig.ts'
|
||||
import { parseChatgptAccountId } from '../services/api/codexOAuthShared.js'
|
||||
import {
|
||||
getGoalDefaultOpenAIModel,
|
||||
normalizeRecommendationGoal,
|
||||
@@ -14,6 +15,20 @@ import {
|
||||
} from './providerRecommendation.ts'
|
||||
import { readGeminiAccessToken } from './geminiCredentials.ts'
|
||||
import { getOllamaChatBaseUrl } from './providerDiscovery.ts'
|
||||
import { getProviderValidationError } from './providerValidation.ts'
|
||||
import {
|
||||
maskSecretForDisplay,
|
||||
redactSecretValueForDisplay,
|
||||
sanitizeApiKey,
|
||||
sanitizeProviderConfigValue,
|
||||
} from './providerSecrets.ts'
|
||||
|
||||
export {
|
||||
maskSecretForDisplay,
|
||||
redactSecretValueForDisplay,
|
||||
sanitizeApiKey,
|
||||
sanitizeProviderConfigValue,
|
||||
} from './providerSecrets.ts'
|
||||
|
||||
export const PROFILE_FILE_NAME = '.openclaude-profile.json'
|
||||
export const DEFAULT_GEMINI_BASE_URL =
|
||||
@@ -33,6 +48,7 @@ const PROFILE_ENV_KEYS = [
|
||||
'OPENAI_MODEL',
|
||||
'OPENAI_API_KEY',
|
||||
'CODEX_API_KEY',
|
||||
'CODEX_CREDENTIAL_SOURCE',
|
||||
'CHATGPT_ACCOUNT_ID',
|
||||
'CODEX_ACCOUNT_ID',
|
||||
'GEMINI_API_KEY',
|
||||
@@ -46,21 +62,20 @@ const PROFILE_ENV_KEYS = [
|
||||
'MISTRAL_MODEL',
|
||||
] as const
|
||||
|
||||
const SECRET_ENV_KEYS = [
|
||||
'OPENAI_API_KEY',
|
||||
'CODEX_API_KEY',
|
||||
'GEMINI_API_KEY',
|
||||
'GOOGLE_API_KEY',
|
||||
'MISTRAL_API_KEY',
|
||||
] as const
|
||||
|
||||
export type ProviderProfile = 'openai' | 'ollama' | 'codex' | 'gemini' | 'atomic-chat' | 'mistral'
|
||||
export type ProviderProfile =
|
||||
| 'openai'
|
||||
| 'ollama'
|
||||
| 'codex'
|
||||
| 'gemini'
|
||||
| 'atomic-chat'
|
||||
| 'mistral'
|
||||
|
||||
export type ProfileEnv = {
|
||||
OPENAI_BASE_URL?: string
|
||||
OPENAI_MODEL?: string
|
||||
OPENAI_API_KEY?: string
|
||||
CODEX_API_KEY?: string
|
||||
CODEX_CREDENTIAL_SOURCE?: 'oauth' | 'existing'
|
||||
CHATGPT_ACCOUNT_ID?: string
|
||||
CODEX_ACCOUNT_ID?: string
|
||||
GEMINI_API_KEY?: string
|
||||
@@ -78,13 +93,6 @@ export type ProfileFile = {
|
||||
createdAt: string
|
||||
}
|
||||
|
||||
type SecretValueSource = Partial<
|
||||
Pick<
|
||||
NodeJS.ProcessEnv & ProfileEnv,
|
||||
(typeof SECRET_ENV_KEYS)[number]
|
||||
>
|
||||
>
|
||||
|
||||
type ProfileFileLocation = {
|
||||
cwd?: string
|
||||
filePath?: string
|
||||
@@ -109,102 +117,6 @@ export function isProviderProfile(value: unknown): value is ProviderProfile {
|
||||
)
|
||||
}
|
||||
|
||||
export function sanitizeApiKey(
|
||||
key: string | null | undefined,
|
||||
): string | undefined {
|
||||
if (!key || key === 'SUA_CHAVE') return undefined
|
||||
return key
|
||||
}
|
||||
|
||||
function looksLikeSecretValue(value: string): boolean {
|
||||
const trimmed = value.trim()
|
||||
if (!trimmed) return false
|
||||
|
||||
if (trimmed.startsWith('sk-') || trimmed.startsWith('sk-ant-')) {
|
||||
return true
|
||||
}
|
||||
|
||||
if (trimmed.startsWith('AIza')) {
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
function collectSecretValues(
|
||||
sources: Array<SecretValueSource | null | undefined>,
|
||||
): string[] {
|
||||
const values = new Set<string>()
|
||||
|
||||
for (const source of sources) {
|
||||
if (!source) continue
|
||||
|
||||
for (const key of SECRET_ENV_KEYS) {
|
||||
const value = sanitizeApiKey(source[key])
|
||||
if (value) {
|
||||
values.add(value)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return [...values]
|
||||
}
|
||||
|
||||
export function maskSecretForDisplay(
|
||||
value: string | null | undefined,
|
||||
): string | undefined {
|
||||
const sanitized = sanitizeApiKey(value)
|
||||
if (!sanitized) return undefined
|
||||
|
||||
if (sanitized.length <= 8) {
|
||||
return 'configured'
|
||||
}
|
||||
|
||||
if (sanitized.startsWith('sk-')) {
|
||||
return `${sanitized.slice(0, 3)}...${sanitized.slice(-4)}`
|
||||
}
|
||||
|
||||
if (sanitized.startsWith('AIza')) {
|
||||
return `${sanitized.slice(0, 4)}...${sanitized.slice(-4)}`
|
||||
}
|
||||
|
||||
return `${sanitized.slice(0, 2)}...${sanitized.slice(-4)}`
|
||||
}
|
||||
|
||||
export function redactSecretValueForDisplay(
|
||||
value: string | null | undefined,
|
||||
...sources: Array<SecretValueSource | null | undefined>
|
||||
): string | undefined {
|
||||
if (!value) return undefined
|
||||
|
||||
const trimmed = value.trim()
|
||||
if (!trimmed) return trimmed
|
||||
|
||||
const secretValues = collectSecretValues(sources)
|
||||
if (secretValues.includes(trimmed) || looksLikeSecretValue(trimmed)) {
|
||||
return maskSecretForDisplay(trimmed) ?? 'configured'
|
||||
}
|
||||
|
||||
return trimmed
|
||||
}
|
||||
|
||||
export function sanitizeProviderConfigValue(
|
||||
value: string | null | undefined,
|
||||
...sources: Array<SecretValueSource | null | undefined>
|
||||
): string | undefined {
|
||||
if (!value) return undefined
|
||||
|
||||
const trimmed = value.trim()
|
||||
if (!trimmed) return undefined
|
||||
|
||||
const secretValues = collectSecretValues(sources)
|
||||
if (secretValues.includes(trimmed) || looksLikeSecretValue(trimmed)) {
|
||||
return undefined
|
||||
}
|
||||
|
||||
return trimmed
|
||||
}
|
||||
|
||||
export function buildOllamaProfileEnv(
|
||||
model: string,
|
||||
options: {
|
||||
@@ -335,6 +247,7 @@ export function buildCodexProfileEnv(options: {
|
||||
model?: string | null
|
||||
baseUrl?: string | null
|
||||
apiKey?: string | null
|
||||
credentialSource?: 'oauth' | 'existing'
|
||||
processEnv?: NodeJS.ProcessEnv
|
||||
}): ProfileEnv | null {
|
||||
const processEnv = options.processEnv ?? process.env
|
||||
@@ -346,10 +259,14 @@ export function buildCodexProfileEnv(options: {
|
||||
if (!credentials.apiKey || !credentials.accountId) {
|
||||
return null
|
||||
}
|
||||
const credentialSource =
|
||||
options.credentialSource ??
|
||||
(credentials.source === 'secure-storage' ? 'oauth' : 'existing')
|
||||
|
||||
const env: ProfileEnv = {
|
||||
OPENAI_BASE_URL: options.baseUrl || DEFAULT_CODEX_BASE_URL,
|
||||
OPENAI_MODEL: options.model || 'codexplan',
|
||||
CODEX_CREDENTIAL_SOURCE: credentialSource,
|
||||
}
|
||||
|
||||
if (key) {
|
||||
@@ -399,6 +316,30 @@ export function buildMistralProfileEnv(options: {
|
||||
return env
|
||||
}
|
||||
|
||||
export function buildCodexOAuthProfileEnv(
|
||||
tokens: {
|
||||
accessToken: string
|
||||
idToken?: string
|
||||
accountId?: string
|
||||
},
|
||||
): ProfileEnv | null {
|
||||
const accountId =
|
||||
tokens.accountId ??
|
||||
parseChatgptAccountId(tokens.idToken) ??
|
||||
parseChatgptAccountId(tokens.accessToken)
|
||||
|
||||
if (!accountId) {
|
||||
return null
|
||||
}
|
||||
|
||||
return {
|
||||
OPENAI_BASE_URL: DEFAULT_CODEX_BASE_URL,
|
||||
OPENAI_MODEL: 'codexplan',
|
||||
CHATGPT_ACCOUNT_ID: accountId,
|
||||
CODEX_CREDENTIAL_SOURCE: 'oauth',
|
||||
}
|
||||
}
|
||||
|
||||
export function createProfileFile(
|
||||
profile: ProviderProfile,
|
||||
env: ProfileEnv,
|
||||
@@ -410,6 +351,26 @@ export function createProfileFile(
|
||||
}
|
||||
}
|
||||
|
||||
export function isPersistedCodexOAuthProfile(
|
||||
persisted: ProfileFile | null,
|
||||
): boolean {
|
||||
return (
|
||||
persisted?.profile === 'codex' &&
|
||||
persisted.env.CODEX_CREDENTIAL_SOURCE === 'oauth'
|
||||
)
|
||||
}
|
||||
|
||||
export function clearPersistedCodexOAuthProfile(
|
||||
options?: ProfileFileLocation,
|
||||
): string | null {
|
||||
const persisted = loadProfileFile(options)
|
||||
if (!isPersistedCodexOAuthProfile(persisted)) {
|
||||
return null
|
||||
}
|
||||
|
||||
return deleteProfileFile(options)
|
||||
}
|
||||
|
||||
export function loadProfileFile(options?: ProfileFileLocation): ProfileFile | null {
|
||||
const filePath = resolveProfileFilePath(options)
|
||||
if (!existsSync(filePath)) {
|
||||
@@ -545,6 +506,7 @@ export async function buildLaunchEnv(options: {
|
||||
|
||||
delete env.CLAUDE_CODE_USE_OPENAI
|
||||
delete env.CLAUDE_CODE_USE_GITHUB
|
||||
delete env.CODEX_CREDENTIAL_SOURCE
|
||||
|
||||
env.GEMINI_MODEL =
|
||||
shellGeminiModel ||
|
||||
@@ -668,6 +630,7 @@ export async function buildLaunchEnv(options: {
|
||||
delete env.CLAUDE_CODE_USE_FOUNDRY
|
||||
delete env.CLAUDE_CODE_USE_GEMINI
|
||||
delete env.CLAUDE_CODE_USE_GITHUB
|
||||
delete env.CODEX_CREDENTIAL_SOURCE
|
||||
delete env.GEMINI_API_KEY
|
||||
delete env.GEMINI_AUTH_MODE
|
||||
delete env.GEMINI_ACCESS_TOKEN
|
||||
@@ -838,3 +801,40 @@ export function applyProfileEnvToProcessEnv(
|
||||
|
||||
Object.assign(targetEnv, nextEnv)
|
||||
}
|
||||
|
||||
export async function applySavedProfileToCurrentSession(options: {
|
||||
profileFile: ProfileFile
|
||||
processEnv?: NodeJS.ProcessEnv
|
||||
}): Promise<string | null> {
|
||||
const processEnv = options.processEnv ?? process.env
|
||||
const baseEnv = { ...processEnv }
|
||||
const isCodexOAuthProfile =
|
||||
options.profileFile.profile === 'codex' &&
|
||||
options.profileFile.env.CODEX_CREDENTIAL_SOURCE === 'oauth'
|
||||
|
||||
delete baseEnv.CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED
|
||||
delete baseEnv.CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED_ID
|
||||
if (isCodexOAuthProfile) {
|
||||
delete baseEnv.CODEX_API_KEY
|
||||
delete baseEnv.CODEX_ACCOUNT_ID
|
||||
delete baseEnv.CHATGPT_ACCOUNT_ID
|
||||
}
|
||||
|
||||
const nextEnv = await buildLaunchEnv({
|
||||
profile: options.profileFile.profile,
|
||||
persisted: options.profileFile,
|
||||
goal: normalizeRecommendationGoal(processEnv.OPENCLAUDE_PROFILE_GOAL),
|
||||
processEnv: baseEnv,
|
||||
getOllamaChatBaseUrl,
|
||||
readGeminiAccessToken,
|
||||
})
|
||||
const validationError = await getProviderValidationError(nextEnv)
|
||||
if (validationError) {
|
||||
return validationError
|
||||
}
|
||||
|
||||
delete processEnv.CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED
|
||||
delete processEnv.CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED_ID
|
||||
applyProfileEnvToProcessEnv(processEnv, nextEnv)
|
||||
return null
|
||||
}
|
||||
|
||||
@@ -259,6 +259,8 @@ describe('applyActiveProviderProfileFromConfig', () => {
|
||||
delete process.env.CLAUDE_CODE_USE_BEDROCK
|
||||
delete process.env.CLAUDE_CODE_USE_VERTEX
|
||||
delete process.env.CLAUDE_CODE_USE_FOUNDRY
|
||||
delete process.env.CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED
|
||||
delete process.env.CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED_ID
|
||||
|
||||
process.env.OPENAI_BASE_URL = 'http://localhost:11434/v1'
|
||||
process.env.OPENAI_MODEL = 'qwen2.5:3b'
|
||||
|
||||
@@ -417,7 +417,7 @@ export function applyActiveProviderProfileFromConfig(
|
||||
processEnv[PROFILE_ENV_APPLIED_FLAG] === '1' &&
|
||||
trimOrUndefined(processEnv[PROFILE_ENV_APPLIED_ID]) === activeProfile.id
|
||||
|
||||
if (!options?.force && hasProviderSelectionFlags(processEnv)) {
|
||||
if (!options?.force && (hasProviderSelectionFlags(processEnv) || processEnv[PROFILE_ENV_APPLIED_FLAG] === '1')) {
|
||||
// Respect explicit startup provider intent. Auto-heal only when this
|
||||
// exact active profile previously applied the current env.
|
||||
if (!isCurrentEnvProfileManaged) {
|
||||
|
||||
107
src/utils/providerSecrets.ts
Normal file
107
src/utils/providerSecrets.ts
Normal file
@@ -0,0 +1,107 @@
|
||||
const SECRET_ENV_KEYS = [
|
||||
'OPENAI_API_KEY',
|
||||
'CODEX_API_KEY',
|
||||
'GEMINI_API_KEY',
|
||||
'GOOGLE_API_KEY',
|
||||
'MISTRAL_API_KEY',
|
||||
] as const
|
||||
|
||||
export type SecretValueSource = Partial<
|
||||
Record<(typeof SECRET_ENV_KEYS)[number], string | undefined>
|
||||
>
|
||||
|
||||
export function sanitizeApiKey(
|
||||
key: string | null | undefined,
|
||||
): string | undefined {
|
||||
if (!key || key === 'SUA_CHAVE') return undefined
|
||||
return key
|
||||
}
|
||||
|
||||
function looksLikeSecretValue(value: string): boolean {
|
||||
const trimmed = value.trim()
|
||||
if (!trimmed) return false
|
||||
|
||||
if (trimmed.startsWith('sk-') || trimmed.startsWith('sk-ant-')) {
|
||||
return true
|
||||
}
|
||||
|
||||
if (trimmed.startsWith('AIza')) {
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
function collectSecretValues(
|
||||
sources: Array<SecretValueSource | null | undefined>,
|
||||
): string[] {
|
||||
const values = new Set<string>()
|
||||
|
||||
for (const source of sources) {
|
||||
if (!source) continue
|
||||
|
||||
for (const key of SECRET_ENV_KEYS) {
|
||||
const value = sanitizeApiKey(source[key])
|
||||
if (value) {
|
||||
values.add(value)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return [...values]
|
||||
}
|
||||
|
||||
export function maskSecretForDisplay(
|
||||
value: string | null | undefined,
|
||||
): string | undefined {
|
||||
const sanitized = sanitizeApiKey(value)
|
||||
if (!sanitized) return undefined
|
||||
|
||||
if (sanitized.length <= 8) {
|
||||
return 'configured'
|
||||
}
|
||||
|
||||
if (sanitized.startsWith('sk-')) {
|
||||
return `${sanitized.slice(0, 3)}...${sanitized.slice(-4)}`
|
||||
}
|
||||
|
||||
if (sanitized.startsWith('AIza')) {
|
||||
return `${sanitized.slice(0, 4)}...${sanitized.slice(-4)}`
|
||||
}
|
||||
|
||||
return `${sanitized.slice(0, 2)}...${sanitized.slice(-4)}`
|
||||
}
|
||||
|
||||
export function redactSecretValueForDisplay(
|
||||
value: string | null | undefined,
|
||||
...sources: Array<SecretValueSource | null | undefined>
|
||||
): string | undefined {
|
||||
if (!value) return undefined
|
||||
|
||||
const trimmed = value.trim()
|
||||
if (!trimmed) return trimmed
|
||||
|
||||
const secretValues = collectSecretValues(sources)
|
||||
if (secretValues.includes(trimmed) || looksLikeSecretValue(trimmed)) {
|
||||
return maskSecretForDisplay(trimmed) ?? 'configured'
|
||||
}
|
||||
|
||||
return trimmed
|
||||
}
|
||||
|
||||
export function sanitizeProviderConfigValue(
|
||||
value: string | null | undefined,
|
||||
...sources: Array<SecretValueSource | null | undefined>
|
||||
): string | undefined {
|
||||
if (!value) return undefined
|
||||
|
||||
const trimmed = value.trim()
|
||||
if (!trimmed) return undefined
|
||||
|
||||
const secretValues = collectSecretValues(sources)
|
||||
if (secretValues.includes(trimmed) || looksLikeSecretValue(trimmed)) {
|
||||
return undefined
|
||||
}
|
||||
|
||||
return trimmed
|
||||
}
|
||||
@@ -6,11 +6,13 @@ import {
|
||||
resolveProviderRequest,
|
||||
} from '../services/api/providerConfig.js'
|
||||
import { getGlobalClaudeFile } from './env.js'
|
||||
import { isBareMode } from './envUtils.js'
|
||||
import {
|
||||
type GeminiResolvedCredential,
|
||||
resolveGeminiCredential,
|
||||
} from './geminiAuth.js'
|
||||
import { PROFILE_FILE_NAME, redactSecretValueForDisplay } from './providerProfile.js'
|
||||
import { PROFILE_FILE_NAME } from './providerProfile.js'
|
||||
import { redactSecretValueForDisplay } from './providerSecrets.js'
|
||||
|
||||
function isEnvTruthy(value: string | undefined): boolean {
|
||||
if (!value) return false
|
||||
@@ -82,6 +84,7 @@ export async function getProviderValidationError(
|
||||
) => Promise<GeminiResolvedCredential>
|
||||
},
|
||||
): Promise<string | null> {
|
||||
const secretSource = env
|
||||
const useOpenAI = isEnvTruthy(env.CLAUDE_CODE_USE_OPENAI)
|
||||
const useGithub = isEnvTruthy(env.CLAUDE_CODE_USE_GITHUB)
|
||||
|
||||
@@ -131,16 +134,17 @@ export async function getProviderValidationError(
|
||||
if (request.transport === 'codex_responses') {
|
||||
const credentials = resolveCodexApiCredentials(env)
|
||||
if (!credentials.apiKey) {
|
||||
const oauthHint = isBareMode() ? '' : ', choose Codex OAuth in /provider'
|
||||
const authHint = credentials.authPath
|
||||
? ` or put auth.json at ${credentials.authPath}`
|
||||
: ''
|
||||
? `${oauthHint} or put auth.json at ${credentials.authPath}`
|
||||
: oauthHint
|
||||
const safeModel =
|
||||
redactSecretValueForDisplay(request.requestedModel, env) ??
|
||||
redactSecretValueForDisplay(request.requestedModel, secretSource) ??
|
||||
'the requested model'
|
||||
return `Codex auth is required for ${safeModel}. Set CODEX_API_KEY${authHint}.`
|
||||
}
|
||||
if (!credentials.accountId) {
|
||||
return 'Codex auth is missing chatgpt_account_id. Re-login with Codex or set CHATGPT_ACCOUNT_ID/CODEX_ACCOUNT_ID.'
|
||||
return 'Codex auth is missing chatgpt_account_id. Re-login with Codex OAuth, Codex CLI, or set CHATGPT_ACCOUNT_ID/CODEX_ACCOUNT_ID.'
|
||||
}
|
||||
return null
|
||||
}
|
||||
|
||||
@@ -5,6 +5,16 @@ import { windowsCredentialStorage } from './windowsCredentialStorage.js'
|
||||
import { plainTextStorage } from './plainTextStorage.js'
|
||||
|
||||
export interface SecureStorageData {
|
||||
codex?: {
|
||||
apiKey?: string
|
||||
accessToken: string
|
||||
refreshToken?: string
|
||||
idToken?: string
|
||||
accountId?: string
|
||||
profileId?: string
|
||||
lastRefreshAt?: number
|
||||
lastRefreshFailureAt?: number
|
||||
}
|
||||
mcpOAuth?: Record<
|
||||
string,
|
||||
{
|
||||
@@ -36,22 +46,44 @@ export interface SecureStorage {
|
||||
delete(): boolean
|
||||
}
|
||||
|
||||
const unavailableSecureStorage: SecureStorage = {
|
||||
name: 'unavailable-secure-storage',
|
||||
read: () => null,
|
||||
readAsync: async () => null,
|
||||
update: () => ({
|
||||
success: false,
|
||||
warning:
|
||||
'Secure storage is unavailable on this platform without plaintext fallback.',
|
||||
}),
|
||||
delete: () => true,
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the appropriate secure storage implementation for the current platform.
|
||||
* Prefers native OS vaults (Keychain, libsecret, Credential Locker) with a plaintext fallback.
|
||||
*/
|
||||
export function getSecureStorage(): SecureStorage {
|
||||
export function getSecureStorage(options?: {
|
||||
allowPlainTextFallback?: boolean
|
||||
}): SecureStorage {
|
||||
const allowPlainTextFallback = options?.allowPlainTextFallback ?? true
|
||||
|
||||
if (process.platform === 'darwin') {
|
||||
return createFallbackStorage(macOsKeychainStorage, plainTextStorage)
|
||||
return allowPlainTextFallback
|
||||
? createFallbackStorage(macOsKeychainStorage, plainTextStorage)
|
||||
: macOsKeychainStorage
|
||||
}
|
||||
|
||||
if (process.platform === 'linux') {
|
||||
return createFallbackStorage(linuxSecretStorage, plainTextStorage)
|
||||
return allowPlainTextFallback
|
||||
? createFallbackStorage(linuxSecretStorage, plainTextStorage)
|
||||
: linuxSecretStorage
|
||||
}
|
||||
|
||||
if (process.platform === 'win32') {
|
||||
return createFallbackStorage(windowsCredentialStorage, plainTextStorage)
|
||||
return allowPlainTextFallback
|
||||
? createFallbackStorage(windowsCredentialStorage, plainTextStorage)
|
||||
: windowsCredentialStorage
|
||||
}
|
||||
|
||||
return plainTextStorage
|
||||
return allowPlainTextFallback ? plainTextStorage : unavailableSecureStorage
|
||||
}
|
||||
|
||||
@@ -64,8 +64,10 @@ describe("Secure Storage Platform Implementations", () => {
|
||||
windowsCredentialStorage.update(testData);
|
||||
|
||||
const script = mockExecaSync.mock.calls[0][1][1];
|
||||
const options = mockExecaSync.mock.calls[0][2];
|
||||
expect(script).toContain(expectedName);
|
||||
expect(script).toContain("Add-Type -AssemblyName System.Runtime.WindowsRuntime");
|
||||
expect(script).toContain("ProtectedData");
|
||||
expect(options.input).toContain("secret-token");
|
||||
});
|
||||
});
|
||||
|
||||
@@ -85,32 +87,54 @@ describe("Secure Storage Platform Implementations", () => {
|
||||
windowsCredentialStorage.update(dataWithDollar);
|
||||
|
||||
const script = mockExecaSync.mock.calls[0][1][1];
|
||||
// Should use single quotes for the payload
|
||||
expect(script).toMatch(/'\{.*\}'/);
|
||||
// Should escape ' by doubling it
|
||||
expect(script).not.toContain("'token-with-$env:USERNAME'");
|
||||
// But since it's JSON, the value will be "token-with-$env:USERNAME" inside the single-quoted string
|
||||
// The JSON itself shouldn't have single quotes unless the data has them.
|
||||
const options = mockExecaSync.mock.calls[0][2];
|
||||
expect(script).toContain("[Console]::In.ReadToEnd()");
|
||||
expect(options.input).toContain("token-with-$env:USERNAME");
|
||||
|
||||
const dataWithQuote = { mcpOAuth: { "s": { accessToken: "token'quote", expiresAt: 1, serverName: "s", serverUrl: "u" } } };
|
||||
windowsCredentialStorage.update(dataWithQuote);
|
||||
const script2 = mockExecaSync.mock.calls[1][1][1];
|
||||
expect(script2).toContain("token''quote");
|
||||
const options2 = mockExecaSync.mock.calls[1][2];
|
||||
expect(options2.input).toContain("token'quote");
|
||||
});
|
||||
|
||||
test("delete() includes assembly load", () => {
|
||||
windowsCredentialStorage.delete();
|
||||
const script = mockExecaSync.mock.calls[0][1][1];
|
||||
const script = mockExecaSync.mock.calls[1][1][1];
|
||||
expect(script).toContain("Add-Type -AssemblyName System.Runtime.WindowsRuntime");
|
||||
});
|
||||
|
||||
test("escapes double quotes in username", () => {
|
||||
process.env.USER = 'user"name';
|
||||
windowsCredentialStorage.read();
|
||||
const script = mockExecaSync.mock.calls[0][1][1];
|
||||
const script = mockExecaSync.mock.calls[1][1][1];
|
||||
expect(script).toContain('user`"name');
|
||||
expect(script).not.toContain('user"name');
|
||||
});
|
||||
|
||||
test("read() falls back to legacy PasswordVault when the DPAPI payload is invalid JSON", () => {
|
||||
mockExecaSync
|
||||
.mockImplementationOnce(() => ({ exitCode: 0, stdout: "{not-json" }))
|
||||
.mockImplementationOnce(() => ({
|
||||
exitCode: 0,
|
||||
stdout: JSON.stringify(testData),
|
||||
}));
|
||||
|
||||
const result = windowsCredentialStorage.read();
|
||||
|
||||
expect(result).toEqual(testData);
|
||||
expect(mockExecaSync).toHaveBeenCalledTimes(2);
|
||||
});
|
||||
|
||||
test("read() fails closed when the legacy PasswordVault payload is invalid JSON", () => {
|
||||
mockExecaSync
|
||||
.mockImplementationOnce(() => ({ exitCode: 1, stdout: "" }))
|
||||
.mockImplementationOnce(() => ({ exitCode: 0, stdout: "{not-json" }));
|
||||
|
||||
const result = windowsCredentialStorage.read();
|
||||
|
||||
expect(result).toBeNull();
|
||||
expect(mockExecaSync).toHaveBeenCalledTimes(2);
|
||||
});
|
||||
});
|
||||
|
||||
describe("Linux secret-tool Interaction", () => {
|
||||
|
||||
@@ -1,4 +1,6 @@
|
||||
import { execaSync } from 'execa'
|
||||
import { join } from 'path'
|
||||
import { getClaudeConfigHomeDir } from '../envUtils.js'
|
||||
import { jsonParse, jsonStringify } from '../slowOperations.js'
|
||||
import {
|
||||
CREDENTIALS_SERVICE_SUFFIX,
|
||||
@@ -8,90 +10,216 @@ import {
|
||||
import type { SecureStorage, SecureStorageData } from './index.js'
|
||||
|
||||
/**
|
||||
* Windows-specific secure storage implementation using the Windows Credential Locker.
|
||||
* Accessed via PowerShell's [Windows.Security.Credentials.PasswordVault].
|
||||
* Windows-specific secure storage implementation using DPAPI for new writes,
|
||||
* with best-effort reads/deletes from the legacy PasswordVault path.
|
||||
*/
|
||||
export const windowsCredentialStorage: SecureStorage = {
|
||||
name: 'credential-locker',
|
||||
read(): SecureStorageData | null {
|
||||
const resourceName = getSecureStorageServiceName(
|
||||
CREDENTIALS_SERVICE_SUFFIX,
|
||||
).replace(/"/g, '`"')
|
||||
const username = getUsername().replace(/"/g, '`"')
|
||||
// PowerShell script to retrieve password from vault
|
||||
const script = `
|
||||
Add-Type -AssemblyName System.Runtime.WindowsRuntime
|
||||
function escapePowerShellSingleQuoted(value: string): string {
|
||||
return value.replace(/'/g, "''")
|
||||
}
|
||||
|
||||
function getLegacyResourceName(): string {
|
||||
return getSecureStorageServiceName(CREDENTIALS_SERVICE_SUFFIX)
|
||||
}
|
||||
|
||||
function getWindowsSecureStorageEntropy(): string {
|
||||
return `${getLegacyResourceName()}:${getUsername()}`
|
||||
}
|
||||
|
||||
function getWindowsSecureStorageFilePath(): string {
|
||||
const resourceName = getLegacyResourceName().replace(/[^a-zA-Z0-9._-]/g, '_')
|
||||
return join(getClaudeConfigHomeDir(), `${resourceName}.secure.dpapi`)
|
||||
}
|
||||
|
||||
function runPowerShell(
|
||||
script: string,
|
||||
options?: { input?: string },
|
||||
): ReturnType<typeof execaSync> | null {
|
||||
try {
|
||||
return execaSync('powershell.exe', ['-Command', script], {
|
||||
input: options?.input,
|
||||
reject: false,
|
||||
})
|
||||
} catch {
|
||||
return null
|
||||
}
|
||||
}
|
||||
|
||||
function getFailureWarning(
|
||||
result: ReturnType<typeof execaSync> | null,
|
||||
fallback: string,
|
||||
): string {
|
||||
const stderr = result?.stderr?.trim()
|
||||
if (stderr) {
|
||||
return stderr
|
||||
}
|
||||
|
||||
if (typeof result?.exitCode === 'number' && result.exitCode !== 0) {
|
||||
return `${fallback} (exit code ${result.exitCode}).`
|
||||
}
|
||||
|
||||
return fallback
|
||||
}
|
||||
|
||||
function readLegacyPasswordVault(): SecureStorageData | null {
|
||||
const resourceName = getLegacyResourceName().replace(/"/g, '`"')
|
||||
const username = getUsername().replace(/"/g, '`"')
|
||||
const script = `
|
||||
Add-Type -AssemblyName System.Runtime.WindowsRuntime
|
||||
try {
|
||||
$vault = New-Object Windows.Security.Credentials.PasswordVault
|
||||
$cred = $vault.Retrieve("${resourceName}", "${username}")
|
||||
$cred.FillPassword()
|
||||
[Console]::Out.Write($cred.Password)
|
||||
} catch {
|
||||
exit 1
|
||||
}
|
||||
`
|
||||
|
||||
const result = runPowerShell(script)
|
||||
if (result?.exitCode === 0 && result.stdout) {
|
||||
try {
|
||||
return jsonParse(result.stdout)
|
||||
} catch {
|
||||
return null
|
||||
}
|
||||
}
|
||||
|
||||
return null
|
||||
}
|
||||
|
||||
export const windowsCredentialStorage: SecureStorage = {
|
||||
name: 'credential-locker-dpapi',
|
||||
read(): SecureStorageData | null {
|
||||
const filePath = escapePowerShellSingleQuoted(
|
||||
getWindowsSecureStorageFilePath(),
|
||||
)
|
||||
const entropy = escapePowerShellSingleQuoted(
|
||||
getWindowsSecureStorageEntropy(),
|
||||
)
|
||||
const script = `
|
||||
try {
|
||||
$cred = $vault.Retrieve("${resourceName}", "${username}")
|
||||
$cred.FillPassword()
|
||||
$cred.Password
|
||||
Add-Type -AssemblyName System.Security
|
||||
$path = '${filePath}'
|
||||
if (!(Test-Path -LiteralPath $path)) {
|
||||
exit 1
|
||||
}
|
||||
|
||||
$protectedBase64 = [System.IO.File]::ReadAllText(
|
||||
$path,
|
||||
[System.Text.Encoding]::UTF8
|
||||
).Trim()
|
||||
if (-not $protectedBase64) {
|
||||
exit 1
|
||||
}
|
||||
|
||||
$protectedBytes = [Convert]::FromBase64String($protectedBase64)
|
||||
$entropyBytes = [System.Text.Encoding]::UTF8.GetBytes('${entropy}')
|
||||
$bytes = [System.Security.Cryptography.ProtectedData]::Unprotect(
|
||||
$protectedBytes,
|
||||
$entropyBytes,
|
||||
[System.Security.Cryptography.DataProtectionScope]::CurrentUser
|
||||
)
|
||||
[Console]::Out.Write([System.Text.Encoding]::UTF8.GetString($bytes))
|
||||
} catch {
|
||||
exit 1
|
||||
}
|
||||
`
|
||||
try {
|
||||
const result = execaSync('powershell.exe', ['-Command', script], {
|
||||
reject: false,
|
||||
})
|
||||
if (result.exitCode === 0 && result.stdout) {
|
||||
|
||||
const result = runPowerShell(script)
|
||||
if (result?.exitCode === 0 && result.stdout) {
|
||||
try {
|
||||
return jsonParse(result.stdout)
|
||||
} catch {
|
||||
return readLegacyPasswordVault()
|
||||
}
|
||||
} catch {
|
||||
// fall through
|
||||
}
|
||||
return null
|
||||
|
||||
return readLegacyPasswordVault()
|
||||
},
|
||||
async readAsync(): Promise<SecureStorageData | null> {
|
||||
return this.read()
|
||||
},
|
||||
update(data: SecureStorageData): { success: boolean; warning?: string } {
|
||||
const resourceName = getSecureStorageServiceName(
|
||||
CREDENTIALS_SERVICE_SUFFIX,
|
||||
).replace(/"/g, '`"')
|
||||
const username = getUsername().replace(/"/g, '`"')
|
||||
// Use single quotes for the payload and escape ' by doubling it ('').
|
||||
// This prevents PowerShell from expanding $... inside the string.
|
||||
const payload = jsonStringify(data).replace(/'/g, "''")
|
||||
// PowerShell script to add/update credential in vault
|
||||
const filePath = escapePowerShellSingleQuoted(
|
||||
getWindowsSecureStorageFilePath(),
|
||||
)
|
||||
const entropy = escapePowerShellSingleQuoted(
|
||||
getWindowsSecureStorageEntropy(),
|
||||
)
|
||||
const payload = jsonStringify(data)
|
||||
const script = `
|
||||
Add-Type -AssemblyName System.Runtime.WindowsRuntime
|
||||
$vault = New-Object Windows.Security.Credentials.PasswordVault
|
||||
$cred = New-Object Windows.Security.Credentials.PasswordCredential("${resourceName}", "${username}", '${payload}')
|
||||
$vault.Add($cred)
|
||||
try {
|
||||
Add-Type -AssemblyName System.Security
|
||||
$path = '${filePath}'
|
||||
$directory = [System.IO.Path]::GetDirectoryName($path)
|
||||
if ($directory) {
|
||||
[System.IO.Directory]::CreateDirectory($directory) | Out-Null
|
||||
}
|
||||
|
||||
$payload = [Console]::In.ReadToEnd()
|
||||
$bytes = [System.Text.Encoding]::UTF8.GetBytes($payload)
|
||||
$entropyBytes = [System.Text.Encoding]::UTF8.GetBytes('${entropy}')
|
||||
$protectedBytes = [System.Security.Cryptography.ProtectedData]::Protect(
|
||||
$bytes,
|
||||
$entropyBytes,
|
||||
[System.Security.Cryptography.DataProtectionScope]::CurrentUser
|
||||
)
|
||||
$protectedBase64 = [Convert]::ToBase64String($protectedBytes)
|
||||
[System.IO.File]::WriteAllText(
|
||||
$path,
|
||||
$protectedBase64,
|
||||
[System.Text.Encoding]::UTF8
|
||||
)
|
||||
} catch {
|
||||
Write-Error $_.Exception.Message
|
||||
exit 1
|
||||
}
|
||||
`
|
||||
try {
|
||||
const result = execaSync('powershell.exe', ['-Command', script], {
|
||||
reject: false,
|
||||
})
|
||||
return { success: result.exitCode === 0 }
|
||||
} catch {
|
||||
return { success: false }
|
||||
const result = runPowerShell(script, { input: payload })
|
||||
if (result?.exitCode === 0) {
|
||||
return { success: true }
|
||||
}
|
||||
|
||||
return {
|
||||
success: false,
|
||||
warning: getFailureWarning(
|
||||
result,
|
||||
'Windows secure storage could not encrypt credentials with DPAPI',
|
||||
),
|
||||
}
|
||||
},
|
||||
delete(): boolean {
|
||||
const resourceName = getSecureStorageServiceName(
|
||||
CREDENTIALS_SERVICE_SUFFIX,
|
||||
).replace(/"/g, '`"')
|
||||
const username = getUsername().replace(/"/g, '`"')
|
||||
// PowerShell script to remove credential from vault
|
||||
const script = `
|
||||
Add-Type -AssemblyName System.Runtime.WindowsRuntime
|
||||
$vault = New-Object Windows.Security.Credentials.PasswordVault
|
||||
const filePath = escapePowerShellSingleQuoted(
|
||||
getWindowsSecureStorageFilePath(),
|
||||
)
|
||||
const removeDpapiScript = `
|
||||
try {
|
||||
$path = '${filePath}'
|
||||
if (Test-Path -LiteralPath $path) {
|
||||
Remove-Item -LiteralPath $path -Force
|
||||
}
|
||||
} catch {
|
||||
exit 1
|
||||
}
|
||||
`
|
||||
const removeDpapiResult = runPowerShell(removeDpapiScript)
|
||||
|
||||
const resourceName = getLegacyResourceName().replace(/"/g, '`"')
|
||||
const username = getUsername().replace(/"/g, '`"')
|
||||
const removeLegacyScript = `
|
||||
Add-Type -AssemblyName System.Runtime.WindowsRuntime
|
||||
try {
|
||||
$vault = New-Object Windows.Security.Credentials.PasswordVault
|
||||
$cred = $vault.Retrieve("${resourceName}", "${username}")
|
||||
$vault.Remove($cred)
|
||||
} catch {
|
||||
exit 0
|
||||
}
|
||||
`
|
||||
try {
|
||||
const result = execaSync('powershell.exe', ['-Command', script], {
|
||||
reject: false,
|
||||
})
|
||||
return result.exitCode === 0
|
||||
} catch {
|
||||
return false
|
||||
}
|
||||
const removeLegacyResult = runPowerShell(removeLegacyScript)
|
||||
|
||||
void removeLegacyResult
|
||||
|
||||
return (removeDpapiResult?.exitCode ?? 1) === 0
|
||||
},
|
||||
}
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user