Compare commits
397 Commits
v0.1.6
...
fix/issue-
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
cce4b5afa4 | ||
|
|
6ea3eb6483 | ||
|
|
f699c1f2fc | ||
|
|
52b4c5c2ff | ||
|
|
c6c5f0608c | ||
|
|
46a9d3eec4 | ||
|
|
2586a9cddb | ||
|
|
d45628c413 | ||
|
|
6dedffe5ff | ||
|
|
a3e728a114 | ||
|
|
818689b2ee | ||
|
|
d9ae56bc58 | ||
|
|
af9a3caa4d | ||
|
|
a0d657ee18 | ||
|
|
29f7579377 | ||
|
|
9e23c2bec4 | ||
|
|
9070220292 | ||
|
|
26413f6d30 | ||
|
|
44f9cac70d | ||
|
|
ff2a380723 | ||
|
|
c4cb98a4f0 | ||
|
|
b5f7047358 | ||
|
|
64b1014b9a | ||
|
|
5a21d05741 | ||
|
|
038f715b7a | ||
|
|
b694ccfff1 | ||
|
|
dcbe29558a | ||
|
|
a4c6757023 | ||
|
|
6e58b81937 | ||
|
|
e346b8d5ec | ||
|
|
b750e9e97d | ||
|
|
28de94df5d | ||
|
|
23e8cfbd5b | ||
|
|
531e3f1059 | ||
|
|
3c4d8435c4 | ||
|
|
67de6bd2cf | ||
|
|
4d559c9135 | ||
|
|
b7b83eff13 | ||
|
|
44a2c30d5f | ||
|
|
5b9cd21e37 | ||
|
|
e92e5274b2 | ||
|
|
86bce4ae74 | ||
|
|
c13842e91c | ||
|
|
458120889f | ||
|
|
ee19159c17 | ||
|
|
13de4e85df | ||
|
|
a5bfcbbadf | ||
|
|
268c0398e4 | ||
|
|
761924daa7 | ||
|
|
e908864da7 | ||
|
|
b95d2221df | ||
|
|
2b15e16421 | ||
|
|
6a62e3ff76 | ||
|
|
06e7684eb5 | ||
|
|
ae3b723f3b | ||
|
|
a6a3de5ac1 | ||
|
|
64582c119d | ||
|
|
85eab2751e | ||
|
|
4d4fb2880e | ||
|
|
fdef4a1b4c | ||
|
|
4cb963e660 | ||
|
|
b09972f223 | ||
|
|
336ddcc50d | ||
|
|
c0b8a59a23 | ||
|
|
aab489055c | ||
|
|
7002cb302b | ||
|
|
739b8d1f40 | ||
|
|
f166ec1a4e | ||
|
|
13e9f22a83 | ||
|
|
f828171ef1 | ||
|
|
e6e8d9a248 | ||
|
|
2c98be7002 | ||
|
|
b786b765f0 | ||
|
|
55c5f262a9 | ||
|
|
002a8f1f6d | ||
|
|
3d1979ff06 | ||
|
|
b0d9fe7112 | ||
|
|
651123db1f | ||
|
|
34246635fb | ||
|
|
43ac6dba75 | ||
|
|
80a00acc2c | ||
|
|
eed77e6579 | ||
|
|
b280c740a6 | ||
|
|
2ff5710329 | ||
|
|
d6f5130c20 | ||
|
|
d32a2a1329 | ||
|
|
fbcd928f7f | ||
|
|
77083d769b | ||
|
|
b66633ea4d | ||
|
|
51191d6132 | ||
|
|
6b2121da12 | ||
|
|
c207cdbdcc | ||
|
|
a00b7928de | ||
|
|
12dd3755c6 | ||
|
|
114f772a4a | ||
|
|
7187fc007a | ||
|
|
0ed50ccfe7 | ||
|
|
131b31bf0e | ||
|
|
c1beea9867 | ||
|
|
658d076909 | ||
|
|
a07e5ef990 | ||
|
|
25ce2ca7bf | ||
|
|
1741f32cb7 | ||
|
|
fc7dc9ca0d | ||
|
|
252808bbd0 | ||
|
|
0e48884f56 | ||
|
|
b818dd5958 | ||
|
|
24d485f42f | ||
|
|
99a17144ee | ||
|
|
df2b9f2b7b | ||
|
|
adbe391e63 | ||
|
|
03e0b06e07 | ||
|
|
31be66d764 | ||
|
|
7c8bdcc3e2 | ||
|
|
64298a663f | ||
|
|
30c866d31a | ||
|
|
f6a4455ecf | ||
|
|
aeaa658f77 | ||
|
|
d2a057c6f1 | ||
|
|
08cc6f3287 | ||
|
|
84fcc7f7e0 | ||
|
|
ad11414def | ||
|
|
9419e8a4a2 | ||
|
|
41a86d05fa | ||
|
|
fa4b6a96c0 | ||
|
|
d03d77b110 | ||
|
|
15de1d6190 | ||
|
|
812facf024 | ||
|
|
2e39d2607a | ||
|
|
a3633ac094 | ||
|
|
3cefe2297d | ||
|
|
40ac164501 | ||
|
|
b3f3dc4e66 | ||
|
|
2e0e14d713 | ||
|
|
a02c44143b | ||
|
|
7817fe88bd | ||
|
|
4c50977f3c | ||
|
|
b126e38b1a | ||
|
|
6e94dd9136 | ||
|
|
91e4cfb15b | ||
|
|
f4ac709fa6 | ||
|
|
8aaa4f22ac | ||
|
|
a7f5982f64 | ||
|
|
cb8f8b7ac2 | ||
|
|
07621a6f8d | ||
|
|
692471850f | ||
|
|
68c296833d | ||
|
|
9ccaa7a675 | ||
|
|
598651f423 | ||
|
|
c385047abb | ||
|
|
42b121bd0d | ||
|
|
32fbd0c7b4 | ||
|
|
e30ad17ae0 | ||
|
|
c328fdf9e2 | ||
|
|
4ad6bc50c1 | ||
|
|
284d9bda36 | ||
|
|
537c469c3a | ||
|
|
ccaa193eec | ||
|
|
2caf2fd982 | ||
|
|
ad724dc3a4 | ||
|
|
648ae8053b | ||
|
|
3188f6ac66 | ||
|
|
69ea1f1e4a | ||
|
|
f9ce81bfb3 | ||
|
|
4975cfc2e0 | ||
|
|
600c01faf7 | ||
|
|
b07bafa5bd | ||
|
|
85aa8b0985 | ||
|
|
e365cb4010 | ||
|
|
52d33a87a0 | ||
|
|
b4bd95b477 | ||
|
|
1e057025d6 | ||
|
|
aff2bd87e4 | ||
|
|
72e6a945fe | ||
|
|
39f3b2babd | ||
|
|
ff7d49990d | ||
|
|
8ece290087 | ||
|
|
6c61790063 | ||
|
|
26eef92fe7 | ||
|
|
112df59117 | ||
|
|
8724d59d48 | ||
|
|
af08b4f762 | ||
|
|
5012c160c9 | ||
|
|
c1934974aa | ||
|
|
94de37d44f | ||
|
|
3b3aca716d | ||
|
|
d5852ca73d | ||
|
|
c534aa5771 | ||
|
|
60d3d8961a | ||
|
|
3b9893b586 | ||
|
|
daf2c90b6d | ||
|
|
4ac7367733 | ||
|
|
7350a798cb | ||
|
|
5ef79546e9 | ||
|
|
daa3aa27a0 | ||
|
|
5ff34283c4 | ||
|
|
d1a2df2f69 | ||
|
|
ba1b9913aa | ||
|
|
0d27ca596a | ||
|
|
8fc40ee8c4 | ||
|
|
2f162af60c | ||
|
|
9e84d2fddc | ||
|
|
75d2543854 | ||
|
|
01acc4c10e | ||
|
|
e4cf810e14 | ||
|
|
0951c8bc59 | ||
|
|
4c3118e071 | ||
|
|
80a2f1414c | ||
|
|
462a985d7e | ||
|
|
ef881b247f | ||
|
|
a0bdab24c0 | ||
|
|
cdc92d16e4 | ||
|
|
fbf3385395 | ||
|
|
ea335aeddc | ||
|
|
280c9732f5 | ||
|
|
08be5181ab | ||
|
|
b4725c19e0 | ||
|
|
3c2e80a1ae | ||
|
|
c3c60b7bab | ||
|
|
27e6505bfd | ||
|
|
cdbe016e6f | ||
|
|
bd4daa3ee7 | ||
|
|
5be5387096 | ||
|
|
897ef2002e | ||
|
|
ab3c46a591 | ||
|
|
03dff274a1 | ||
|
|
bffd43056f | ||
|
|
c52245fc0a | ||
|
|
365bd3102d | ||
|
|
3df635c24d | ||
|
|
2031c67d46 | ||
|
|
694c242865 | ||
|
|
fb221baa21 | ||
|
|
e5c9a6f629 | ||
|
|
70cfa61582 | ||
|
|
afed73fa5a | ||
|
|
c735233f92 | ||
|
|
8ce09ae743 | ||
|
|
931ee96f5a | ||
|
|
c1e5e363cd | ||
|
|
b0d796e5c3 | ||
|
|
6987a54a71 | ||
|
|
fb32e3f829 | ||
|
|
59ab2701f7 | ||
|
|
7668abaed0 | ||
|
|
36d1c45954 | ||
|
|
116cc8e6bd | ||
|
|
19c00e67ed | ||
|
|
7c0ea68b65 | ||
|
|
f3a984dde1 | ||
|
|
72c6e97094 | ||
|
|
f3ab727ec2 | ||
|
|
29edece72f | ||
|
|
6181050811 | ||
|
|
0fd0026a76 | ||
|
|
6919d774f2 | ||
|
|
aa69e85795 | ||
|
|
66bbb75836 | ||
|
|
2c6ec0119e | ||
|
|
74a25d01a6 | ||
|
|
7cf4c88ab8 | ||
|
|
f68b9aa57d | ||
|
|
20d1ee8427 | ||
|
|
089a42fc07 | ||
|
|
f5b20fc517 | ||
|
|
184ec250fd | ||
|
|
43deb49c2c | ||
|
|
0e7a2446c7 | ||
|
|
63ad0196d6 | ||
|
|
32046e9b40 | ||
|
|
7bd7d0f54d | ||
|
|
cdf4bad95b | ||
|
|
4158214895 | ||
|
|
a6ed57d0f4 | ||
|
|
7b68eb1acb | ||
|
|
84950642ae | ||
|
|
a287597273 | ||
|
|
1cd4164062 | ||
|
|
47c53a18e8 | ||
|
|
cf90457428 | ||
|
|
5e77d82620 | ||
|
|
11d9660a80 | ||
|
|
1a57335d74 | ||
|
|
7bc903d875 | ||
|
|
4c22de2585 | ||
|
|
63daf33b48 | ||
|
|
2ee43d7ee8 | ||
|
|
3581d3f83f | ||
|
|
4a4394bb65 | ||
|
|
b4aa27183d | ||
|
|
96b9e0235b | ||
|
|
7095abb837 | ||
|
|
8501786852 | ||
|
|
37d4c21739 | ||
|
|
a43023705b | ||
|
|
73db9b5fd3 | ||
|
|
2b5cf9f0c1 | ||
|
|
4237a72b92 | ||
|
|
942d09ca9c | ||
|
|
ac4efae870 | ||
|
|
4c6adf4774 | ||
|
|
ff124dcdfb | ||
|
|
8e8671fc51 | ||
|
|
4c1ba35aa1 | ||
|
|
5baee3b491 | ||
|
|
43ba2cbfae | ||
|
|
5c25ac4e9a | ||
|
|
84ac06bac9 | ||
|
|
c66b859342 | ||
|
|
1709f5c098 | ||
|
|
5d6443799a | ||
|
|
3ef09f911e | ||
|
|
3353101e83 | ||
|
|
6f4aa02123 | ||
|
|
b65921e8c3 | ||
|
|
0fe8551d33 | ||
|
|
145c99b297 | ||
|
|
6319df02f0 | ||
|
|
3c8c63a78e | ||
|
|
35676be381 | ||
|
|
d430ddd568 | ||
|
|
1514220ee7 | ||
|
|
680cd69d8a | ||
|
|
0a5849e4d2 | ||
|
|
708a0a18fe | ||
|
|
5c4469fe81 | ||
|
|
8f50f17674 | ||
|
|
9f48bb4431 | ||
|
|
4d0886a4fe | ||
|
|
6e311f96a3 | ||
|
|
0a1ac92341 | ||
|
|
1ee2ce931a | ||
|
|
bc2a4bcdd5 | ||
|
|
118b0793e0 | ||
|
|
5ccda35941 | ||
|
|
f385740bd6 | ||
|
|
ef251fe3f5 | ||
|
|
f4818dc213 | ||
|
|
aac326fa3f | ||
|
|
71a3f36e95 | ||
|
|
23216ca01c | ||
|
|
3d72d9e5e2 | ||
|
|
4260f5bcd7 | ||
|
|
49b9c043f5 | ||
|
|
a7ec88b1e5 | ||
|
|
903a30916a | ||
|
|
6b7c0e5339 | ||
|
|
0c88dea247 | ||
|
|
cec3629017 | ||
|
|
7c09b1f01c | ||
|
|
0a42839475 | ||
|
|
64ba7fdb9a | ||
|
|
fb27164ddf | ||
|
|
ad1f328672 | ||
|
|
001f89f62c | ||
|
|
5cd95f4bb1 | ||
|
|
6c4225f6f4 | ||
|
|
3ca6c299d6 | ||
|
|
7a7437b309 | ||
|
|
c94f9e18c3 | ||
|
|
e16917614c | ||
|
|
38d35e314f | ||
|
|
14de9cf0fb | ||
|
|
7f969200fb | ||
|
|
e494015e9a | ||
|
|
5b20fe783d | ||
|
|
6aec8416cc | ||
|
|
08f0b6030e | ||
|
|
4f78bde085 | ||
|
|
3b7b9740f2 | ||
|
|
577e654ae7 | ||
|
|
f07f11b7b6 | ||
|
|
d156aed32d | ||
|
|
93bc50f8cd | ||
|
|
2619401d34 | ||
|
|
25c5987276 | ||
|
|
1059915c84 | ||
|
|
fcb1b82d9b | ||
|
|
e54c39e3cb | ||
|
|
a6ba34a3de | ||
|
|
7128a938d9 | ||
|
|
f340b199c8 | ||
|
|
63546dcd9c | ||
|
|
302d9d4e44 | ||
|
|
310f1d344a | ||
|
|
9590066b5b | ||
|
|
ad947e996a | ||
|
|
b2ba2c0cc5 | ||
|
|
0746802b6a | ||
|
|
2bade922ef | ||
|
|
4918caa22b | ||
|
|
ffbc1f8f6e | ||
|
|
5f75f67a27 | ||
|
|
f3ebd7d256 | ||
|
|
1a60509fdc | ||
|
|
47b19c9a00 | ||
|
|
8c6a10517f |
16
.dockerignore
Normal file
16
.dockerignore
Normal file
@@ -0,0 +1,16 @@
|
||||
node_modules
|
||||
dist
|
||||
.git
|
||||
.gitignore
|
||||
.env
|
||||
.env.*
|
||||
!.env.example
|
||||
coverage
|
||||
reports
|
||||
vscode-extension
|
||||
python
|
||||
docs
|
||||
*.md
|
||||
!README.md
|
||||
.github
|
||||
.tsbuildinfo
|
||||
423
.env.example
Normal file
423
.env.example
Normal file
@@ -0,0 +1,423 @@
|
||||
# =============================================================================
|
||||
# OpenClaude Environment Configuration
|
||||
# =============================================================================
|
||||
# Copy this file to .env and fill in your values:
|
||||
# cp .env.example .env
|
||||
#
|
||||
# Only set the variables for the provider you want to use.
|
||||
# All other sections can be left commented out.
|
||||
# =============================================================================
|
||||
|
||||
# =============================================================================
|
||||
# SYSTEM-WIDE SETUP (OPTIONAL)
|
||||
# =============================================================================
|
||||
# Instead of using a .env file per project, you can set these variables
|
||||
# system-wide so OpenClaude works from any directory on your machine.
|
||||
#
|
||||
# STEP 1: Pick your provider variables from the list below.
|
||||
# STEP 2: Set them using the method for your OS (see further down).
|
||||
#
|
||||
# ── Provider variables ───────────────────────────────────────────────
|
||||
#
|
||||
# Option 1 — Anthropic:
|
||||
# ANTHROPIC_API_KEY=sk-ant-your-key-here
|
||||
# ANTHROPIC_MODEL=claude-sonnet-4-5 (optional)
|
||||
# ANTHROPIC_BASE_URL=https://api.anthropic.com (optional)
|
||||
#
|
||||
# Option 2 — OpenAI:
|
||||
# CLAUDE_CODE_USE_OPENAI=1
|
||||
# OPENAI_API_KEY=sk-your-key-here
|
||||
# OPENAI_MODEL=gpt-4o
|
||||
# OPENAI_BASE_URL=https://api.openai.com/v1 (optional)
|
||||
#
|
||||
# Option 3 — Google Gemini:
|
||||
# CLAUDE_CODE_USE_GEMINI=1
|
||||
# GEMINI_API_KEY=your-gemini-key-here
|
||||
# GEMINI_MODEL=gemini-2.0-flash
|
||||
# GEMINI_BASE_URL=https://generativelanguage.googleapis.com (optional)
|
||||
#
|
||||
# Option 4 — GitHub Models:
|
||||
# CLAUDE_CODE_USE_GITHUB=1
|
||||
# GITHUB_TOKEN=ghp_your-token-here
|
||||
#
|
||||
# Option 5 — Ollama (local):
|
||||
# CLAUDE_CODE_USE_OPENAI=1
|
||||
# OPENAI_BASE_URL=http://localhost:11434/v1
|
||||
# OPENAI_API_KEY=ollama
|
||||
# OPENAI_MODEL=llama3.2
|
||||
#
|
||||
# Option 6 — LM Studio (local):
|
||||
# CLAUDE_CODE_USE_OPENAI=1
|
||||
# OPENAI_BASE_URL=http://localhost:1234/v1
|
||||
# OPENAI_MODEL=your-model-id-here
|
||||
# OPENAI_API_KEY=lmstudio (optional)
|
||||
#
|
||||
# Option 7 — AWS Bedrock (may also need: aws configure):
|
||||
# CLAUDE_CODE_USE_BEDROCK=1
|
||||
# AWS_REGION=us-east-1
|
||||
# AWS_DEFAULT_REGION=us-east-1
|
||||
# AWS_BEARER_TOKEN_BEDROCK=your-bearer-token-here
|
||||
# ANTHROPIC_BEDROCK_BASE_URL=https://bedrock-runtime.us-east-1.amazonaws.com
|
||||
#
|
||||
# Option 8 — Google Vertex AI:
|
||||
# CLAUDE_CODE_USE_VERTEX=1
|
||||
# ANTHROPIC_VERTEX_PROJECT_ID=your-gcp-project-id
|
||||
# CLOUD_ML_REGION=us-east5
|
||||
# GOOGLE_CLOUD_PROJECT=your-gcp-project-id
|
||||
#
|
||||
# ── How to set variables on each OS ──────────────────────────────────
|
||||
#
|
||||
# macOS (zsh):
|
||||
# 1. Open: nano ~/.zshrc
|
||||
# 2. Add each variable as: export VAR_NAME=value
|
||||
# 3. Save and reload: source ~/.zshrc
|
||||
#
|
||||
# Linux (bash):
|
||||
# 1. Open: nano ~/.bashrc
|
||||
# 2. Add each variable as: export VAR_NAME=value
|
||||
# 3. Save and reload: source ~/.bashrc
|
||||
#
|
||||
# Windows (PowerShell):
|
||||
# Run for each variable:
|
||||
# [System.Environment]::SetEnvironmentVariable('VAR_NAME', 'value', 'User')
|
||||
# Then restart your terminal.
|
||||
#
|
||||
# Windows (Command Prompt):
|
||||
# Run for each variable:
|
||||
# setx VAR_NAME value
|
||||
# Then restart your terminal.
|
||||
#
|
||||
# Windows (GUI):
|
||||
# Settings > System > About > Advanced System Settings >
|
||||
# Environment Variables > under "User variables" click New,
|
||||
# then add each variable.
|
||||
#
|
||||
# ── Important notes ──────────────────────────────────────────────────
|
||||
#
|
||||
# LOCAL SERVERS: If using LM Studio or Ollama, the server MUST be
|
||||
# running with a model loaded before you launch OpenClaude —
|
||||
# otherwise you'll get connection errors.
|
||||
#
|
||||
# SWITCHING PROVIDERS: To temporarily switch, unset the relevant
|
||||
# variables in your current terminal session:
|
||||
#
|
||||
# macOS / Linux:
|
||||
# unset VAR_NAME
|
||||
# # e.g.: unset CLAUDE_CODE_USE_OPENAI OPENAI_BASE_URL OPENAI_MODEL
|
||||
#
|
||||
# Windows (PowerShell — current session only):
|
||||
# Remove-Item Env:VAR_NAME
|
||||
#
|
||||
# To permanently remove a variable on Windows:
|
||||
# [System.Environment]::SetEnvironmentVariable('VAR_NAME', $null, 'User')
|
||||
#
|
||||
# LOAD ORDER:
|
||||
# Shell and system environment variables are inherited by the process.
|
||||
# Project .env files are only used if your launcher or shell loads them
|
||||
# before starting OpenClaude.
|
||||
# COMPATIBILITY:
|
||||
# System-wide variables work regardless of how you run OpenClaude:
|
||||
# npx, global npm install, bun run, or node directly. Any process
|
||||
# launched from your terminal inherits your shell's environment.
|
||||
#
|
||||
# REMINDER: Make sure .env is in your .gitignore to avoid committing secrets.
|
||||
# =============================================================================
|
||||
|
||||
# =============================================================================
|
||||
# PROVIDER SELECTION — uncomment ONE block below
|
||||
# =============================================================================
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# Option 1: Anthropic (default — no provider flag needed)
|
||||
# -----------------------------------------------------------------------------
|
||||
ANTHROPIC_API_KEY=sk-ant-your-key-here
|
||||
|
||||
# Override the default model (optional)
|
||||
# ANTHROPIC_MODEL=claude-sonnet-4-5
|
||||
|
||||
# Use a custom Anthropic-compatible endpoint (optional)
|
||||
# ANTHROPIC_BASE_URL=https://api.anthropic.com
|
||||
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# Option 2: OpenAI
|
||||
# -----------------------------------------------------------------------------
|
||||
# CLAUDE_CODE_USE_OPENAI=1
|
||||
# OPENAI_API_KEY=sk-your-key-here
|
||||
# OPENAI_MODEL=gpt-4o
|
||||
# For DeepSeek, set:
|
||||
# OPENAI_BASE_URL=https://api.deepseek.com/v1
|
||||
# OPENAI_MODEL=deepseek-v4-flash
|
||||
# Optional: OPENAI_MODEL=deepseek-v4-pro
|
||||
# Legacy aliases also work: deepseek-chat and deepseek-reasoner
|
||||
# For Z.AI GLM Coding Plan, set:
|
||||
# OPENAI_BASE_URL=https://api.z.ai/api/coding/paas/v4
|
||||
# OPENAI_MODEL=GLM-5.1
|
||||
# Optional: OPENAI_MODEL=GLM-5-Turbo, GLM-4.7, or GLM-4.5-Air
|
||||
|
||||
# Use a custom OpenAI-compatible endpoint (optional — defaults to api.openai.com)
|
||||
# OPENAI_BASE_URL=https://api.openai.com/v1
|
||||
# Choose the OpenAI-compatible API surface (optional — defaults to chat_completions)
|
||||
# Supported: chat_completions, responses
|
||||
# OPENAI_API_FORMAT=chat_completions
|
||||
# Choose a custom auth header for OpenAI-compatible providers (optional).
|
||||
# Authorization defaults to Bearer; custom headers default to the raw API key.
|
||||
# Set OPENAI_AUTH_HEADER_VALUE when the header value differs from OPENAI_API_KEY.
|
||||
# OPENAI_AUTH_HEADER=api-key
|
||||
# OPENAI_AUTH_SCHEME=raw
|
||||
# OPENAI_AUTH_HEADER_VALUE=your-header-value-here
|
||||
|
||||
# Fallback context window size (tokens) when the model is not found in the
|
||||
# built-in table (default: 128000). Increase this for models with larger
|
||||
# context windows (e.g. 200000 for Claude-sized contexts).
|
||||
# CLAUDE_CODE_OPENAI_FALLBACK_CONTEXT_WINDOW=128000
|
||||
|
||||
# Per-model context window overrides as a JSON object.
|
||||
# Takes precedence over the built-in table, so you can register new or
|
||||
# custom models without patching source.
|
||||
# Example: CLAUDE_CODE_OPENAI_CONTEXT_WINDOWS={"my-corp/llm-v3":262144,"gpt-4o-mini":128000}
|
||||
# CLAUDE_CODE_OPENAI_CONTEXT_WINDOWS=
|
||||
|
||||
# Per-model maximum output token overrides as a JSON object.
|
||||
# Use this alongside CLAUDE_CODE_OPENAI_CONTEXT_WINDOWS when your model
|
||||
# supports a different output limit than what the built-in table specifies.
|
||||
# Example: CLAUDE_CODE_OPENAI_MAX_OUTPUT_TOKENS={"my-corp/llm-v3":8192}
|
||||
# CLAUDE_CODE_OPENAI_MAX_OUTPUT_TOKENS=
|
||||
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# Option 3: Google Gemini
|
||||
# -----------------------------------------------------------------------------
|
||||
# CLAUDE_CODE_USE_GEMINI=1
|
||||
# GEMINI_API_KEY=your-gemini-key-here
|
||||
# GEMINI_MODEL=gemini-2.0-flash
|
||||
|
||||
# Use a custom Gemini endpoint (optional)
|
||||
# GEMINI_BASE_URL=https://generativelanguage.googleapis.com/v1beta/openai
|
||||
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# Option 4: GitHub Models
|
||||
# -----------------------------------------------------------------------------
|
||||
# CLAUDE_CODE_USE_GITHUB=1
|
||||
# GITHUB_TOKEN=ghp_your-token-here
|
||||
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# Option 5: Ollama (local models)
|
||||
# -----------------------------------------------------------------------------
|
||||
# CLAUDE_CODE_USE_OPENAI=1
|
||||
# OPENAI_BASE_URL=http://localhost:11434/v1
|
||||
# OPENAI_API_KEY=ollama
|
||||
# OPENAI_MODEL=llama3.2
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# Option 6: LM Studio (local models)
|
||||
# -----------------------------------------------------------------------------
|
||||
# LM Studio exposes an OpenAI-compatible API, so we use the OpenAI provider.
|
||||
# Make sure LM Studio is running with the Developer server enabled
|
||||
# (Developer tab > toggle server ON).
|
||||
#
|
||||
# Steps:
|
||||
# 1. Download and install LM Studio from https://lmstudio.ai
|
||||
# 2. Search for and download a model (e.g. any coding or instruct model)
|
||||
# 3. Load the model and start the Developer server
|
||||
# 4. Set OPENAI_MODEL to the model ID shown in LM Studio's Developer tab
|
||||
#
|
||||
# The default server URL is http://localhost:1234 — change the port below
|
||||
# if you've configured a different one in LM Studio.
|
||||
#
|
||||
# OPENAI_API_KEY is optional — LM Studio runs locally and ignores it.
|
||||
# Some clients require a non-empty value; if you get auth errors, set it
|
||||
# to any dummy value (e.g. "lmstudio").
|
||||
#
|
||||
# CLAUDE_CODE_USE_OPENAI=1
|
||||
# OPENAI_BASE_URL=http://localhost:1234/v1
|
||||
# OPENAI_MODEL=your-model-id-here
|
||||
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# Option 7: AWS Bedrock
|
||||
# -----------------------------------------------------------------------------
|
||||
|
||||
# You may also need AWS CLI credentials configured (run: aws configure)
|
||||
# or have AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY set in your
|
||||
# environment in addition to the variables below.
|
||||
#
|
||||
# CLAUDE_CODE_USE_BEDROCK=1
|
||||
# AWS_REGION=us-east-1
|
||||
# AWS_DEFAULT_REGION=us-east-1
|
||||
# AWS_BEARER_TOKEN_BEDROCK=your-bearer-token-here
|
||||
# ANTHROPIC_BEDROCK_BASE_URL=https://bedrock-runtime.us-east-1.amazonaws.com
|
||||
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# Option 8: Google Vertex AI
|
||||
# -----------------------------------------------------------------------------
|
||||
# CLAUDE_CODE_USE_VERTEX=1
|
||||
# ANTHROPIC_VERTEX_PROJECT_ID=your-gcp-project-id
|
||||
# CLOUD_ML_REGION=us-east5
|
||||
# GOOGLE_CLOUD_PROJECT=your-gcp-project-id
|
||||
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# Option 9: NVIDIA NIM
|
||||
# -----------------------------------------------------------------------------
|
||||
# NVIDIA NIM provides hosted inference endpoints for NVIDIA models.
|
||||
# Get your API key from https://build.nvidia.com/
|
||||
#
|
||||
# CLAUDE_CODE_USE_OPENAI=1
|
||||
# NVIDIA_API_KEY=nvapi-your-key-here
|
||||
# OPENAI_BASE_URL=https://integrate.api.nvidia.com/v1
|
||||
# OPENAI_MODEL=nvidia/llama-3.1-nemotron-70b-instruct
|
||||
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# Option 10: MiniMax
|
||||
# -----------------------------------------------------------------------------
|
||||
# MiniMax API provides text generation models.
|
||||
# Get your API key from https://platform.minimax.io/
|
||||
#
|
||||
# CLAUDE_CODE_USE_OPENAI=1
|
||||
# MINIMAX_API_KEY=your-minimax-key-here
|
||||
# OPENAI_BASE_URL=https://api.minimax.io/v1
|
||||
# OPENAI_MODEL=MiniMax-M2.5
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# OPTIONAL TUNING
|
||||
# =============================================================================
|
||||
|
||||
# Max number of API retries on failure (default: 10)
|
||||
# CLAUDE_CODE_MAX_RETRIES=10
|
||||
|
||||
# Enable persistent retry mode for unattended/CI sessions
|
||||
# Retries 429/529 indefinitely with smart backoff
|
||||
# CLAUDE_CODE_UNATTENDED_RETRY=1
|
||||
|
||||
# Enable extended key reporting (Kitty keyboard protocol)
|
||||
# Useful for iTerm2, WezTerm, Ghostty if modifier keys feel off
|
||||
# OPENCLAUDE_ENABLE_EXTENDED_KEYS=1
|
||||
|
||||
# Disable "Co-authored-by" line in git commits made by OpenClaude
|
||||
# OPENCLAUDE_DISABLE_CO_AUTHORED_BY=1
|
||||
|
||||
# Disable strict tool schema normalization for non-Gemini providers
|
||||
# Useful when MCP tools with complex optional params (e.g. list[dict])
|
||||
# trigger "Extra required key ... supplied" errors from OpenAI-compatible endpoints
|
||||
# OPENCLAUDE_DISABLE_STRICT_TOOLS=1
|
||||
|
||||
# Disable hidden <system-reminder> messages injected into tool output
|
||||
# Suppresses the file-read cyber-risk reminder and the todo/task tool nudges
|
||||
# Useful for users who want full transparency over what the model sees
|
||||
# OPENCLAUDE_DISABLE_TOOL_REMINDERS=1
|
||||
|
||||
# Log structured per-request token usage (including cache metrics) to stderr.
|
||||
# Useful for auditing cache hit rate / debugging cost spikes outside the REPL.
|
||||
# Any truthy value enables it ("verbose", "1", "true").
|
||||
#
|
||||
# Complements (does NOT replace) CLAUDE_CODE_ENABLE_TOKEN_USAGE_ATTACHMENT —
|
||||
# they serve different audiences:
|
||||
# - OPENCLAUDE_LOG_TOKEN_USAGE is user-facing: one JSON line per API
|
||||
# request on stderr, intended for humans inspecting cost/caching.
|
||||
# - CLAUDE_CODE_ENABLE_TOKEN_USAGE_ATTACHMENT is model-facing: injects
|
||||
# a context-usage attachment INTO the prompt so the model can reason
|
||||
# about its own remaining context. Does not touch stderr.
|
||||
# Turn on whichever audience you're debugging; both can run together.
|
||||
# OPENCLAUDE_LOG_TOKEN_USAGE=verbose
|
||||
|
||||
# Custom timeout for API requests in milliseconds (default: varies)
|
||||
# API_TIMEOUT_MS=60000
|
||||
|
||||
# Enable debug logging
|
||||
# CLAUDE_DEBUG=1
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# WEB SEARCH (OPTIONAL)
|
||||
# =============================================================================
|
||||
# OpenClaude includes a web search tool. By default it uses DuckDuckGo (free)
|
||||
# or the provider's native search (Anthropic firstParty / vertex).
|
||||
#
|
||||
# Set one API key below to enable a provider. That's it.
|
||||
|
||||
# ── Provider API keys — set ONE of these ────────────────────────────
|
||||
|
||||
# Tavily (AI-optimized search, recommended)
|
||||
# TAVILY_API_KEY=tvly-your-key-here
|
||||
|
||||
# Exa (neural/semantic search)
|
||||
# EXA_API_KEY=your-exa-key-here
|
||||
|
||||
# You.com (RAG-ready snippets)
|
||||
# YOU_API_KEY=your-you-key-here
|
||||
|
||||
# Jina (s.jina.ai endpoint)
|
||||
# JINA_API_KEY=your-jina-key-here
|
||||
|
||||
# Bing Web Search
|
||||
# BING_API_KEY=your-bing-key-here
|
||||
|
||||
# Mojeek (privacy-focused)
|
||||
# MOJEEK_API_KEY=your-mojeek-key-here
|
||||
|
||||
# Linkup
|
||||
# LINKUP_API_KEY=your-linkup-key-here
|
||||
|
||||
# Firecrawl (premium, uses @mendable/firecrawl-js)
|
||||
# FIRECRAWL_API_KEY=fc-your-key-here
|
||||
|
||||
# ── Provider selection mode ─────────────────────────────────────────
|
||||
#
|
||||
# WEB_SEARCH_PROVIDER controls fallback behavior:
|
||||
#
|
||||
# "auto" (default) — try all configured providers, fall through on failure
|
||||
# "custom" — custom API only, throw on failure (NOT in auto chain)
|
||||
# "firecrawl" — firecrawl only
|
||||
# "tavily" — tavily only
|
||||
# "exa" — exa only
|
||||
# "you" — you.com only
|
||||
# "jina" — jina only
|
||||
# "bing" — bing only
|
||||
# "mojeek" — mojeek only
|
||||
# "linkup" — linkup only
|
||||
# "ddg" — duckduckgo only
|
||||
# "native" — anthropic native / codex only
|
||||
#
|
||||
# Auto mode priority: firecrawl → tavily → exa → you → jina → bing → mojeek →
|
||||
# linkup → ddg
|
||||
# Note: "custom" is NOT in the auto chain. To use the custom API provider,
|
||||
# you must explicitly set WEB_SEARCH_PROVIDER=custom.
|
||||
#
|
||||
# WEB_SEARCH_PROVIDER=auto
|
||||
|
||||
# ── Built-in custom API presets ─────────────────────────────────────
|
||||
#
|
||||
# Use with WEB_KEY for the API key:
|
||||
# WEB_PROVIDER=searxng|google|brave|serpapi
|
||||
# WEB_KEY=your-api-key-here
|
||||
|
||||
# ── Custom API endpoint (advanced) ──────────────────────────────────
|
||||
#
|
||||
# WEB_SEARCH_API — base URL of your search endpoint
|
||||
# WEB_QUERY_PARAM — query parameter name (default: "q")
|
||||
# WEB_METHOD — GET or POST (default: GET)
|
||||
# WEB_PARAMS — extra static query params as JSON: {"lang":"en","count":"10"}
|
||||
# WEB_URL_TEMPLATE — URL template with {query} for path embedding
|
||||
# WEB_BODY_TEMPLATE — custom POST body with {query} placeholder
|
||||
# WEB_AUTH_HEADER — header name for API key (default: "Authorization")
|
||||
# WEB_AUTH_SCHEME — prefix before key (default: "Bearer")
|
||||
# WEB_HEADERS — extra headers as "Name: value; Name2: value2"
|
||||
# WEB_JSON_PATH — dot-path to results array in response
|
||||
|
||||
# ── Custom API security guardrails ──────────────────────────────────
|
||||
#
|
||||
# The custom provider enforces security guardrails by default.
|
||||
# Override these only if you understand the risks.
|
||||
#
|
||||
# WEB_CUSTOM_TIMEOUT_SEC=15 — request timeout in seconds (default 15)
|
||||
# WEB_CUSTOM_MAX_BODY_KB=300 — max POST body size in KB (default 300)
|
||||
# WEB_CUSTOM_ALLOW_ARBITRARY_HEADERS=false — set "true" to use non-standard headers
|
||||
# WEB_CUSTOM_ALLOW_HTTP=false — set "true" to allow http:// URLs
|
||||
# WEB_CUSTOM_ALLOW_PRIVATE=false — set "true" to target localhost/private IPs
|
||||
# (needed for self-hosted SearXNG)
|
||||
41
.github/ISSUE_TEMPLATE/bug_report.md
vendored
Normal file
41
.github/ISSUE_TEMPLATE/bug_report.md
vendored
Normal file
@@ -0,0 +1,41 @@
|
||||
---
|
||||
name: Bug report
|
||||
about: Report a reproducible problem in OpenClaude
|
||||
title: ""
|
||||
labels: ""
|
||||
assignees: ""
|
||||
---
|
||||
|
||||
## Summary
|
||||
|
||||
What is broken?
|
||||
|
||||
## Steps to Reproduce
|
||||
|
||||
1.
|
||||
2.
|
||||
3.
|
||||
|
||||
## Expected Behavior
|
||||
|
||||
What should have happened?
|
||||
|
||||
## Actual Behavior
|
||||
|
||||
What happened instead?
|
||||
|
||||
## Environment
|
||||
|
||||
- OpenClaude version:
|
||||
- OS:
|
||||
- Terminal:
|
||||
- Provider:
|
||||
- Model:
|
||||
|
||||
## Logs / Screenshots
|
||||
|
||||
Paste the exact error output or attach screenshots if useful.
|
||||
|
||||
## Additional Context
|
||||
|
||||
Anything else maintainers should know?
|
||||
5
.github/ISSUE_TEMPLATE/config.yml
vendored
Normal file
5
.github/ISSUE_TEMPLATE/config.yml
vendored
Normal file
@@ -0,0 +1,5 @@
|
||||
blank_issues_enabled: true
|
||||
contact_links:
|
||||
- name: OpenClaude Discussions
|
||||
url: https://github.com/Gitlawb/openclaude/discussions
|
||||
about: Use Discussions for setup help, questions, ideas, and community conversation.
|
||||
27
.github/ISSUE_TEMPLATE/feature_request.md
vendored
Normal file
27
.github/ISSUE_TEMPLATE/feature_request.md
vendored
Normal file
@@ -0,0 +1,27 @@
|
||||
---
|
||||
name: Feature request
|
||||
about: Suggest an improvement or new capability for OpenClaude
|
||||
title: ""
|
||||
labels: ""
|
||||
assignees: ""
|
||||
---
|
||||
|
||||
## Summary
|
||||
|
||||
What would you like OpenClaude to do?
|
||||
|
||||
## Problem
|
||||
|
||||
What problem does this solve for you?
|
||||
|
||||
## Proposed Direction
|
||||
|
||||
Describe the smallest useful version of the feature if possible.
|
||||
|
||||
## Alternatives Considered
|
||||
|
||||
What are you doing today instead?
|
||||
|
||||
## Additional Context
|
||||
|
||||
Examples, screenshots, related projects, or prior art.
|
||||
21
.github/pull_request_template.md
vendored
Normal file
21
.github/pull_request_template.md
vendored
Normal file
@@ -0,0 +1,21 @@
|
||||
## Summary
|
||||
|
||||
- what changed
|
||||
- why it changed
|
||||
|
||||
## Impact
|
||||
|
||||
- user-facing impact:
|
||||
- developer/maintainer impact:
|
||||
|
||||
## Testing
|
||||
|
||||
- [ ] `bun run build`
|
||||
- [ ] `bun run smoke`
|
||||
- [ ] focused tests:
|
||||
|
||||
## Notes
|
||||
|
||||
- provider/model path tested:
|
||||
- screenshots attached (if UI changed):
|
||||
- follow-up work or known limitations:
|
||||
29
.github/workflows/pr-checks.yml
vendored
29
.github/workflows/pr-checks.yml
vendored
@@ -6,30 +6,53 @@ on:
|
||||
branches:
|
||||
- main
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
smoke-and-tests:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- name: Check out repository
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Set up Node.js
|
||||
uses: actions/setup-node@v4
|
||||
uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4.4.0
|
||||
with:
|
||||
node-version: 22
|
||||
|
||||
- name: Set up Bun
|
||||
uses: oven-sh/setup-bun@v2
|
||||
uses: oven-sh/setup-bun@4bc047ad259df6fc24a6c9b0f9a0cb08cf17fbe5 # v2.0.1
|
||||
with:
|
||||
bun-version: 1.3.11
|
||||
|
||||
- name: Set up Python
|
||||
uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0
|
||||
with:
|
||||
python-version: "3.12"
|
||||
cache: "pip"
|
||||
cache-dependency-path: python/requirements.txt
|
||||
|
||||
- name: Install dependencies
|
||||
run: bun install --frozen-lockfile
|
||||
|
||||
- name: Smoke check
|
||||
run: bun run smoke
|
||||
|
||||
- name: Full unit test suite
|
||||
run: bun test --max-concurrency=1
|
||||
|
||||
- name: Install Python test dependencies
|
||||
run: python -m pip install -r python/requirements.txt
|
||||
|
||||
- name: Python unit tests
|
||||
run: python -m pytest -q python/tests
|
||||
|
||||
- name: Suspicious PR intent scan
|
||||
run: bun run security:pr-scan -- --base ${{ github.event.pull_request.base.sha || 'origin/main' }}
|
||||
- name: Provider tests
|
||||
run: bun run test:provider
|
||||
|
||||
|
||||
144
.github/workflows/release.yml
vendored
Normal file
144
.github/workflows/release.yml
vendored
Normal file
@@ -0,0 +1,144 @@
|
||||
name: Auto Release
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
|
||||
concurrency:
|
||||
group: auto-release-${{ github.ref }}
|
||||
cancel-in-progress: false
|
||||
|
||||
jobs:
|
||||
release-please:
|
||||
if: ${{ github.repository == 'Gitlawb/openclaude' }}
|
||||
name: Release Please
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: write
|
||||
pull-requests: write
|
||||
outputs:
|
||||
release_created: ${{ steps.release.outputs.release_created }}
|
||||
tag_name: ${{ steps.release.outputs.tag_name }}
|
||||
version: ${{ steps.release.outputs.version }}
|
||||
steps:
|
||||
- name: Run release-please
|
||||
id: release
|
||||
uses: googleapis/release-please-action@16a9c90856f42705d54a6fda1823352bdc62cf38
|
||||
with:
|
||||
token: ${{ secrets.GITHUB_TOKEN }}
|
||||
release-type: node
|
||||
|
||||
publish-npm:
|
||||
name: Publish to npm
|
||||
needs: release-please
|
||||
if: ${{ needs.release-please.outputs.release_created == 'true' }}
|
||||
runs-on: ubuntu-latest
|
||||
environment: release
|
||||
permissions:
|
||||
contents: read
|
||||
id-token: write
|
||||
steps:
|
||||
- name: Checkout release tag
|
||||
uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5
|
||||
with:
|
||||
ref: ${{ needs.release-please.outputs.tag_name }}
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Set up Node.js
|
||||
uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020
|
||||
with:
|
||||
node-version: 24
|
||||
registry-url: https://registry.npmjs.org
|
||||
|
||||
- name: Set up Bun
|
||||
uses: oven-sh/setup-bun@0c5077e51419868618aeaa5fe8019c62421857d6
|
||||
with:
|
||||
bun-version: 1.3.11
|
||||
|
||||
- name: Install dependencies
|
||||
run: bun install --frozen-lockfile
|
||||
|
||||
- name: Run unit tests
|
||||
run: bun test --max-concurrency=1
|
||||
|
||||
- name: Smoke test
|
||||
run: bun run smoke
|
||||
|
||||
- name: Build
|
||||
run: bun run build
|
||||
|
||||
- name: Dry-run package
|
||||
run: npm pack --dry-run
|
||||
|
||||
- name: Clear token auth for trusted publishing
|
||||
run: |
|
||||
unset NODE_AUTH_TOKEN
|
||||
echo "NODE_AUTH_TOKEN=" >> "$GITHUB_ENV"
|
||||
|
||||
- name: Publish to npm
|
||||
run: npm publish --access public --provenance
|
||||
|
||||
- name: Release summary
|
||||
run: |
|
||||
{
|
||||
echo "## Released ${{ needs.release-please.outputs.tag_name }}"
|
||||
echo
|
||||
echo "- npm: https://www.npmjs.com/package/@gitlawb/openclaude"
|
||||
echo "- GitHub: https://github.com/Gitlawb/openclaude/releases/tag/${{ needs.release-please.outputs.tag_name }}"
|
||||
} >> "$GITHUB_STEP_SUMMARY"
|
||||
|
||||
docker:
|
||||
name: Build & Push Docker Image
|
||||
needs: release-please
|
||||
if: ${{ needs.release-please.outputs.release_created == 'true' }}
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
steps:
|
||||
- name: Checkout release tag
|
||||
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
ref: ${{ needs.release-please.outputs.tag_name }}
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3.10.0
|
||||
|
||||
- name: Log in to GitHub Container Registry
|
||||
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Extract metadata
|
||||
id: meta
|
||||
uses: docker/metadata-action@902fa8ec7d6ecbf8d84d538b9b233a880e428804 # v5.7.0
|
||||
with:
|
||||
images: ghcr.io/${{ github.repository }}
|
||||
tags: |
|
||||
type=semver,pattern={{version}},value=${{ needs.release-please.outputs.version }}
|
||||
type=semver,pattern={{major}}.{{minor}},value=${{ needs.release-please.outputs.version }}
|
||||
type=raw,value=latest
|
||||
|
||||
- name: Build and load locally
|
||||
uses: docker/build-push-action@14487ce63c7a62a4a324b0bfb37086795e31c6c1 # v6.16.0
|
||||
with:
|
||||
context: .
|
||||
load: true
|
||||
tags: openclaude:smoke
|
||||
cache-from: type=gha
|
||||
|
||||
- name: Smoke test
|
||||
run: docker run --rm openclaude:smoke --version
|
||||
|
||||
- name: Build and push
|
||||
uses: docker/build-push-action@14487ce63c7a62a4a324b0bfb37086795e31c6c1 # v6.16.0
|
||||
with:
|
||||
context: .
|
||||
push: true
|
||||
tags: ${{ steps.meta.outputs.tags }}
|
||||
labels: ${{ steps.meta.outputs.labels }}
|
||||
cache-from: type=gha
|
||||
cache-to: type=gha,mode=max
|
||||
.gitignore (vendored) — 7 lines changed
@@ -3,5 +3,12 @@ dist/
|
||||
*.tsbuildinfo
|
||||
.env
|
||||
.env.*
|
||||
!.env.example
|
||||
.openclaude-profile.json
|
||||
reports/
|
||||
GEMINI.md
|
||||
CLAUDE.md
|
||||
package-lock.json
|
||||
/.claude
|
||||
coverage/
|
||||
agent.log
|
||||
|
||||
.release-please-manifest.json (new file) — 3 lines added
@@ -0,0 +1,3 @@
|
||||
{
|
||||
".": "0.7.0"
|
||||
}
|
||||
ANDROID_INSTALL.md (new file) — 162 lines added
@@ -0,0 +1,162 @@
|
||||
# OpenClaude on Android (Termux)
|
||||
|
||||
A complete guide to running OpenClaude on Android using Termux + proot Ubuntu.
|
||||
|
||||
---
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- Android phone with ~700MB free storage
|
||||
- [Termux](https://f-droid.org/en/packages/com.termux/) installed from **F-Droid** (not Play Store)
|
||||
- An [OpenRouter](https://openrouter.ai) API key (free, no credit card required)
|
||||
|
||||
---
|
||||
|
||||
## Why This Setup?
|
||||
|
||||
OpenClaude requires [Bun](https://bun.sh) to build, and Bun does not support Android natively. The workaround is running a real Ubuntu environment inside Termux via `proot-distro`, where Bun's Linux binary works correctly.
|
||||
|
||||
---
|
||||
|
||||
## Installation
|
||||
|
||||
### Step 1 — Update Termux
|
||||
|
||||
```bash
|
||||
pkg update && pkg upgrade
|
||||
```
|
||||
|
||||
Press `N` or Enter for any config file conflict prompts.
|
||||
|
||||
### Step 2 — Install dependencies
|
||||
|
||||
```bash
|
||||
pkg install nodejs-lts git proot-distro
|
||||
```
|
||||
|
||||
Verify Node.js:
|
||||
```bash
|
||||
node --version # should be v20+
|
||||
```
|
||||
|
||||
### Step 3 — Clone OpenClaude
|
||||
|
||||
```bash
|
||||
git clone https://github.com/Gitlawb/openclaude.git
|
||||
cd openclaude
|
||||
npm install
|
||||
npm link
|
||||
```
|
||||
|
||||
### Step 4 — Install Ubuntu via proot
|
||||
|
||||
```bash
|
||||
proot-distro install ubuntu
|
||||
```
|
||||
|
||||
This downloads ~200–400MB. Wait for it to complete.
|
||||
|
||||
### Step 5 — Install Bun inside Ubuntu
|
||||
|
||||
```bash
|
||||
proot-distro login ubuntu
|
||||
curl -fsSL https://bun.sh/install | bash
|
||||
source ~/.bashrc
|
||||
bun --version # should show 1.3.11+
|
||||
```
|
||||
|
||||
### Step 6 — Build OpenClaude
|
||||
|
||||
```bash
|
||||
cd /data/data/com.termux/files/home/openclaude
|
||||
bun run build
|
||||
```
|
||||
|
||||
You should see:
|
||||
```
|
||||
✓ Built openclaude v0.1.6 → dist/cli.mjs
|
||||
```
|
||||
|
||||
### Step 7 — Save env vars permanently
|
||||
|
||||
Still inside Ubuntu, add your OpenRouter config to `.bashrc`:
|
||||
|
||||
```bash
|
||||
echo 'export CLAUDE_CODE_USE_OPENAI=1' >> ~/.bashrc
|
||||
echo 'export OPENAI_API_KEY=your_openrouter_key_here' >> ~/.bashrc
|
||||
echo 'export OPENAI_BASE_URL=https://openrouter.ai/api/v1' >> ~/.bashrc
|
||||
echo 'export OPENAI_MODEL=qwen/qwen3.6-plus-preview:free' >> ~/.bashrc
|
||||
source ~/.bashrc
|
||||
```
|
||||
|
||||
Replace `your_openrouter_key_here` with your actual key from [openrouter.ai/keys](https://openrouter.ai/keys).
|
||||
|
||||
### Step 8 — Run OpenClaude
|
||||
|
||||
```bash
|
||||
node dist/cli.mjs
|
||||
```
|
||||
|
||||
Select **3** (3rd-party platform) at the login screen. Your env vars will be detected automatically.
|
||||
|
||||
---
|
||||
|
||||
## Restarting After Closing Termux
|
||||
|
||||
Every time you reopen Termux after killing it, run:
|
||||
|
||||
```bash
|
||||
proot-distro login ubuntu
|
||||
cd /data/data/com.termux/files/home/openclaude
|
||||
node dist/cli.mjs
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Recommended Free Model
|
||||
|
||||
**`qwen/qwen3.6-plus-preview:free`** — Best free model on OpenRouter as of April 2026.
|
||||
|
||||
- 1M token context window
|
||||
- Beats Claude 4.5 Opus on Terminal-Bench 2.0 agentic coding (61.6 vs 59.3)
|
||||
- Built-in chain-of-thought reasoning
|
||||
- Native tool use and function calling
|
||||
- $0/M tokens (preview period)
|
||||
|
||||
> ⚠️ Free status may change when the preview period ends. Check [openrouter.ai](https://openrouter.ai/qwen/qwen3.6-plus-preview:free) for current pricing.
|
||||
|
||||
---
|
||||
|
||||
## Alternative Free Models (OpenRouter)
|
||||
|
||||
| Model ID | Context | Notes |
|
||||
|---|---|---|
|
||||
| `qwen/qwen3-coder:free` | 262K | Best for pure coding tasks |
|
||||
| `openai/gpt-oss-120b:free` | 131K | OpenAI open model, strong tool calling |
|
||||
| `nvidia/nemotron-3-super-120b-a12b:free` | 262K | Hybrid MoE, good general use |
|
||||
| `meta-llama/llama-3.3-70b-instruct:free` | 66K | Reliable, widely tested |
|
||||
|
||||
Switch models anytime:
|
||||
```bash
|
||||
export OPENAI_MODEL=qwen/qwen3-coder:free
|
||||
node dist/cli.mjs
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Why Not Groq or Cerebras?
|
||||
|
||||
Both were tested and fail due to OpenClaude's large system prompt (~50K tokens):
|
||||
|
||||
- **Groq free tier**: TPM limits too low (6K–12K tokens/min)
|
||||
- **Cerebras free tier**: TPM limits exceeded, even on `llama3.1-8b`
|
||||
|
||||
OpenRouter free models have no TPM restrictions — only 20 req/min and 200 req/day.
|
||||
|
||||
---
|
||||
|
||||
## Tips
|
||||
|
||||
- **Don't swipe Termux away** from recent apps mid-session — use the home button to minimize instead.
|
||||
- The Ubuntu environment persists between Termux sessions; your build and config are saved.
|
||||
- Run `bun run build` again only if you pull updates to the OpenClaude repo.
|
||||
CHANGELOG.md (new file) — 210 lines added
@@ -0,0 +1,210 @@
|
||||
# Changelog
|
||||
|
||||
## [0.7.0](https://github.com/Gitlawb/openclaude/compare/v0.6.0...v0.7.0) (2026-04-26)
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
* add model-specific tokenizers and compression ratio detection ([#799](https://github.com/Gitlawb/openclaude/issues/799)) ([e92e527](https://github.com/Gitlawb/openclaude/commit/e92e5274b223d935d380b1fbd234cb631ab03211))
|
||||
* add OPENCLAUDE_DISABLE_TOOL_REMINDERS env var to suppress hidden tool-output reminders ([#837](https://github.com/Gitlawb/openclaude/issues/837)) ([28de94d](https://github.com/Gitlawb/openclaude/commit/28de94df5dcd7718cb334e2e793e9472f5b291c5)), closes [#809](https://github.com/Gitlawb/openclaude/issues/809)
|
||||
* add streaming optimizer and structured request logging ([#703](https://github.com/Gitlawb/openclaude/issues/703)) ([5b9cd21](https://github.com/Gitlawb/openclaude/commit/5b9cd21e373823a77fd552d6e02f5d4b68ae06b1))
|
||||
* add xAI as official provider ([#865](https://github.com/Gitlawb/openclaude/issues/865)) ([2586a9c](https://github.com/Gitlawb/openclaude/commit/2586a9cddbd2512826bca81cb5deb3ec97f00f0f))
|
||||
* **api:** expose cache metrics in REPL + normalize across providers ([#813](https://github.com/Gitlawb/openclaude/issues/813)) ([9e23c2b](https://github.com/Gitlawb/openclaude/commit/9e23c2bec43697187762601db5b1585c9b0fb1a3))
|
||||
* implement Hook Chains runtime integration for self-healing agent mesh MVP ([#711](https://github.com/Gitlawb/openclaude/issues/711)) ([44a2c30](https://github.com/Gitlawb/openclaude/commit/44a2c30d5f9b98027e454466c680360f6b4625fc))
|
||||
* **memory:** implement persistent project-level Knowledge Graph and RAG ([#899](https://github.com/Gitlawb/openclaude/issues/899)) ([29f7579](https://github.com/Gitlawb/openclaude/commit/29f757937732be0f8cca2bc0627a27eeafc2a992))
|
||||
* **minimax:** add /usage support and fix MiniMax quota parsing ([#869](https://github.com/Gitlawb/openclaude/issues/869)) ([26413f6](https://github.com/Gitlawb/openclaude/commit/26413f6d307928a4f14c9c61c9860a28f8d81358))
|
||||
* **model:** add GPT-5.5 support for Codex provider ([#880](https://github.com/Gitlawb/openclaude/issues/880)) ([038f715](https://github.com/Gitlawb/openclaude/commit/038f715b7ab9714340bda421b73a86d8590cf531))
|
||||
* **tools:** resilient web search and fetch across all providers ([#836](https://github.com/Gitlawb/openclaude/issues/836)) ([531e3f1](https://github.com/Gitlawb/openclaude/commit/531e3f10592a73d81f26675c2479d46a3d5b55f5))
|
||||
* **zai:** add Z.AI GLM Coding Plan provider preset ([#896](https://github.com/Gitlawb/openclaude/issues/896)) ([a0d657e](https://github.com/Gitlawb/openclaude/commit/a0d657ee188f52f8a4ceaad1658c81343a32fdad))
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* **agent:** provider-aware fallback for haiku/sonnet aliases ([#908](https://github.com/Gitlawb/openclaude/issues/908)) ([a3e728a](https://github.com/Gitlawb/openclaude/commit/a3e728a114f6379b80daefc8abcac17a752c5f96))
|
||||
* bugs ([#885](https://github.com/Gitlawb/openclaude/issues/885)) ([c6c5f06](https://github.com/Gitlawb/openclaude/commit/c6c5f0608cf6509b412b121954547d72b3f3a411))
|
||||
* make OpenAI fallback context window configurable + support external model lookup ([#861](https://github.com/Gitlawb/openclaude/issues/861)) ([b750e9e](https://github.com/Gitlawb/openclaude/commit/b750e9e97d15926d094d435772b2d6d12e5e545c))
|
||||
* **mcp:** disable MCP_SKILLS feature flag — source not mirrored ([#872](https://github.com/Gitlawb/openclaude/issues/872)) ([dcbe295](https://github.com/Gitlawb/openclaude/commit/dcbe29558ab9c74d335b138488005a6509aa906a))
|
||||
* normalize /provider multi-model selection and semicolon parsing ([#841](https://github.com/Gitlawb/openclaude/issues/841)) ([c4cb98a](https://github.com/Gitlawb/openclaude/commit/c4cb98a4f092062da02a4728cf59fed0fc3a6d3f))
|
||||
* **openai-shim:** echo reasoning_content on assistant tool-call messages for Moonshot ([#828](https://github.com/Gitlawb/openclaude/issues/828)) ([67de6bd](https://github.com/Gitlawb/openclaude/commit/67de6bd2cffc3381f0f28fd3ffce043970611667))
|
||||
* **query:** restore system prompt structure and add missing config import ([#907](https://github.com/Gitlawb/openclaude/issues/907)) ([818689b](https://github.com/Gitlawb/openclaude/commit/818689b2ee71cb6966cb4dc5a5ebd90fd22b0fcb))
|
||||
* **shell:** recover when CWD path was replaced by a non-directory ([#871](https://github.com/Gitlawb/openclaude/issues/871)) ([a4c6757](https://github.com/Gitlawb/openclaude/commit/a4c67570238794317d049a225396672b465fdbfc))
|
||||
* **startup:** show --model flag override on startup screen ([#898](https://github.com/Gitlawb/openclaude/issues/898)) ([d45628c](https://github.com/Gitlawb/openclaude/commit/d45628c41300b83b466e6a97983099615a50e7d7))
|
||||
* **startup:** url authoritative over model name in banner provider detect ([#864](https://github.com/Gitlawb/openclaude/issues/864)) ([e346b8d](https://github.com/Gitlawb/openclaude/commit/e346b8d5ec2d58a4e8db337918d52d844ee52766)), closes [#855](https://github.com/Gitlawb/openclaude/issues/855)
|
||||
* surface actionable error when DuckDuckGo web search is rate-limited ([#834](https://github.com/Gitlawb/openclaude/issues/834)) ([3c4d843](https://github.com/Gitlawb/openclaude/commit/3c4d8435c42e1ee04f9defd31c4c589017f524c5))
|
||||
* **test:** add missing teammate exports to hookChains integration mock ([#840](https://github.com/Gitlawb/openclaude/issues/840)) ([23e8cfb](https://github.com/Gitlawb/openclaude/commit/23e8cfbd5b22179684276bef4131e26b830ce69c)), closes [#839](https://github.com/Gitlawb/openclaude/issues/839)
|
||||
* **update:** show real package version and give actionable guidance ([#870](https://github.com/Gitlawb/openclaude/issues/870)) ([6e58b81](https://github.com/Gitlawb/openclaude/commit/6e58b819370128b923dda4fcc774bb556f4b951a))
|
||||
|
||||
## [0.6.0](https://github.com/Gitlawb/openclaude/compare/v0.5.2...v0.6.0) (2026-04-22)
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
* add model caching and benchmarking utilities ([#671](https://github.com/Gitlawb/openclaude/issues/671)) ([2b15e16](https://github.com/Gitlawb/openclaude/commit/2b15e16421f793f954a92c53933a07094544b29d))
|
||||
* add thinking token extraction ([#798](https://github.com/Gitlawb/openclaude/issues/798)) ([268c039](https://github.com/Gitlawb/openclaude/commit/268c0398e4bf1ab898069c61500a2b3c226a0322))
|
||||
* **api:** compress old tool_result content for small-context providers ([#801](https://github.com/Gitlawb/openclaude/issues/801)) ([a6a3de5](https://github.com/Gitlawb/openclaude/commit/a6a3de5ac155fe9d00befbfcab98d439314effd8))
|
||||
* **api:** improve local provider reliability with readiness and self-healing ([#738](https://github.com/Gitlawb/openclaude/issues/738)) ([4cb963e](https://github.com/Gitlawb/openclaude/commit/4cb963e660dbd6ee438c04042700db05a9d32c59))
|
||||
* **api:** smart model routing primitive (cheap-for-simple, strong-for-hard) ([#785](https://github.com/Gitlawb/openclaude/issues/785)) ([e908864](https://github.com/Gitlawb/openclaude/commit/e908864da7e7c987a98053ac5d18d702e192db2b))
|
||||
* enable 15 additional feature flags in open build ([#667](https://github.com/Gitlawb/openclaude/issues/667)) ([6a62e3f](https://github.com/Gitlawb/openclaude/commit/6a62e3ff76ba9ba446b8e20cf2bb139ee76a9387))
|
||||
* native Anthropic API mode for Claude models on GitHub Copilot ([#579](https://github.com/Gitlawb/openclaude/issues/579)) ([fdef4a1](https://github.com/Gitlawb/openclaude/commit/fdef4a1b4ce218ded4937ca83b30acce7c726472))
|
||||
* **provider:** expose Atomic Chat in /provider picker with autodetect ([#810](https://github.com/Gitlawb/openclaude/issues/810)) ([ee19159](https://github.com/Gitlawb/openclaude/commit/ee19159c17b3de3b4a8b4a4541a6569f4261d54e))
|
||||
* **provider:** zero-config autodetection primitive ([#784](https://github.com/Gitlawb/openclaude/issues/784)) ([a5bfcbb](https://github.com/Gitlawb/openclaude/commit/a5bfcbbadf8e9a1fd42f3e103d295524b8da64b0))
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* **api:** ensure strict role sequence and filter empty assistant messages after interruption ([#745](https://github.com/Gitlawb/openclaude/issues/745) regression) ([#794](https://github.com/Gitlawb/openclaude/issues/794)) ([06e7684](https://github.com/Gitlawb/openclaude/commit/06e7684eb56df8e694ac784575e163641931c44c))
|
||||
* Collapse all-text arrays to string for DeepSeek compatibility ([#806](https://github.com/Gitlawb/openclaude/issues/806)) ([761924d](https://github.com/Gitlawb/openclaude/commit/761924daa7e225fe8acf41651408c7cae639a511))
|
||||
* **model:** codex/nvidia-nim/minimax now read OPENAI_MODEL env ([#815](https://github.com/Gitlawb/openclaude/issues/815)) ([4581208](https://github.com/Gitlawb/openclaude/commit/458120889f6ce54cc9f0b287461d5e38eae48a20))
|
||||
* **provider:** saved profile ignored when stale CLAUDE_CODE_USE_* in shell ([#807](https://github.com/Gitlawb/openclaude/issues/807)) ([13de4e8](https://github.com/Gitlawb/openclaude/commit/13de4e85df7f5fadc8cd15a76076374dc112360b))
|
||||
* rename .claude.json to .openclaude.json with legacy fallback ([#582](https://github.com/Gitlawb/openclaude/issues/582)) ([4d4fb28](https://github.com/Gitlawb/openclaude/commit/4d4fb2880e4d0e3a62d8715e1ec13d932e736279))
|
||||
* replace discontinued gemini-2.5-pro-preview-03-25 with stable gemini-2.5-pro ([#802](https://github.com/Gitlawb/openclaude/issues/802)) ([64582c1](https://github.com/Gitlawb/openclaude/commit/64582c119d5d0278195271379da4a68d59a89c1f)), closes [#398](https://github.com/Gitlawb/openclaude/issues/398)
|
||||
* **security:** harden project settings trust boundary + MCP sanitization ([#789](https://github.com/Gitlawb/openclaude/issues/789)) ([ae3b723](https://github.com/Gitlawb/openclaude/commit/ae3b723f3b297b49925cada4728f3174aee8bf12))
|
||||
* **test:** autoCompact floor assertion is flag-sensitive ([#816](https://github.com/Gitlawb/openclaude/issues/816)) ([c13842e](https://github.com/Gitlawb/openclaude/commit/c13842e91c7227246520955de6ae0636b30def9a))
|
||||
* **ui:** prevent provider manager lag by deferring sync I/O ([#803](https://github.com/Gitlawb/openclaude/issues/803)) ([85eab27](https://github.com/Gitlawb/openclaude/commit/85eab2751e7d351bb0ed6a3fe0e15461d241c9cb))
|
||||
|
||||
## [0.5.2](https://github.com/Gitlawb/openclaude/compare/v0.5.1...v0.5.2) (2026-04-20)
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* **api:** replace phrase-based reasoning sanitizer with tag-based filter ([#779](https://github.com/Gitlawb/openclaude/issues/779)) ([336ddcc](https://github.com/Gitlawb/openclaude/commit/336ddcc50d59d79ebff50993f2673652aecb0d7d))
|
||||
|
||||
## [0.5.1](https://github.com/Gitlawb/openclaude/compare/v0.5.0...v0.5.1) (2026-04-20)
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* enforce Bash path constraints after sandbox allow ([#777](https://github.com/Gitlawb/openclaude/issues/777)) ([7002cb3](https://github.com/Gitlawb/openclaude/commit/7002cb302b78ea2a19da3f26226de24e2903fa1d))
|
||||
* enforce MCP OAuth callback state before errors ([#775](https://github.com/Gitlawb/openclaude/issues/775)) ([739b8d1](https://github.com/Gitlawb/openclaude/commit/739b8d1f40fde0e401a5cbd2b9a55d88bd5124ad))
|
||||
* require trusted approval for sandbox override ([#778](https://github.com/Gitlawb/openclaude/issues/778)) ([aab4890](https://github.com/Gitlawb/openclaude/commit/aab489055c53dd64369414116fe93226d2656273))
|
||||
|
||||
## [0.5.0](https://github.com/Gitlawb/openclaude/compare/v0.4.0...v0.5.0) (2026-04-20)
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
* add OPENCLAUDE_DISABLE_STRICT_TOOLS env var to opt out of strict MCP tool schema normalization ([#770](https://github.com/Gitlawb/openclaude/issues/770)) ([e6e8d9a](https://github.com/Gitlawb/openclaude/commit/e6e8d9a24897e4c9ef08b72df20fabbf8ef27f38))
|
||||
* mask provider api key input ([#772](https://github.com/Gitlawb/openclaude/issues/772)) ([13e9f22](https://github.com/Gitlawb/openclaude/commit/13e9f22a83a2b0f85f557b1e12c9442ba61241e4))
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* allow provider recovery during startup ([#765](https://github.com/Gitlawb/openclaude/issues/765)) ([f828171](https://github.com/Gitlawb/openclaude/commit/f828171ef1ab94e2acf73a28a292799e4e26cc0d))
|
||||
* **api:** drop orphan tool results to satisfy strict role sequence ([#745](https://github.com/Gitlawb/openclaude/issues/745)) ([b786b76](https://github.com/Gitlawb/openclaude/commit/b786b765f01f392652eaf28ed3579a96b7260a53))
|
||||
* **help:** prevent /help tab crash from undefined descriptions ([#732](https://github.com/Gitlawb/openclaude/issues/732)) ([3d1979f](https://github.com/Gitlawb/openclaude/commit/3d1979ff066db32415e0c8321af916d81f5f2621))
|
||||
* **mcp:** sync required array with properties in tool schemas ([#754](https://github.com/Gitlawb/openclaude/issues/754)) ([002a8f1](https://github.com/Gitlawb/openclaude/commit/002a8f1f6de2fcfc917165d828501d3047bad61f))
|
||||
* remove cached mcpClient in diagnostic tracking to prevent stale references ([#727](https://github.com/Gitlawb/openclaude/issues/727)) ([2c98be7](https://github.com/Gitlawb/openclaude/commit/2c98be700274a4241963b5f43530bf3bd8f8963f))
|
||||
* use raw context window for auto-compact percentage display ([#748](https://github.com/Gitlawb/openclaude/issues/748)) ([55c5f26](https://github.com/Gitlawb/openclaude/commit/55c5f262a9a5a8be0aa9ae8dc6c7dafc465eb2c6))
|
||||
|
||||
## [0.4.0](https://github.com/Gitlawb/openclaude/compare/v0.3.0...v0.4.0) (2026-04-17)
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
* add Alibaba Coding Plan (DashScope) provider support ([#509](https://github.com/Gitlawb/openclaude/issues/509)) ([43ac6db](https://github.com/Gitlawb/openclaude/commit/43ac6dba75537282da1e2ad8f855082bc4e25f1e))
|
||||
* add NVIDIA NIM and MiniMax provider support ([#552](https://github.com/Gitlawb/openclaude/issues/552)) ([51191d6](https://github.com/Gitlawb/openclaude/commit/51191d61326e1f8319d70b3a3c0d9229e185a564))
|
||||
* add ripgrep to Dockerfile for faster file searching ([#688](https://github.com/Gitlawb/openclaude/issues/688)) ([12dd375](https://github.com/Gitlawb/openclaude/commit/12dd3755c619cc27af3b151ae8fdb9d425a7b9a2))
|
||||
* **api:** classify openai-compatible provider failures ([#708](https://github.com/Gitlawb/openclaude/issues/708)) ([80a00ac](https://github.com/Gitlawb/openclaude/commit/80a00acc2c6dc4657a78de7366f7a9ebc920bfbb))
|
||||
* **vscode:** add full chat interface to OpenClaude extension ([#608](https://github.com/Gitlawb/openclaude/issues/608)) ([fbcd928](https://github.com/Gitlawb/openclaude/commit/fbcd928f7f8511da795aea3ad318bddf0ab9a1a7))
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* focus "Done" option after completing provider manager actions ([#718](https://github.com/Gitlawb/openclaude/issues/718)) ([d6f5130](https://github.com/Gitlawb/openclaude/commit/d6f5130c204d8ffe582212466768706cd7fd6774))
|
||||
* **models:** prevent /models crash from non-string saved model values ([#691](https://github.com/Gitlawb/openclaude/issues/691)) ([6b2121d](https://github.com/Gitlawb/openclaude/commit/6b2121da12189fa7ce1f33394d18abd24cf8a01b))
|
||||
* prevent crash in commands tab when description is undefined ([#730](https://github.com/Gitlawb/openclaude/issues/730)) ([eed77e6](https://github.com/Gitlawb/openclaude/commit/eed77e6579866a98384dcc948a0ad6406614ede3))
|
||||
* strip comments before scanning for missing imports ([#676](https://github.com/Gitlawb/openclaude/issues/676)) ([a00b792](https://github.com/Gitlawb/openclaude/commit/a00b7928de9662ffb7ef6abd8cd040afe6f4f122))
|
||||
* **ui:** show correct endpoint URL in intro screen for custom Anthropic endpoints ([#735](https://github.com/Gitlawb/openclaude/issues/735)) ([3424663](https://github.com/Gitlawb/openclaude/commit/34246635fb9a09499047a52e7f96ca9b36c8a85a))
|
||||
|
||||
## [0.3.0](https://github.com/Gitlawb/openclaude/compare/v0.2.3...v0.3.0) (2026-04-14)
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
* activate coordinator mode in open build ([#647](https://github.com/Gitlawb/openclaude/issues/647)) ([99a1714](https://github.com/Gitlawb/openclaude/commit/99a17144ee285b892a0801acb6abcc9af68879af))
|
||||
* activate local-only team memory in open build ([#648](https://github.com/Gitlawb/openclaude/issues/648)) ([24d485f](https://github.com/Gitlawb/openclaude/commit/24d485f42f5b1405d2fab13f2f497d5edd3b5300))
|
||||
* activate message actions in open build ([#632](https://github.com/Gitlawb/openclaude/issues/632)) ([252808b](https://github.com/Gitlawb/openclaude/commit/252808bbd0a12a6ccf97e2cb09752a0212ea3acd))
|
||||
* add allowBypassPermissionsMode setting ([#658](https://github.com/Gitlawb/openclaude/issues/658)) ([31be66d](https://github.com/Gitlawb/openclaude/commit/31be66d7645ea3473334c9ce89ea1a5095b8df6e))
|
||||
* add Docker image build and push to GHCR on release ([#656](https://github.com/Gitlawb/openclaude/issues/656)) ([658d076](https://github.com/Gitlawb/openclaude/commit/658d076909e14eb0459bcb98aee9aa0472118265))
|
||||
* implement /loop command with fixed and dynamic scheduling ([#621](https://github.com/Gitlawb/openclaude/issues/621)) ([64298a6](https://github.com/Gitlawb/openclaude/commit/64298a663f1391b16aa1f5a49e8a877e1d3742f2))
|
||||
* implement Monitor tool for streaming shell output ([#649](https://github.com/Gitlawb/openclaude/issues/649)) ([b818dd5](https://github.com/Gitlawb/openclaude/commit/b818dd5958f4e8428566ce25a1a6be5fd4fe66f8))
|
||||
* local feature flag overrides via ~/.claude/feature-flags.json ([#639](https://github.com/Gitlawb/openclaude/issues/639)) ([0e48884](https://github.com/Gitlawb/openclaude/commit/0e48884f56c6c008f047a7926d3b2cb924170625))
|
||||
* open useful USER_TYPE-gated features to all users ([#644](https://github.com/Gitlawb/openclaude/issues/644)) ([c1beea9](https://github.com/Gitlawb/openclaude/commit/c1beea98676a413c54152a45a6b9fbe7fb9ed028))
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* bump axios 1.14.0 → 1.15.0 (Dependabot [#4](https://github.com/Gitlawb/openclaude/issues/4), [#5](https://github.com/Gitlawb/openclaude/issues/5)) ([#670](https://github.com/Gitlawb/openclaude/issues/670)) ([a07e5ef](https://github.com/Gitlawb/openclaude/commit/a07e5ef990a5ed01a72e83fdbd1fcab36f515a08))
|
||||
* extend provider guard to protect anthropic profiles from cross-terminal override ([#641](https://github.com/Gitlawb/openclaude/issues/641)) ([03e0b06](https://github.com/Gitlawb/openclaude/commit/03e0b06e0784e4ea46945b3950840b10b6e3ca49))
|
||||
* improve fetch diagnostics for bootstrap and session requests ([#646](https://github.com/Gitlawb/openclaude/issues/646)) ([df2b9f2](https://github.com/Gitlawb/openclaude/commit/df2b9f2b7b4c661ee3d9ed5dc58b3064de0599d1))
|
||||
* **openai-shim:** preserve tool result images and local token caps ([#659](https://github.com/Gitlawb/openclaude/issues/659)) ([30c866d](https://github.com/Gitlawb/openclaude/commit/30c866d31ad8538496460667d86ed5efbd4a8547))
|
||||
* replace broken bun:bundle shim with source pre-processing ([#657](https://github.com/Gitlawb/openclaude/issues/657)) ([adbe391](https://github.com/Gitlawb/openclaude/commit/adbe391e63721918b5d147f4f845111c1a3143db))
|
||||
* resolve 12 bugs across API, MCP, agent tools, web search, and context overflow ([#674](https://github.com/Gitlawb/openclaude/issues/674)) ([25ce2ca](https://github.com/Gitlawb/openclaude/commit/25ce2ca7bff8937b0b79ad7f85c6dc1c68432069))
|
||||
* route OpenAI Codex shortcuts to correct endpoint ([#566](https://github.com/Gitlawb/openclaude/issues/566)) ([7c8bdcc](https://github.com/Gitlawb/openclaude/commit/7c8bdcc3e2ac1ecb98286c705c85671044be3d6b))
|
||||
|
||||
## [0.2.3](https://github.com/Gitlawb/openclaude/compare/v0.2.2...v0.2.3) (2026-04-12)
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* prevent infinite auto-compact loop for unknown 3P models ([#635](https://github.com/Gitlawb/openclaude/issues/635)) ([#636](https://github.com/Gitlawb/openclaude/issues/636)) ([aeaa658](https://github.com/Gitlawb/openclaude/commit/aeaa658f776fb8df95721e8b8962385f8b00f66a))
|
||||
|
||||
## [0.2.2](https://github.com/Gitlawb/openclaude/compare/v0.2.1...v0.2.2) (2026-04-12)
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* **read/edit:** make compact line prefix unambiguous for tab-indented files ([#613](https://github.com/Gitlawb/openclaude/issues/613)) ([08cc6f3](https://github.com/Gitlawb/openclaude/commit/08cc6f328711cd93ce9fa53351266c29a0b0a341))
|
||||
|
||||
## [0.2.1](https://github.com/Gitlawb/openclaude/compare/v0.2.0...v0.2.1) (2026-04-12)
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* **provider:** add recovery guidance for missing OpenAI API key ([#616](https://github.com/Gitlawb/openclaude/issues/616)) ([9419e8a](https://github.com/Gitlawb/openclaude/commit/9419e8a4a21b3771d9ddb10f7072e0a8c5b5b631))
|
||||
|
||||
## [0.2.0](https://github.com/Gitlawb/openclaude/compare/v0.1.8...v0.2.0) (2026-04-12)
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
* add /cache-probe diagnostic command ([#580](https://github.com/Gitlawb/openclaude/issues/580)) ([9ccaa7a](https://github.com/Gitlawb/openclaude/commit/9ccaa7a6759b6991f4a566b4118c06e68a2398fe)), closes [#515](https://github.com/Gitlawb/openclaude/issues/515)
|
||||
* add auto-fix service — auto-lint and test after AI file edits ([#508](https://github.com/Gitlawb/openclaude/issues/508)) ([c385047](https://github.com/Gitlawb/openclaude/commit/c385047abba4366866f4c87bfb5e0b0bd4dcbb9d))
|
||||
* Add Gemini support with thought_signature fix ([#404](https://github.com/Gitlawb/openclaude/issues/404)) ([5012c16](https://github.com/Gitlawb/openclaude/commit/5012c160c9a2dff9418e7ee19dc9a4d29ef2b024))
|
||||
* add headless gRPC server for external agent integration ([#278](https://github.com/Gitlawb/openclaude/issues/278)) ([26eef92](https://github.com/Gitlawb/openclaude/commit/26eef92fe72e9c3958d61435b8d3571e12bf2b74))
|
||||
* add wiki mvp commands ([#532](https://github.com/Gitlawb/openclaude/issues/532)) ([c328fdf](https://github.com/Gitlawb/openclaude/commit/c328fdf9e2fe59ad101b049301298ce9ff24caca))
|
||||
* GitHub provider lifecycle and onboarding hardening ([#351](https://github.com/Gitlawb/openclaude/issues/351)) ([ff7d499](https://github.com/Gitlawb/openclaude/commit/ff7d49990de515825ddbe4099f3a39b944b61370))
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* add File polyfill for Node < 20 to prevent startup deadlock with proxy ([#442](https://github.com/Gitlawb/openclaude/issues/442)) ([85aa8b0](https://github.com/Gitlawb/openclaude/commit/85aa8b0985c8f3cb8801efa5141114a0ab0f6a83))
|
||||
* add GitHub Copilot model context windows and output limits ([#576](https://github.com/Gitlawb/openclaude/issues/576)) ([a7f5982](https://github.com/Gitlawb/openclaude/commit/a7f5982f6438ab0ddc3f0daae31ea68ac7ac206c)), closes [#515](https://github.com/Gitlawb/openclaude/issues/515)
|
||||
* add LiteLLM-style aliases for GitHub Copilot context windows ([#606](https://github.com/Gitlawb/openclaude/issues/606)) ([2e0e14d](https://github.com/Gitlawb/openclaude/commit/2e0e14d71313e0e501efaa9e55c6c56f2742fb10))
|
||||
* add store:false to Chat Completions and /responses fallback ([#578](https://github.com/Gitlawb/openclaude/issues/578)) ([8aaa4f2](https://github.com/Gitlawb/openclaude/commit/8aaa4f22ac5b942d82aa9cad54af30d56034515a))
|
||||
* address code scanning alerts ([#434](https://github.com/Gitlawb/openclaude/issues/434)) ([e365cb4](https://github.com/Gitlawb/openclaude/commit/e365cb4010becabacd7cbccb4c3e59ea23a41e90))
|
||||
* avoid sync github credential reads in provider manager ([#428](https://github.com/Gitlawb/openclaude/issues/428)) ([aff2bd8](https://github.com/Gitlawb/openclaude/commit/aff2bd87e4f2821992f74fb95481c505d0ba5d5d))
|
||||
* convert dragged file paths to [@mentions](https://github.com/mentions) for attachment ([#382](https://github.com/Gitlawb/openclaude/issues/382)) ([112df59](https://github.com/Gitlawb/openclaude/commit/112df5911791ea71ee9efbb98ea59c5ded1ea161))
|
||||
* custom web search — WEB_URL_TEMPLATE not recognized, timeout too short, silent native fallback ([#537](https://github.com/Gitlawb/openclaude/issues/537)) ([32fbd0c](https://github.com/Gitlawb/openclaude/commit/32fbd0c7b4168b32dcb13a5b69342e2727269201))
|
||||
* defer startup checks and suppress recommendation dialogs during startup window (issue [#363](https://github.com/Gitlawb/openclaude/issues/363)) ([#504](https://github.com/Gitlawb/openclaude/issues/504)) ([2caf2fd](https://github.com/Gitlawb/openclaude/commit/2caf2fd982af1ec845c50152ad9d28d1a597f82f))
|
||||
* display selected model in startup screen instead of hardcoded sonnet 4.6 ([#587](https://github.com/Gitlawb/openclaude/issues/587)) ([b126e38](https://github.com/Gitlawb/openclaude/commit/b126e38b1affddd2de83fcc3ba26f2e44b42a509))
|
||||
* handle missing skill parameter in SkillTool ([#485](https://github.com/Gitlawb/openclaude/issues/485)) ([f9ce81b](https://github.com/Gitlawb/openclaude/commit/f9ce81bfb384e909353813fb6f6760cadd508ae7))
|
||||
* include MCP tool results in microcompact to reduce token waste ([#348](https://github.com/Gitlawb/openclaude/issues/348)) ([52d33a8](https://github.com/Gitlawb/openclaude/commit/52d33a87a047b943aedaaaf772cd48636c263509))
|
||||
* **ink:** restore host prop updates in React 19 reconciler ([#589](https://github.com/Gitlawb/openclaude/issues/589)) ([6e94dd9](https://github.com/Gitlawb/openclaude/commit/6e94dd913688b2d6433a9abe62a245c5f031b776))
|
||||
* let saved provider profiles win on restart ([#513](https://github.com/Gitlawb/openclaude/issues/513)) ([cb8f8b7](https://github.com/Gitlawb/openclaude/commit/cb8f8b7ac2e3e74516ee219a3a48156db7c6ed78))
|
||||
* normalize malformed Bash tool arguments from OpenAI-compatible providers ([#385](https://github.com/Gitlawb/openclaude/issues/385)) ([b4bd95b](https://github.com/Gitlawb/openclaude/commit/b4bd95b47715c9896240d708c106777507fd26ec))
|
||||
* preserve only originally-required properties in strict tool schemas ([#471](https://github.com/Gitlawb/openclaude/issues/471)) ([ccaa193](https://github.com/Gitlawb/openclaude/commit/ccaa193eec5761f0972ffb58eb3189a81a9244b0))
|
||||
* preserve unicode in Windows clipboard fallback ([#388](https://github.com/Gitlawb/openclaude/issues/388)) ([c193497](https://github.com/Gitlawb/openclaude/commit/c1934974aaf64db460cc850a044bd13cc744cce7))
|
||||
* rebrand prompt identity to openclaude ([#496](https://github.com/Gitlawb/openclaude/issues/496)) ([598651f](https://github.com/Gitlawb/openclaude/commit/598651f42389ce76311ec00e8a9c701c939ead27))
|
||||
* replace isDeepStrictEqual with navigation-aware options comparison ([#507](https://github.com/Gitlawb/openclaude/issues/507)) ([537c469](https://github.com/Gitlawb/openclaude/commit/537c469c3a2f7cb0eed05fa2f54dca57b6bc273f)), closes [#472](https://github.com/Gitlawb/openclaude/issues/472)
|
||||
* report cache reads in streaming and correct cost calculation ([#577](https://github.com/Gitlawb/openclaude/issues/577)) ([f4ac709](https://github.com/Gitlawb/openclaude/commit/f4ac709fa6eda732bf45204fcab625ba6c5674b9))
|
||||
* restore default context window for unknown 3p models ([#494](https://github.com/Gitlawb/openclaude/issues/494)) ([69ea1f1](https://github.com/Gitlawb/openclaude/commit/69ea1f1e4a99e9436215d8cb391a116a64442b94))
|
||||
* restore Grep and Glob reliability on OpenAI paths ([#461](https://github.com/Gitlawb/openclaude/issues/461)) ([600c01f](https://github.com/Gitlawb/openclaude/commit/600c01faf761a080a2c7dede872ddbe05a132f23))
|
||||
* restore Ollama auto-detect in first-run setup ([#561](https://github.com/Gitlawb/openclaude/issues/561)) ([68c2968](https://github.com/Gitlawb/openclaude/commit/68c296833dcef54ce44cb18b24357230b5204dbc))
|
||||
* scrub canonical Anthropic headers from 3P shim requests ([#499](https://github.com/Gitlawb/openclaude/issues/499)) ([07621a6](https://github.com/Gitlawb/openclaude/commit/07621a6f8d0918170281869a47b5dbff90e71594))
|
||||
* strip Anthropic params from 3P resume paths ([#479](https://github.com/Gitlawb/openclaude/issues/479)) ([4975cfc](https://github.com/Gitlawb/openclaude/commit/4975cfc2e0ddbe34aa4e8e3f52ee5eba07fbe465))
|
||||
* suppress startup dialogs when input is buffered ([#423](https://github.com/Gitlawb/openclaude/issues/423)) ([8ece290](https://github.com/Gitlawb/openclaude/commit/8ece2900872dadd157e798ef501ddf126dac66c4))
|
||||
* **tui:** restore prompt rendering on startup ([#498](https://github.com/Gitlawb/openclaude/issues/498)) ([e30ad17](https://github.com/Gitlawb/openclaude/commit/e30ad17ae0056787273be2caafd6cf5340b6ab57))
|
||||
* update theme preview on focus change ([#562](https://github.com/Gitlawb/openclaude/issues/562)) ([6924718](https://github.com/Gitlawb/openclaude/commit/692471850fc789ee0797190089272407f9a4d953))
|
||||
* **web-search:** close SSRF bypasses in custom provider hostname guard ([#610](https://github.com/Gitlawb/openclaude/issues/610)) ([a02c441](https://github.com/Gitlawb/openclaude/commit/a02c44143b257fbee7f38f1b93873cc0ea68a1f9))
|
||||
* WebSearch providers + MCPTool bugs ([#593](https://github.com/Gitlawb/openclaude/issues/593)) ([91e4cfb](https://github.com/Gitlawb/openclaude/commit/91e4cfb15b62c04615834fd3c417fe38b4feb914))
|
||||
126
CODE_OF_CONDUCT.md
Normal file
126
CODE_OF_CONDUCT.md
Normal file
@@ -0,0 +1,126 @@
|
||||
# Contributor Covenant Code of Conduct
|
||||
|
||||
## Our Pledge
|
||||
|
||||
We as members, contributors, and maintainers pledge to make participation in
|
||||
our community a harassment-free experience for everyone, regardless of age,
|
||||
body size, visible or invisible disability, ethnicity, sex characteristics,
|
||||
gender identity and expression, level of experience, education, socio-economic
|
||||
status, nationality, personal appearance, race, religion, or sexual identity
|
||||
and orientation.
|
||||
|
||||
We pledge to act and interact in ways that contribute to an open, welcoming,
|
||||
diverse, inclusive, and healthy community.
|
||||
|
||||
## Our Standards
|
||||
|
||||
Examples of behavior that contributes to a positive environment for our
|
||||
community include:
|
||||
|
||||
- Demonstrating empathy and kindness toward other people
|
||||
- Being respectful of differing opinions, viewpoints, and experiences
|
||||
- Giving and gracefully accepting constructive feedback
|
||||
- Accepting responsibility and apologizing to those affected by our mistakes,
|
||||
and learning from the experience
|
||||
- Focusing on what is best not just for us as individuals, but for the
|
||||
overall community
|
||||
|
||||
Examples of unacceptable behavior include:
|
||||
|
||||
- The use of sexualized language or imagery, and sexual attention or
|
||||
advances of any kind
|
||||
- Trolling, insulting or derogatory comments, and personal or political attacks
|
||||
- Public or private harassment
|
||||
- Publishing others' private information, such as a physical or email
|
||||
address, without their explicit permission
|
||||
- Other conduct which could reasonably be considered inappropriate in a
|
||||
professional setting
|
||||
|
||||
## Enforcement Responsibilities
|
||||
|
||||
Community leaders are responsible for clarifying and enforcing our standards of
|
||||
acceptable behavior and will take appropriate and fair corrective action in
|
||||
response to any behavior that they deem inappropriate, threatening, offensive,
|
||||
or harmful.
|
||||
|
||||
Community leaders have the right and responsibility to remove, edit, or reject
|
||||
comments, commits, code, wiki edits, issues, and other contributions that are
|
||||
not aligned to this Code of Conduct, and will communicate reasons for
|
||||
moderation decisions when appropriate.
|
||||
|
||||
## Scope
|
||||
|
||||
This Code of Conduct applies within all community spaces, and also applies when
|
||||
an individual is officially representing the community in public spaces.
|
||||
Examples of representing our community include using an official email address,
|
||||
posting via an official social media account, or acting as an appointed
|
||||
representative at an online or offline event.
|
||||
|
||||
## Enforcement
|
||||
|
||||
Instances of abusive, harassing, or otherwise unacceptable behavior may be
|
||||
reported to the project maintainers through the security or community
|
||||
contact paths available in the repository.
|
||||
|
||||
All complaints will be reviewed and investigated promptly and fairly.
|
||||
|
||||
All community leaders are obligated to respect the privacy and security of the
|
||||
reporter of any incident.
|
||||
|
||||
## Enforcement Guidelines
|
||||
|
||||
Community leaders will follow these Community Impact Guidelines in determining
|
||||
the consequences for any action they deem in violation of this Code of Conduct:
|
||||
|
||||
### 1. Correction
|
||||
|
||||
**Community Impact**: Use of inappropriate language or other behavior deemed
|
||||
unprofessional or unwelcome in the community.
|
||||
|
||||
**Consequence**: A private, written warning from community leaders, providing
|
||||
clarity around the nature of the violation and an explanation of why the
|
||||
behavior was inappropriate. A public apology may be requested.
|
||||
|
||||
### 2. Warning
|
||||
|
||||
**Community Impact**: A violation through a single incident or series
|
||||
of actions.
|
||||
|
||||
**Consequence**: A warning with consequences for continued behavior. No
|
||||
interaction with the people involved, including unsolicited interaction with
|
||||
those enforcing the Code of Conduct, for a specified period of time. This
|
||||
includes avoiding interactions in community spaces as well as external channels
|
||||
like social media. Violating these terms may lead to a temporary or permanent
|
||||
ban.
|
||||
|
||||
### 3. Temporary Ban
|
||||
|
||||
**Community Impact**: A serious violation of community standards, including
|
||||
sustained inappropriate behavior.
|
||||
|
||||
**Consequence**: A temporary ban from any sort of interaction or public
|
||||
communication with the community for a specified period of time. No public or
|
||||
private interaction with the people involved, including unsolicited interaction
|
||||
with those enforcing the Code of Conduct, is allowed during this period.
|
||||
Violating these terms may lead to a permanent ban.
|
||||
|
||||
### 4. Permanent Ban
|
||||
|
||||
**Community Impact**: Demonstrating a pattern of violation of community
|
||||
standards, including sustained inappropriate behavior, harassment of an
|
||||
individual, or aggression toward or disparagement of classes of individuals.
|
||||
|
||||
**Consequence**: A permanent ban from any sort of public interaction within
|
||||
the community.
|
||||
|
||||
## Attribution
|
||||
|
||||
This Code of Conduct is adapted from the [Contributor Covenant](https://www.contributor-covenant.org/),
|
||||
version 2.1, available at
|
||||
[https://www.contributor-covenant.org/version/2/1/code_of_conduct.html](https://www.contributor-covenant.org/version/2/1/code_of_conduct.html).
|
||||
|
||||
Community Impact Guidelines were inspired by
|
||||
[Mozilla's code of conduct enforcement ladder](https://github.com/mozilla/diversity).
|
||||
|
||||
For answers to common questions about this code of conduct, see the FAQ at
|
||||
[https://www.contributor-covenant.org/faq](https://www.contributor-covenant.org/faq).
|
||||
119
CONTRIBUTING.md
Normal file
119
CONTRIBUTING.md
Normal file
@@ -0,0 +1,119 @@
|
||||
# Contributing to OpenClaude
|
||||
|
||||
Thanks for contributing.
|
||||
|
||||
OpenClaude is a fast-moving open-source coding-agent CLI with support for multiple providers, local backends, MCP, and a terminal-first workflow. The best contributions here are focused, well-tested, and easy to review.
|
||||
|
||||
## Before You Start
|
||||
|
||||
- Search existing [issues](https://github.com/Gitlawb/openclaude/issues) and [discussions](https://github.com/Gitlawb/openclaude/discussions) before opening a new thread.
|
||||
- Use issues for confirmed bugs and actionable feature work.
|
||||
- Use discussions for setup help, ideas, and general community conversation.
|
||||
- For larger changes, open an issue first so the scope is clear before implementation.
|
||||
- For security reports, follow [SECURITY.md](SECURITY.md).
|
||||
|
||||
## Local Setup
|
||||
|
||||
Install dependencies:
|
||||
|
||||
```bash
|
||||
bun install
|
||||
```
|
||||
|
||||
Build the CLI:
|
||||
|
||||
```bash
|
||||
bun run build
|
||||
```
|
||||
|
||||
Smoke test:
|
||||
|
||||
```bash
|
||||
bun run smoke
|
||||
```
|
||||
|
||||
Run the app locally:
|
||||
|
||||
```bash
|
||||
bun run dev
|
||||
```
|
||||
|
||||
If you are working on provider setup or saved profiles, useful commands include:
|
||||
|
||||
```bash
|
||||
bun run profile:init
|
||||
bun run dev:profile
|
||||
```
|
||||
|
||||
## Development Workflow
|
||||
|
||||
- Keep PRs focused on one problem or feature.
|
||||
- Avoid mixing unrelated cleanup into the same change.
|
||||
- Preserve existing repo patterns unless the change is intentionally refactoring them.
|
||||
- Add or update tests when the change affects behavior.
|
||||
- Update docs when setup, commands, or user-facing behavior changes.
|
||||
|
||||
## Validation
|
||||
|
||||
At minimum, run the most relevant checks for your change.
|
||||
|
||||
Common checks:
|
||||
|
||||
```bash
|
||||
bun run build
|
||||
bun run smoke
|
||||
```
|
||||
|
||||
Focused tests:
|
||||
|
||||
```bash
|
||||
bun test ./path/to/test-file.test.ts
|
||||
```
|
||||
|
||||
When working on provider/runtime setup, this can also help:
|
||||
|
||||
```bash
|
||||
bun run doctor:runtime
|
||||
```
|
||||
|
||||
## Pull Requests
|
||||
|
||||
Good PRs usually include:
|
||||
|
||||
- a short explanation of what changed
|
||||
- why it changed
|
||||
- the user or developer impact
|
||||
- the exact checks you ran
|
||||
|
||||
If the PR touches UI, terminal presentation, or the VS Code extension, include screenshots when useful.
|
||||
|
||||
If the PR changes provider behavior, mention which provider path was tested.
|
||||
|
||||
## Code Style
|
||||
|
||||
- Follow the existing code style in the touched files.
|
||||
- Prefer small, readable changes over broad rewrites.
|
||||
- Do not reformat unrelated files just because they are nearby.
|
||||
- Keep comments useful and concise.
|
||||
|
||||
## Provider Changes
|
||||
|
||||
OpenClaude supports multiple provider paths. If you change provider logic:
|
||||
|
||||
- be explicit about which providers are affected
|
||||
- avoid breaking third-party providers while fixing first-party behavior
|
||||
- test the exact provider/model path you changed when possible
|
||||
- call out any limitations or follow-up work in the PR description
|
||||
|
||||
## Community
|
||||
|
||||
Please be respectful and constructive with other contributors.
|
||||
|
||||
Maintainers may ask for:
|
||||
|
||||
- narrower scope
|
||||
- focused follow-up PRs
|
||||
- stronger validation
|
||||
- docs updates for behavior changes
|
||||
|
||||
That is normal and helps keep the project reviewable as it grows.
|
||||
46
Dockerfile
Normal file
46
Dockerfile
Normal file
@@ -0,0 +1,46 @@
|
||||
# ---- build stage ----
|
||||
FROM node:22-slim AS build
|
||||
|
||||
# Install Bun
|
||||
RUN npm install -g bun@1.3.11
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
# Copy dependency manifests first for better layer caching
|
||||
COPY package.json bun.lock ./
|
||||
|
||||
# Install all dependencies (including devDependencies for build)
|
||||
RUN bun install --frozen-lockfile
|
||||
|
||||
# Copy source code
|
||||
COPY src/ src/
|
||||
COPY scripts/ scripts/
|
||||
COPY bin/ bin/
|
||||
COPY tsconfig.json ./
|
||||
|
||||
# Build the CLI bundle
|
||||
RUN bun run build
|
||||
|
||||
# Prune devDependencies
|
||||
RUN rm -rf node_modules && bun install --frozen-lockfile --production
|
||||
|
||||
# ---- runtime stage ----
|
||||
FROM node:22-slim
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
# Copy only what's needed to run
|
||||
COPY --from=build /app/dist/cli.mjs dist/cli.mjs
|
||||
COPY --from=build /app/bin/ bin/
|
||||
COPY --from=build /app/node_modules/ node_modules/
|
||||
COPY --from=build /app/package.json package.json
|
||||
COPY README.md ./
|
||||
|
||||
# Install git and ripgrep — many CLI tool operations depend on them
|
||||
RUN apt-get update && apt-get install -y --no-install-recommends git ripgrep \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Run as non-root user
|
||||
USER node
|
||||
|
||||
ENTRYPOINT ["node", "/app/dist/cli.mjs"]
|
||||
29
LICENSE
Normal file
29
LICENSE
Normal file
@@ -0,0 +1,29 @@
|
||||
NOTICE
|
||||
|
||||
This repository contains code derived from Anthropic's Claude Code CLI.
|
||||
|
||||
The original Claude Code source is proprietary software:
|
||||
Copyright (c) Anthropic PBC. All rights reserved.
|
||||
Subject to Anthropic's Commercial Terms of Service.
|
||||
|
||||
Modifications and additions by OpenClaude contributors are offered under
|
||||
the MIT License where legally permissible:
|
||||
|
||||
MIT License
|
||||
Copyright (c) 2026 OpenClaude contributors (modifications only)
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining
|
||||
a copy of the modifications made by OpenClaude contributors, to deal
|
||||
in those modifications without restriction, including without limitation
|
||||
the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
and/or sell copies, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included
|
||||
in all copies or substantial portions of the modifications.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND.
|
||||
|
||||
The underlying derived code remains subject to Anthropic's copyright.
|
||||
This project does not have Anthropic's authorization to distribute
|
||||
their proprietary source. Users and contributors should evaluate their
|
||||
own legal position.
|
||||
@@ -132,7 +132,7 @@ Cause:
|
||||
Fix:
|
||||
|
||||
```powershell
|
||||
cd C:\Users\Lucas Pedry\Documents\openclaude\openclaude
|
||||
cd <PATH>
|
||||
bun run dev:profile
|
||||
```
|
||||
|
||||
@@ -189,7 +189,7 @@ Or pick a local Ollama profile automatically by goal:
|
||||
bun run profile:init -- --provider ollama --goal balanced
|
||||
```
|
||||
|
||||
## 6.5 Placeholder key (`SUA_CHAVE`) error
|
||||
## 6.5 Placeholder key (`YOUR_KEY`) error
|
||||
|
||||
Cause:
|
||||
|
||||
|
||||
580
README.md
580
README.md
@@ -1,377 +1,363 @@
|
||||
# OpenClaude
|
||||
|
||||
Use Claude Code with **any LLM** — not just Claude.
|
||||
OpenClaude is an open-source coding-agent CLI for cloud and local model providers.
|
||||
|
||||
OpenClaude is a fork of the [Claude Code source leak](https://gitlawb.com/node/repos/z6MkgKkb/instructkr-claude-code) (exposed via npm source maps on March 31, 2026). We added an OpenAI-compatible provider shim so you can plug in GPT-4o, DeepSeek, Gemini, Llama, Mistral, or any model that speaks the OpenAI chat completions API. It now also supports the ChatGPT Codex backend for `codexplan` and `codexspark`.
|
||||
Use OpenAI-compatible APIs, Gemini, GitHub Models, Codex OAuth, Codex, Ollama, Atomic Chat, and other supported backends while keeping one terminal-first workflow: prompts, tools, agents, MCP, slash commands, and streaming output.
|
||||
|
||||
All of Claude Code's tools work — bash, file read/write/edit, grep, glob, agents, tasks, MCP — just powered by whatever model you choose.
|
||||
[](https://github.com/Gitlawb/openclaude/actions/workflows/pr-checks.yml)
|
||||
[](https://github.com/Gitlawb/openclaude/tags)
|
||||
[](https://github.com/Gitlawb/openclaude/discussions)
|
||||
[](SECURITY.md)
|
||||
[](LICENSE)
|
||||
|
||||
---
|
||||
OpenClaude is also mirrored to GitLawb:
|
||||
[gitlawb.com/node/repos/z6MkqDnb/openclaude](https://gitlawb.com/node/repos/z6MkqDnb/openclaude)
|
||||
|
||||
## Install
|
||||
[Quick Start](#quick-start) | [Setup Guides](#setup-guides) | [Providers](#supported-providers) | [Source Build](#source-build-and-local-development) | [VS Code Extension](#vs-code-extension) | [Sponsors](#sponsors) | [Community](#community)
|
||||
|
||||
### Option A: npm (recommended)
|
||||
## Sponsors
|
||||
|
||||
<p align="center">
|
||||
<a href="https://gitlawb.com">
|
||||
<img src="https://gitlawb.com/logo.png" alt="GitLawb logo" width="96">
|
||||
</a>
|
||||
|
||||
<a href="https://bankr.bot">
|
||||
<img src="https://bankr.bot/favicon.svg" alt="Bankr.bot logo" width="96">
|
||||
</a>
|
||||
</p>
|
||||
|
||||
<p align="center">
|
||||
<a href="https://gitlawb.com"><strong>GitLawb</strong></a>
|
||||
|
||||
<a href="https://bankr.bot"><strong>Bankr.bot</strong></a>
|
||||
</p>
|
||||
|
||||
## Star History
|
||||
|
||||
[](https://www.star-history.com/?repos=gitlawb%2Fopenclaude&type=date&legend=top-left)
|
||||
|
||||
## Why OpenClaude
|
||||
|
||||
- Use one CLI across cloud APIs and local model backends
|
||||
- Save provider profiles inside the app with `/provider`
|
||||
- Run with OpenAI-compatible services, Gemini, GitHub Models, Codex OAuth, Codex, Ollama, Atomic Chat, and other supported providers
|
||||
- Keep coding-agent workflows in one place: bash, file tools, grep, glob, agents, tasks, MCP, and web tools
|
||||
- Use the bundled VS Code extension for launch integration and theme support
|
||||
|
||||
## Quick Start
|
||||
|
||||
### Install
|
||||
|
||||
```bash
|
||||
npm install -g @gitlawb/openclaude
|
||||
```
|
||||
|
||||
### Option B: From source (requires Bun)
|
||||
If the install later reports `ripgrep not found`, install ripgrep system-wide and confirm `rg --version` works in the same terminal before starting OpenClaude.
|
||||
|
||||
Use Bun `1.3.11` or newer for source builds on Windows. Older Bun versions such as `1.3.4` can fail with a large batch of unresolved module errors during `bun run build`.
|
||||
### Start
|
||||
|
||||
```bash
|
||||
# Clone from gitlawb
|
||||
git clone https://node.gitlawb.com/z6MkqDnb7Siv3Cwj7pGJq4T5EsUisECqR8KpnDLwcaZq5TPr/openclaude.git
|
||||
cd openclaude
|
||||
|
||||
# Install dependencies
|
||||
bun install
|
||||
|
||||
# Build
|
||||
bun run build
|
||||
|
||||
# Link globally (optional)
|
||||
npm link
|
||||
openclaude
|
||||
```
|
||||
|
||||
### Option C: Run directly with Bun (no build step)
|
||||
Inside OpenClaude:
|
||||
|
||||
```bash
|
||||
git clone https://node.gitlawb.com/z6MkqDnb7Siv3Cwj7pGJq4T5EsUisECqR8KpnDLwcaZq5TPr/openclaude.git
|
||||
cd openclaude
|
||||
bun install
|
||||
bun run dev
|
||||
```
|
||||
- run `/provider` for guided provider setup and saved profiles
|
||||
- run `/onboard-github` for GitHub Models onboarding
|
||||
|
||||
---
|
||||
### Fastest OpenAI setup
|
||||
|
||||
## Quick Start
|
||||
|
||||
### 1. Set 3 environment variables
|
||||
macOS / Linux:
|
||||
|
||||
```bash
|
||||
export CLAUDE_CODE_USE_OPENAI=1
|
||||
export OPENAI_API_KEY=sk-your-key-here
|
||||
export OPENAI_MODEL=gpt-4o
|
||||
```
|
||||
|
||||
### 2. Run it
|
||||
|
||||
```bash
|
||||
# If installed via npm
|
||||
openclaude
|
||||
|
||||
# If built from source
|
||||
bun run dev
|
||||
# or after build:
|
||||
node dist/cli.mjs
|
||||
```
|
||||
|
||||
That's it. The tool system, streaming, file editing, multi-step reasoning — everything works through the model you picked.
|
||||
|
||||
The npm package name is `@gitlawb/openclaude`, but the installed CLI command is still `openclaude`.
|
||||
|
||||
---
|
||||
|
||||
## Provider Examples
|
||||
|
||||
### OpenAI
|
||||
|
||||
```bash
|
||||
export CLAUDE_CODE_USE_OPENAI=1
|
||||
export OPENAI_API_KEY=sk-...
|
||||
export OPENAI_MODEL=gpt-4o
|
||||
```
|
||||
|
||||
### Codex via ChatGPT auth
|
||||
|
||||
`codexplan` maps to GPT-5.4 on the Codex backend with high reasoning.
|
||||
`codexspark` maps to GPT-5.3 Codex Spark for faster loops.
|
||||
|
||||
If you already use the Codex CLI, OpenClaude will read `~/.codex/auth.json`
|
||||
automatically. You can also point it elsewhere with `CODEX_AUTH_JSON_PATH` or
|
||||
override the token directly with `CODEX_API_KEY`.
|
||||
|
||||
```bash
|
||||
export CLAUDE_CODE_USE_OPENAI=1
|
||||
export OPENAI_MODEL=codexplan
|
||||
|
||||
# optional if you do not already have ~/.codex/auth.json
|
||||
export CODEX_API_KEY=...
|
||||
|
||||
openclaude
|
||||
```
|
||||
|
||||
### DeepSeek
|
||||
Windows PowerShell:
|
||||
|
||||
```bash
|
||||
export CLAUDE_CODE_USE_OPENAI=1
|
||||
export OPENAI_API_KEY=sk-...
|
||||
export OPENAI_BASE_URL=https://api.deepseek.com/v1
|
||||
export OPENAI_MODEL=deepseek-chat
|
||||
```powershell
|
||||
$env:CLAUDE_CODE_USE_OPENAI="1"
|
||||
$env:OPENAI_API_KEY="sk-your-key-here"
|
||||
$env:OPENAI_MODEL="gpt-4o"
|
||||
|
||||
openclaude
|
||||
```
|
||||
|
||||
### Google Gemini (via OpenRouter)
|
||||
### Fastest local Ollama setup
|
||||
|
||||
macOS / Linux:
|
||||
|
||||
```bash
|
||||
export CLAUDE_CODE_USE_OPENAI=1
|
||||
export OPENAI_API_KEY=sk-or-...
|
||||
export OPENAI_BASE_URL=https://openrouter.ai/api/v1
|
||||
export OPENAI_MODEL=google/gemini-2.0-flash
|
||||
```
|
||||
|
||||
### Ollama (local, free)
|
||||
|
||||
```bash
|
||||
ollama pull llama3.3:70b
|
||||
|
||||
export CLAUDE_CODE_USE_OPENAI=1
|
||||
export OPENAI_BASE_URL=http://localhost:11434/v1
|
||||
export OPENAI_MODEL=llama3.3:70b
|
||||
# no API key needed for local models
|
||||
export OPENAI_MODEL=qwen2.5-coder:7b
|
||||
|
||||
openclaude
|
||||
```
|
||||
|
||||
### LM Studio (local)
|
||||
Windows PowerShell:
|
||||
|
||||
```powershell
|
||||
$env:CLAUDE_CODE_USE_OPENAI="1"
|
||||
$env:OPENAI_BASE_URL="http://localhost:11434/v1"
|
||||
$env:OPENAI_MODEL="qwen2.5-coder:7b"
|
||||
|
||||
openclaude
|
||||
```
|
||||
|
||||
### Using Ollama's launch command
|
||||
|
||||
If you have [Ollama](https://ollama.com) installed, you can skip the env var setup entirely:
|
||||
|
||||
```bash
|
||||
export CLAUDE_CODE_USE_OPENAI=1
|
||||
export OPENAI_BASE_URL=http://localhost:1234/v1
|
||||
export OPENAI_MODEL=your-model-name
|
||||
ollama launch openclaude --model qwen2.5-coder:7b
|
||||
```
|
||||
|
||||
### Together AI
|
||||
This automatically sets `ANTHROPIC_BASE_URL`, model routing, and auth so all API traffic goes through your local Ollama instance. Works with any model you have pulled — local or cloud.
|
||||
|
||||
```bash
|
||||
export CLAUDE_CODE_USE_OPENAI=1
|
||||
export OPENAI_API_KEY=...
|
||||
export OPENAI_BASE_URL=https://api.together.xyz/v1
|
||||
export OPENAI_MODEL=meta-llama/Llama-3.3-70B-Instruct-Turbo
|
||||
```
|
||||
## Setup Guides
|
||||
|
||||
### Groq
|
||||
Beginner-friendly guides:
|
||||
|
||||
```bash
|
||||
export CLAUDE_CODE_USE_OPENAI=1
|
||||
export OPENAI_API_KEY=gsk_...
|
||||
export OPENAI_BASE_URL=https://api.groq.com/openai/v1
|
||||
export OPENAI_MODEL=llama-3.3-70b-versatile
|
||||
```
|
||||
- [Non-Technical Setup](docs/non-technical-setup.md)
|
||||
- [Windows Quick Start](docs/quick-start-windows.md)
|
||||
- [macOS / Linux Quick Start](docs/quick-start-mac-linux.md)
|
||||
|
||||
### Mistral
|
||||
Advanced and source-build guides:
|
||||
|
||||
```bash
|
||||
export CLAUDE_CODE_USE_OPENAI=1
|
||||
export OPENAI_API_KEY=...
|
||||
export OPENAI_BASE_URL=https://api.mistral.ai/v1
|
||||
export OPENAI_MODEL=mistral-large-latest
|
||||
```
|
||||
- [Advanced Setup](docs/advanced-setup.md)
|
||||
- [Android Install](ANDROID_INSTALL.md)
|
||||
|
||||
### Azure OpenAI
|
||||
## Supported Providers
|
||||
|
||||
```bash
|
||||
export CLAUDE_CODE_USE_OPENAI=1
|
||||
export OPENAI_API_KEY=your-azure-key
|
||||
export OPENAI_BASE_URL=https://your-resource.openai.azure.com/openai/deployments/your-deployment/v1
|
||||
export OPENAI_MODEL=gpt-4o
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Environment Variables
|
||||
|
||||
| Variable | Required | Description |
|
||||
|----------|----------|-------------|
|
||||
| `CLAUDE_CODE_USE_OPENAI` | Yes | Set to `1` to enable the OpenAI provider |
|
||||
| `OPENAI_API_KEY` | Yes* | Your API key (*not needed for local models like Ollama) |
|
||||
| `OPENAI_MODEL` | Yes | Model name (e.g. `gpt-4o`, `deepseek-chat`, `llama3.3:70b`) |
|
||||
| `OPENAI_BASE_URL` | No | API endpoint (defaults to `https://api.openai.com/v1`) |
|
||||
| `CODEX_API_KEY` | Codex only | Codex/ChatGPT access token override |
|
||||
| `CODEX_AUTH_JSON_PATH` | Codex only | Path to a Codex CLI `auth.json` file |
|
||||
| `CODEX_HOME` | Codex only | Alternative Codex home directory (`auth.json` will be read from here) |
|
||||
| `OPENCLAUDE_DISABLE_CO_AUTHORED_BY` | No | Set to `1` to suppress the default `Co-Authored-By` trailer in generated git commit messages |
|
||||
|
||||
You can also use `ANTHROPIC_MODEL` to override the model name. `OPENAI_MODEL` takes priority.
|
||||
|
||||
OpenClaude PR bodies use OpenClaude branding by default. `OPENCLAUDE_DISABLE_CO_AUTHORED_BY` only affects the commit trailer, not PR attribution text.
|
||||
|
||||
---
|
||||
|
||||
## Runtime Hardening
|
||||
|
||||
Use these commands to keep the CLI stable and catch environment mistakes early:
|
||||
|
||||
```bash
|
||||
# quick startup sanity check
|
||||
bun run smoke
|
||||
|
||||
# validate provider env + reachability
|
||||
bun run doctor:runtime
|
||||
|
||||
# print machine-readable runtime diagnostics
|
||||
bun run doctor:runtime:json
|
||||
|
||||
# persist a diagnostics report to reports/doctor-runtime.json
|
||||
bun run doctor:report
|
||||
|
||||
# full local hardening check (smoke + runtime doctor)
|
||||
bun run hardening:check
|
||||
|
||||
# strict hardening (includes project-wide typecheck)
|
||||
bun run hardening:strict
|
||||
```
|
||||
|
||||
Notes:
|
||||
- `doctor:runtime` fails fast if `CLAUDE_CODE_USE_OPENAI=1` with a placeholder key (`SUA_CHAVE`) or a missing key for non-local providers.
|
||||
- Local providers (for example `http://localhost:11434/v1`) can run without `OPENAI_API_KEY`.
|
||||
- Codex profiles validate `CODEX_API_KEY` or the Codex CLI auth file and probe `POST /responses` instead of `GET /models`.
|
||||
|
||||
### Provider Launch Profiles
|
||||
|
||||
Use profile launchers to avoid repeated environment setup:
|
||||
|
||||
```bash
|
||||
# one-time profile bootstrap (prefer viable local Ollama, otherwise OpenAI)
|
||||
bun run profile:init
|
||||
|
||||
# preview the best provider/model for your goal
|
||||
bun run profile:recommend -- --goal coding --benchmark
|
||||
|
||||
# auto-apply the best available local/openai provider/model for your goal
|
||||
bun run profile:auto -- --goal latency
|
||||
|
||||
# codex bootstrap (defaults to codexplan and ~/.codex/auth.json)
|
||||
bun run profile:codex
|
||||
|
||||
# openai bootstrap with explicit key
|
||||
bun run profile:init -- --provider openai --api-key sk-...
|
||||
|
||||
# ollama bootstrap with custom model
|
||||
bun run profile:init -- --provider ollama --model llama3.1:8b
|
||||
|
||||
# ollama bootstrap with intelligent model auto-selection
|
||||
bun run profile:init -- --provider ollama --goal coding
|
||||
|
||||
# codex bootstrap with a fast model alias
|
||||
bun run profile:init -- --provider codex --model codexspark
|
||||
|
||||
# launch using persisted profile (.openclaude-profile.json)
|
||||
bun run dev:profile
|
||||
|
||||
# codex profile (uses CODEX_API_KEY or ~/.codex/auth.json)
|
||||
bun run dev:codex
|
||||
|
||||
# OpenAI profile (requires OPENAI_API_KEY in your shell)
|
||||
bun run dev:openai
|
||||
|
||||
# Ollama profile (defaults: localhost:11434, llama3.1:8b)
|
||||
bun run dev:ollama
|
||||
```
|
||||
|
||||
`profile:recommend` ranks installed Ollama models for `latency`, `balanced`, or `coding`, and `profile:auto` can persist the recommendation directly.
|
||||
If no profile exists yet, `dev:profile` now uses the same goal-aware defaults when picking the initial model.
|
||||
|
||||
Use `--provider ollama` when you want a local-only path. Auto mode falls back to OpenAI when no viable local chat model is installed.
|
||||
Goal-based Ollama selection only recommends among models that are already installed and reachable from Ollama.
|
||||
|
||||
Use `profile:codex` or `--provider codex` when you want the ChatGPT Codex backend.
|
||||
|
||||
`dev:openai`, `dev:ollama`, and `dev:codex` run `doctor:runtime` first and only launch the app if checks pass.
|
||||
For `dev:ollama`, make sure Ollama is running locally before launch.
|
||||
|
||||
---
|
||||
| Provider | Setup Path | Notes |
|
||||
| --- | --- | --- |
|
||||
| OpenAI-compatible | `/provider` or env vars | Works with OpenAI, OpenRouter, DeepSeek, Groq, Mistral, LM Studio, and other compatible `/v1` servers |
|
||||
| Gemini | `/provider` or env vars | Supports API key, access token, or local ADC workflow on current `main` |
|
||||
| GitHub Models | `/onboard-github` | Interactive onboarding with saved credentials |
|
||||
| Codex OAuth | `/provider` | Opens ChatGPT sign-in in your browser and stores Codex credentials securely |
|
||||
| Codex | `/provider` | Uses existing Codex CLI auth, OpenClaude secure storage, or env credentials |
|
||||
| Ollama | `/provider`, env vars, or `ollama launch` | Local inference with no API key |
|
||||
| Atomic Chat | `/provider`, env vars, or `bun run dev:atomic-chat` | Local Model Provider; auto-detects loaded models |
|
||||
| Bedrock / Vertex / Foundry | env vars | Additional provider integrations for supported environments |
|
||||
|
||||
## What Works
|
||||
|
||||
- **All tools**: Bash, FileRead, FileWrite, FileEdit, Glob, Grep, WebFetch, WebSearch, Agent, MCP, LSP, NotebookEdit, Tasks
|
||||
- **Streaming**: Real-time token streaming
|
||||
- **Tool calling**: Multi-step tool chains (the model calls tools, gets results, continues)
|
||||
- **Images**: Base64 and URL images passed to vision models
|
||||
- **Slash commands**: /commit, /review, /compact, /diff, /doctor, etc.
|
||||
- **Sub-agents**: AgentTool spawns sub-agents using the same provider
|
||||
- **Memory**: Persistent memory system
|
||||
- **Tool-driven coding workflows**: Bash, file read/write/edit, grep, glob, agents, tasks, MCP, and slash commands
|
||||
- **Streaming responses**: Real-time token output and tool progress
|
||||
- **Tool calling**: Multi-step tool loops with model calls, tool execution, and follow-up responses
|
||||
- **Images**: URL and base64 image inputs for providers that support vision
|
||||
- **Provider profiles**: Guided setup plus saved `.openclaude-profile.json` support
|
||||
- **Local and remote model backends**: Cloud APIs, local servers, and Apple Silicon local inference
|
||||
|
||||
## What's Different
|
||||
## Provider Notes
|
||||
|
||||
- **No thinking mode**: Anthropic's extended thinking is disabled (OpenAI models use different reasoning)
|
||||
- **No prompt caching**: Anthropic-specific cache headers are skipped
|
||||
- **No beta features**: Anthropic-specific beta headers are ignored
|
||||
- **Token limits**: Defaults to 32K max output — some models may cap lower, which is handled gracefully
|
||||
OpenClaude supports multiple providers, but behavior is not identical across all of them.
|
||||
|
||||
---
|
||||
- Anthropic-specific features may not exist on other providers
|
||||
- Tool quality depends heavily on the selected model
|
||||
- Smaller local models can struggle with long multi-step tool flows
|
||||
- Some providers impose lower output caps than the CLI defaults, and OpenClaude adapts where possible
|
||||
|
||||
## How It Works
|
||||
For best results, use models with strong tool/function calling support.
|
||||
|
||||
The shim (`src/services/api/openaiShim.ts`) sits between Claude Code and the LLM API:
|
||||
## Agent Routing
|
||||
|
||||
```
|
||||
Claude Code Tool System
|
||||
|
|
||||
v
|
||||
Anthropic SDK interface (duck-typed)
|
||||
|
|
||||
v
|
||||
openaiShim.ts <-- translates formats
|
||||
|
|
||||
v
|
||||
OpenAI Chat Completions API
|
||||
|
|
||||
v
|
||||
Any compatible model
|
||||
OpenClaude can route different agents to different models through settings-based routing. This is useful for cost optimization or splitting work by model strength.
|
||||
|
||||
Add to `~/.openclaude.json`:
|
||||
|
||||
```json
|
||||
{
|
||||
"agentModels": {
|
||||
"deepseek-v4-flash": {
|
||||
"base_url": "https://api.deepseek.com/v1",
|
||||
"api_key": "sk-your-key"
|
||||
},
|
||||
"gpt-4o": {
|
||||
"base_url": "https://api.openai.com/v1",
|
||||
"api_key": "sk-your-key"
|
||||
}
|
||||
},
|
||||
"agentRouting": {
|
||||
"Explore": "deepseek-v4-flash",
|
||||
"Plan": "gpt-4o",
|
||||
"general-purpose": "gpt-4o",
|
||||
"frontend-dev": "deepseek-v4-flash",
|
||||
"default": "gpt-4o"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
It translates:
|
||||
- Anthropic message blocks → OpenAI messages
|
||||
- Anthropic tool_use/tool_result → OpenAI function calls
|
||||
- OpenAI SSE streaming → Anthropic stream events
|
||||
- Anthropic system prompt arrays → OpenAI system messages
|
||||
When no routing match is found, the global provider remains the fallback.
|
||||
|
||||
The rest of Claude Code doesn't know it's talking to a different model.
|
||||
> **Note:** `api_key` values in `settings.json` are stored in plaintext. Keep this file private and do not commit it to version control.
|
||||
|
||||
---
|
||||
## Web Search and Fetch
|
||||
|
||||
## Model Quality Notes
|
||||
By default, `WebSearch` works on non-Anthropic models using DuckDuckGo. This gives GPT-4o, DeepSeek, Gemini, Ollama, and other OpenAI-compatible providers a free web search path out of the box.
|
||||
|
||||
Not all models are equal at agentic tool use. Here's a rough guide:
|
||||
> **Note:** DuckDuckGo fallback works by scraping search results and may be rate-limited, blocked, or subject to DuckDuckGo's Terms of Service. If you want a more reliable supported option, configure Firecrawl.
|
||||
|
||||
| Model | Tool Calling | Code Quality | Speed |
|
||||
|-------|-------------|-------------|-------|
|
||||
| GPT-4o | Excellent | Excellent | Fast |
|
||||
| DeepSeek-V3 | Great | Great | Fast |
|
||||
| Gemini 2.0 Flash | Great | Good | Very Fast |
|
||||
| Llama 3.3 70B | Good | Good | Medium |
|
||||
| Mistral Large | Good | Good | Fast |
|
||||
| GPT-4o-mini | Good | Good | Very Fast |
|
||||
| Qwen 2.5 72B | Good | Good | Medium |
|
||||
| Smaller models (<7B) | Limited | Limited | Very Fast |
|
||||
For Anthropic-native backends and Codex responses, OpenClaude keeps the native provider web search behavior.
|
||||
|
||||
For best results, use models with strong function/tool calling support.
|
||||
`WebFetch` works, but its basic HTTP plus HTML-to-markdown path can still fail on JavaScript-rendered sites or sites that block plain HTTP requests.
|
||||
|
||||
---
|
||||
Set a [Firecrawl](https://firecrawl.dev) API key if you want Firecrawl-powered search/fetch behavior:
|
||||
|
||||
## Files Changed from Original
|
||||
|
||||
```
|
||||
src/services/api/openaiShim.ts — NEW: OpenAI-compatible API shim (724 lines)
|
||||
src/services/api/client.ts — Routes to shim when CLAUDE_CODE_USE_OPENAI=1
|
||||
src/utils/model/providers.ts — Added 'openai' provider type
|
||||
src/utils/model/configs.ts — Added openai model mappings
|
||||
src/utils/model/model.ts — Respects OPENAI_MODEL for defaults
|
||||
src/utils/auth.ts — Recognizes OpenAI as valid 3P provider
|
||||
```bash
|
||||
export FIRECRAWL_API_KEY=your-key-here
|
||||
```
|
||||
|
||||
6 files changed. 786 lines added. Zero dependencies added.
|
||||
With Firecrawl enabled:
|
||||
|
||||
- `WebSearch` can use Firecrawl's search API while DuckDuckGo remains the default free path for non-Claude models
|
||||
- `WebFetch` uses Firecrawl's scrape endpoint instead of raw HTTP, handling JS-rendered pages correctly
|
||||
|
||||
Free tier at [firecrawl.dev](https://firecrawl.dev) includes 500 credits. The key is optional.
|
||||
|
||||
---
|
||||
|
||||
## Origin
|
||||
## Headless gRPC Server
|
||||
|
||||
This is a fork of [instructkr/claude-code](https://gitlawb.com/node/repos/z6MkgKkb/instructkr-claude-code), which mirrored the Claude Code source snapshot that became publicly accessible through an npm source map exposure on March 31, 2026.
|
||||
OpenClaude can be run as a headless gRPC service, allowing you to integrate its agentic capabilities (tools, bash, file editing) into other applications, CI/CD pipelines, or custom user interfaces. The server uses bidirectional streaming to send real-time text chunks, tool calls, and request permissions for sensitive commands.
|
||||
|
||||
The original Claude Code source is the property of Anthropic. This repository is not affiliated with or endorsed by Anthropic.
|
||||
### 1. Start the gRPC Server
|
||||
|
||||
Start the core engine as a gRPC service on `localhost:50051`:
|
||||
|
||||
```bash
|
||||
npm run dev:grpc
|
||||
```
|
||||
|
||||
#### Configuration
|
||||
|
||||
| Variable | Default | Description |
|
||||
|-----------|-------------|------------------------------------------------|
|
||||
| `GRPC_PORT` | `50051` | Port the gRPC server listens on |
|
||||
| `GRPC_HOST` | `localhost` | Bind address. Use `0.0.0.0` to expose on all interfaces (not recommended without authentication) |
|
||||
|
||||
### 2. Run the Test CLI Client
|
||||
|
||||
We provide a lightweight CLI client that communicates exclusively over gRPC. It acts just like the main interactive CLI, rendering colors, streaming tokens, and prompting you for tool permissions (y/n) via the gRPC `action_required` event.
|
||||
|
||||
In a separate terminal, run:
|
||||
|
||||
```bash
|
||||
npm run dev:grpc:cli
|
||||
```
|
||||
|
||||
*Note: The gRPC definitions are located in `src/proto/openclaude.proto`. You can use this file to generate clients in Python, Go, Rust, or any other language.*
|
||||
|
||||
---
|
||||
|
||||
## Source Build And Local Development
|
||||
|
||||
```bash
|
||||
bun install
|
||||
bun run build
|
||||
node dist/cli.mjs
|
||||
```
|
||||
|
||||
Helpful commands:
|
||||
|
||||
- `bun run dev`
|
||||
- `bun test`
|
||||
- `bun run test:coverage`
|
||||
- `bun run security:pr-scan -- --base origin/main`
|
||||
- `bun run smoke`
|
||||
- `bun run doctor:runtime`
|
||||
- `bun run verify:privacy`
|
||||
- focused `bun test ...` runs for the areas you touch
|
||||
|
||||
## Testing And Coverage
|
||||
|
||||
OpenClaude uses Bun's built-in test runner for unit tests.
|
||||
|
||||
Run the full unit suite:
|
||||
|
||||
```bash
|
||||
bun test
|
||||
```
|
||||
|
||||
Generate unit test coverage:
|
||||
|
||||
```bash
|
||||
bun run test:coverage
|
||||
```
|
||||
|
||||
Open the visual coverage report:
|
||||
|
||||
```bash
|
||||
open coverage/index.html
|
||||
```
|
||||
|
||||
If you already have `coverage/lcov.info` and only want to rebuild the UI:
|
||||
|
||||
```bash
|
||||
bun run test:coverage:ui
|
||||
```
|
||||
|
||||
Use focused test runs when you only touch one area:
|
||||
|
||||
- `bun run test:provider`
|
||||
- `bun run test:provider-recommendation`
|
||||
- `bun test path/to/file.test.ts`
|
||||
|
||||
Recommended contributor validation before opening a PR:
|
||||
|
||||
- `bun run build`
|
||||
- `bun run smoke`
|
||||
- `bun run test:coverage` for broader unit coverage when your change affects shared runtime or provider logic
|
||||
- focused `bun test ...` runs for the files and flows you changed
|
||||
|
||||
Coverage output is written to `coverage/lcov.info`, and OpenClaude also generates a git-activity-style heatmap at `coverage/index.html`.
|
||||
## Repository Structure
|
||||
|
||||
- `src/` - core CLI/runtime
|
||||
- `scripts/` - build, verification, and maintenance scripts
|
||||
- `docs/` - setup, contributor, and project documentation
|
||||
- `python/` - standalone Python helpers and their tests
|
||||
- `vscode-extension/openclaude-vscode/` - VS Code extension
|
||||
- `.github/` - repo automation, templates, and CI configuration
|
||||
- `bin/` - CLI launcher entrypoints
|
||||
|
||||
## VS Code Extension
|
||||
|
||||
The repo includes a VS Code extension in [`vscode-extension/openclaude-vscode`](vscode-extension/openclaude-vscode) for OpenClaude launch integration, provider-aware control-center UI, and theme support.
|
||||
|
||||
## Security
|
||||
|
||||
If you believe you found a security issue, see [SECURITY.md](SECURITY.md).
|
||||
|
||||
## Community
|
||||
|
||||
- Use [GitHub Discussions](https://github.com/Gitlawb/openclaude/discussions) for Q&A, ideas, and community conversation
|
||||
- Use [GitHub Issues](https://github.com/Gitlawb/openclaude/issues) for confirmed bugs and actionable feature work
|
||||
|
||||
## Contributing
|
||||
|
||||
Contributions are welcome.
|
||||
|
||||
For larger changes, open an issue first so the scope is clear before implementation. Helpful validation commands include:
|
||||
|
||||
- `bun run build`
|
||||
- `bun run test:coverage`
|
||||
- `bun run smoke`
|
||||
- focused `bun test ...` runs for files and flows you changed
|
||||
|
||||
|
||||
## Disclaimer
|
||||
|
||||
OpenClaude is an independent community project and is not affiliated with, endorsed by, or sponsored by Anthropic.
|
||||
|
||||
OpenClaude originated from the Claude Code codebase and has since been substantially modified to support multiple providers and open use. "Claude" and "Claude Code" are trademarks of Anthropic PBC. See [LICENSE](LICENSE) for details.
|
||||
|
||||
## License
|
||||
|
||||
This repository is provided for educational and research purposes. The original source code is subject to Anthropic's terms. The OpenAI shim additions are public domain.
|
||||
See [LICENSE](LICENSE).
|
||||
|
||||
69
SECURITY.md
Normal file
69
SECURITY.md
Normal file
@@ -0,0 +1,69 @@
|
||||
# Security Policy
|
||||
|
||||
## Supported Versions
|
||||
|
||||
OpenClaude is currently maintained on the latest `main` branch and the latest
|
||||
npm release only.
|
||||
|
||||
| Version | Supported |
|
||||
| ------- | --------- |
|
||||
| Latest release | :white_check_mark: |
|
||||
| Older releases | :x: |
|
||||
| Unreleased forks / modified builds | :x: |
|
||||
|
||||
Security fixes are generally released in the next patch version and may also be
|
||||
landed directly on `main` before a package release is published.
|
||||
|
||||
## Reporting a Vulnerability
|
||||
|
||||
If you believe you have found a security vulnerability in OpenClaude, please
|
||||
report it privately.
|
||||
|
||||
Preferred reporting channel:
|
||||
|
||||
- GitHub Security Advisories / private vulnerability reporting for this
|
||||
repository
|
||||
|
||||
Please include:
|
||||
|
||||
- a clear description of the issue
|
||||
- affected version, commit, or environment
|
||||
- reproduction steps or a proof of concept
|
||||
- impact assessment
|
||||
- any suggested remediation, if available
|
||||
|
||||
Please do **not** open a public issue for an unpatched vulnerability.
|
||||
|
||||
## Response Process
|
||||
|
||||
Our general goals are:
|
||||
|
||||
- initial triage acknowledgment within 7 days
|
||||
- follow-up after validation when we can reproduce the issue
|
||||
- coordinated disclosure after a fix is available
|
||||
|
||||
Severity, exploitability, and maintenance bandwidth may affect timelines.
|
||||
|
||||
## Disclosure and CVEs
|
||||
|
||||
Valid reports may be fixed privately first and disclosed after a patch is
|
||||
available.
|
||||
|
||||
If a report is accepted and the issue is significant enough to warrant formal
|
||||
tracking, we may publish a GitHub Security Advisory and request or assign a CVE
|
||||
through the appropriate channel. CVE issuance is not guaranteed for every
|
||||
report.
|
||||
|
||||
## Scope
|
||||
|
||||
This policy applies to:
|
||||
|
||||
- the OpenClaude source code in this repository
|
||||
- official release artifacts published from this repository
|
||||
- the `@gitlawb/openclaude` npm package
|
||||
|
||||
This policy does not cover:
|
||||
|
||||
- third-party model providers, endpoints, or hosted services
|
||||
- local misconfiguration on the reporter's machine
|
||||
- vulnerabilities in unofficial forks, mirrors, or downstream repackages
|
||||
@@ -1,7 +1,13 @@
|
||||
import { join } from 'path'
|
||||
import { join, win32 } from 'path'
|
||||
import { pathToFileURL } from 'url'
|
||||
|
||||
/**
 * Build a dynamic-`import()` specifier (a `file://` URL string) for the
 * built CLI bundle at `<baseDir>/../dist/cli.mjs`.
 *
 * @param {string} baseDir - Directory of the launcher script; the dist
 *   bundle is resolved relative to it.
 * @returns {string} A `file://` URL usable with `import()`.
 */
export function getDistImportSpecifier(baseDir) {
  // Windows drive-letter paths (e.g. `C:\...`) are joined with the win32
  // path implementation and converted to a URL by hand, so the result is
  // correct even when the default `path` flavor is POSIX.
  if (/^[A-Za-z]:\\/.test(baseDir)) {
    const distPath = win32.join(baseDir, '..', 'dist', 'cli.mjs')
    return `file:///${distPath.replace(/\\/g, '/')}`
  }

  // POSIX (and native-platform) paths: let Node build the file URL.
  const distPath = join(baseDir, '..', 'dist', 'cli.mjs')
  return pathToFileURL(distPath).href
}
|
||||
|
||||
373
bun.lock
373
bun.lock
@@ -5,85 +5,96 @@
|
||||
"": {
|
||||
"name": "openclaude",
|
||||
"dependencies": {
|
||||
"@alcalzone/ansi-tokenize": "^0.3.0",
|
||||
"@anthropic-ai/bedrock-sdk": "^0.26.0",
|
||||
"@anthropic-ai/foundry-sdk": "^0.2.0",
|
||||
"@anthropic-ai/sandbox-runtime": "^0.0.46",
|
||||
"@anthropic-ai/sdk": "^0.81.0",
|
||||
"@anthropic-ai/vertex-sdk": "^0.14.0",
|
||||
"@commander-js/extra-typings": "^12.0.0",
|
||||
"@growthbook/growthbook": "^1.3.0",
|
||||
"@modelcontextprotocol/sdk": "^1.12.0",
|
||||
"@opentelemetry/api": "^1.9.1",
|
||||
"@opentelemetry/api-logs": "^0.214.0",
|
||||
"@opentelemetry/core": "^2.6.1",
|
||||
"@opentelemetry/exporter-logs-otlp-http": "^0.214.0",
|
||||
"@opentelemetry/exporter-trace-otlp-grpc": "^0.57.0",
|
||||
"@opentelemetry/resources": "^2.6.1",
|
||||
"@opentelemetry/sdk-logs": "^0.214.0",
|
||||
"@opentelemetry/sdk-metrics": "^2.6.1",
|
||||
"@opentelemetry/sdk-trace-base": "^2.6.1",
|
||||
"@opentelemetry/sdk-trace-node": "^2.6.1",
|
||||
"@opentelemetry/semantic-conventions": "^1.40.0",
|
||||
"ajv": "^8.17.0",
|
||||
"auto-bind": "^5.0.1",
|
||||
"axios": "^1.14.0",
|
||||
"bidi-js": "^1.0.3",
|
||||
"chalk": "^5.4.0",
|
||||
"chokidar": "^4.0.0",
|
||||
"cli-boxes": "^3.0.0",
|
||||
"cli-highlight": "^2.1.0",
|
||||
"code-excerpt": "^4.0.0",
|
||||
"commander": "^12.0.0",
|
||||
"diff": "^7.0.0",
|
||||
"emoji-regex": "^10.4.0",
|
||||
"env-paths": "^3.0.0",
|
||||
"execa": "^9.5.0",
|
||||
"fflate": "^0.8.2",
|
||||
"figures": "^6.1.0",
|
||||
"fuse.js": "^7.1.0",
|
||||
"get-east-asian-width": "^1.3.0",
|
||||
"google-auth-library": "^9.15.0",
|
||||
"https-proxy-agent": "^7.0.6",
|
||||
"ignore": "^7.0.0",
|
||||
"indent-string": "^5.0.0",
|
||||
"jsonc-parser": "^3.3.1",
|
||||
"lodash-es": "^4.17.21",
|
||||
"lru-cache": "^11.0.0",
|
||||
"marked": "^15.0.0",
|
||||
"p-map": "^7.0.3",
|
||||
"picomatch": "^4.0.0",
|
||||
"proper-lockfile": "^4.1.2",
|
||||
"qrcode": "^1.5.4",
|
||||
"react": "^19.2.4",
|
||||
"react-compiler-runtime": "^1.0.0",
|
||||
"react-reconciler": "^0.33.0",
|
||||
"semver": "^7.6.3",
|
||||
"shell-quote": "^1.8.2",
|
||||
"signal-exit": "^4.1.0",
|
||||
"stack-utils": "^2.0.6",
|
||||
"strip-ansi": "^7.1.0",
|
||||
"supports-hyperlinks": "^3.1.0",
|
||||
"tree-kill": "^1.2.2",
|
||||
"turndown": "^7.2.0",
|
||||
"type-fest": "^4.30.0",
|
||||
"undici": "^7.3.0",
|
||||
"usehooks-ts": "^3.1.1",
|
||||
"vscode-languageserver-protocol": "^3.17.5",
|
||||
"wrap-ansi": "^9.0.0",
|
||||
"ws": "^8.18.0",
|
||||
"xss": "^1.0.15",
|
||||
"yaml": "^2.7.0",
|
||||
"zod": "^3.24.0",
|
||||
"@alcalzone/ansi-tokenize": "0.3.0",
|
||||
"@anthropic-ai/bedrock-sdk": "0.26.4",
|
||||
"@anthropic-ai/foundry-sdk": "0.2.3",
|
||||
"@anthropic-ai/sandbox-runtime": "0.0.46",
|
||||
"@anthropic-ai/sdk": "0.81.0",
|
||||
"@anthropic-ai/vertex-sdk": "0.14.4",
|
||||
"@commander-js/extra-typings": "12.1.0",
|
||||
"@growthbook/growthbook": "1.6.5",
|
||||
"@grpc/grpc-js": "^1.14.3",
|
||||
"@grpc/proto-loader": "^0.8.0",
|
||||
"@mendable/firecrawl-js": "4.18.1",
|
||||
"@modelcontextprotocol/sdk": "1.29.0",
|
||||
"@opentelemetry/api": "1.9.1",
|
||||
"@opentelemetry/api-logs": "0.214.0",
|
||||
"@opentelemetry/core": "2.6.1",
|
||||
"@opentelemetry/exporter-logs-otlp-http": "0.214.0",
|
||||
"@opentelemetry/exporter-trace-otlp-grpc": "0.57.2",
|
||||
"@opentelemetry/resources": "2.6.1",
|
||||
"@opentelemetry/sdk-logs": "0.214.0",
|
||||
"@opentelemetry/sdk-metrics": "2.6.1",
|
||||
"@opentelemetry/sdk-trace-base": "2.6.1",
|
||||
"@opentelemetry/sdk-trace-node": "2.6.1",
|
||||
"@opentelemetry/semantic-conventions": "1.40.0",
|
||||
"@vscode/ripgrep": "^1.17.1",
|
||||
"ajv": "8.18.0",
|
||||
"auto-bind": "5.0.1",
|
||||
"axios": "1.15.0",
|
||||
"bidi-js": "1.0.3",
|
||||
"chalk": "5.6.2",
|
||||
"chokidar": "4.0.3",
|
||||
"cli-boxes": "3.0.0",
|
||||
"cli-highlight": "2.1.11",
|
||||
"code-excerpt": "4.0.0",
|
||||
"commander": "12.1.0",
|
||||
"cross-spawn": "7.0.6",
|
||||
"diff": "8.0.3",
|
||||
"duck-duck-scrape": "^2.2.7",
|
||||
"emoji-regex": "10.6.0",
|
||||
"env-paths": "3.0.0",
|
||||
"execa": "9.6.1",
|
||||
"fflate": "0.8.2",
|
||||
"figures": "6.1.0",
|
||||
"fuse.js": "7.1.0",
|
||||
"get-east-asian-width": "1.5.0",
|
||||
"google-auth-library": "9.15.1",
|
||||
"https-proxy-agent": "7.0.6",
|
||||
"ignore": "7.0.5",
|
||||
"indent-string": "5.0.0",
|
||||
"jsonc-parser": "3.3.1",
|
||||
"lodash-es": "4.18.1",
|
||||
"lru-cache": "11.2.7",
|
||||
"marked": "15.0.12",
|
||||
"p-map": "7.0.4",
|
||||
"picomatch": "4.0.4",
|
||||
"proper-lockfile": "4.1.2",
|
||||
"qrcode": "1.5.4",
|
||||
"react": "19.2.4",
|
||||
"react-compiler-runtime": "1.0.0",
|
||||
"react-reconciler": "0.33.0",
|
||||
"semver": "7.7.4",
|
||||
"sharp": "^0.34.5",
|
||||
"shell-quote": "1.8.3",
|
||||
"signal-exit": "4.1.0",
|
||||
"stack-utils": "2.0.6",
|
||||
"strip-ansi": "7.2.0",
|
||||
"supports-hyperlinks": "3.2.0",
|
||||
"tree-kill": "1.2.2",
|
||||
"turndown": "7.2.2",
|
||||
"type-fest": "4.41.0",
|
||||
"undici": "7.24.6",
|
||||
"usehooks-ts": "3.1.1",
|
||||
"vscode-languageserver-protocol": "3.17.5",
|
||||
"wrap-ansi": "9.0.2",
|
||||
"ws": "8.20.0",
|
||||
"xss": "1.0.15",
|
||||
"yaml": "2.8.3",
|
||||
"zod": "3.25.76",
|
||||
},
|
||||
"devDependencies": {
|
||||
"@types/bun": "^1.2.0",
|
||||
"@types/node": "^25.5.0",
|
||||
"@types/react": "^19.2.14",
|
||||
"typescript": "^5.7.0",
|
||||
"@types/bun": "1.3.11",
|
||||
"@types/node": "25.5.0",
|
||||
"@types/react": "19.2.14",
|
||||
"tsx": "^4.21.0",
|
||||
"typescript": "5.9.3",
|
||||
},
|
||||
},
|
||||
},
|
||||
"overrides": {
|
||||
"lodash-es": "4.18.1",
|
||||
},
|
||||
"packages": {
|
||||
"@alcalzone/ansi-tokenize": ["@alcalzone/ansi-tokenize@0.3.0", "", { "dependencies": { "ansi-styles": "^6.2.1", "is-fullwidth-code-point": "^5.0.0" } }, "sha512-p+CMKJ93HFmLkjXKlXiVGlMQEuRb6H0MokBSwUsX+S6BRX8eV5naFZpQJFfJHjRZY0Hmnqy1/r6UWl3x+19zYA=="],
|
||||
|
||||
@@ -175,6 +186,60 @@
|
||||
|
||||
"@commander-js/extra-typings": ["@commander-js/extra-typings@12.1.0", "", { "peerDependencies": { "commander": "~12.1.0" } }, "sha512-wf/lwQvWAA0goIghcb91dQYpkLBcyhOhQNqG/VgWhnKzgt+UOMvra7EX/2fv70arm5RW+PUHoQHHDa6/p77Eqg=="],
|
||||
|
||||
"@emnapi/runtime": ["@emnapi/runtime@1.9.2", "", { "dependencies": { "tslib": "^2.4.0" } }, "sha512-3U4+MIWHImeyu1wnmVygh5WlgfYDtyf0k8AbLhMFxOipihf6nrWC4syIm/SwEeec0mNSafiiNnMJwbza/Is6Lw=="],
|
||||
|
||||
"@esbuild/aix-ppc64": ["@esbuild/aix-ppc64@0.27.7", "", { "os": "aix", "cpu": "ppc64" }, "sha512-EKX3Qwmhz1eMdEJokhALr0YiD0lhQNwDqkPYyPhiSwKrh7/4KRjQc04sZ8db+5DVVnZ1LmbNDI1uAMPEUBnQPg=="],
|
||||
|
||||
"@esbuild/android-arm": ["@esbuild/android-arm@0.27.7", "", { "os": "android", "cpu": "arm" }, "sha512-jbPXvB4Yj2yBV7HUfE2KHe4GJX51QplCN1pGbYjvsyCZbQmies29EoJbkEc+vYuU5o45AfQn37vZlyXy4YJ8RQ=="],
|
||||
|
||||
"@esbuild/android-arm64": ["@esbuild/android-arm64@0.27.7", "", { "os": "android", "cpu": "arm64" }, "sha512-62dPZHpIXzvChfvfLJow3q5dDtiNMkwiRzPylSCfriLvZeq0a1bWChrGx/BbUbPwOrsWKMn8idSllklzBy+dgQ=="],
|
||||
|
||||
"@esbuild/android-x64": ["@esbuild/android-x64@0.27.7", "", { "os": "android", "cpu": "x64" }, "sha512-x5VpMODneVDb70PYV2VQOmIUUiBtY3D3mPBG8NxVk5CogneYhkR7MmM3yR/uMdITLrC1ml/NV1rj4bMJuy9MCg=="],
|
||||
|
||||
"@esbuild/darwin-arm64": ["@esbuild/darwin-arm64@0.27.7", "", { "os": "darwin", "cpu": "arm64" }, "sha512-5lckdqeuBPlKUwvoCXIgI2D9/ABmPq3Rdp7IfL70393YgaASt7tbju3Ac+ePVi3KDH6N2RqePfHnXkaDtY9fkw=="],
|
||||
|
||||
"@esbuild/darwin-x64": ["@esbuild/darwin-x64@0.27.7", "", { "os": "darwin", "cpu": "x64" }, "sha512-rYnXrKcXuT7Z+WL5K980jVFdvVKhCHhUwid+dDYQpH+qu+TefcomiMAJpIiC2EM3Rjtq0sO3StMV/+3w3MyyqQ=="],
|
||||
|
||||
"@esbuild/freebsd-arm64": ["@esbuild/freebsd-arm64@0.27.7", "", { "os": "freebsd", "cpu": "arm64" }, "sha512-B48PqeCsEgOtzME2GbNM2roU29AMTuOIN91dsMO30t+Ydis3z/3Ngoj5hhnsOSSwNzS+6JppqWsuhTp6E82l2w=="],
|
||||
|
||||
"@esbuild/freebsd-x64": ["@esbuild/freebsd-x64@0.27.7", "", { "os": "freebsd", "cpu": "x64" }, "sha512-jOBDK5XEjA4m5IJK3bpAQF9/Lelu/Z9ZcdhTRLf4cajlB+8VEhFFRjWgfy3M1O4rO2GQ/b2dLwCUGpiF/eATNQ=="],
|
||||
|
||||
"@esbuild/linux-arm": ["@esbuild/linux-arm@0.27.7", "", { "os": "linux", "cpu": "arm" }, "sha512-RkT/YXYBTSULo3+af8Ib0ykH8u2MBh57o7q/DAs3lTJlyVQkgQvlrPTnjIzzRPQyavxtPtfg0EopvDyIt0j1rA=="],
|
||||
|
||||
"@esbuild/linux-arm64": ["@esbuild/linux-arm64@0.27.7", "", { "os": "linux", "cpu": "arm64" }, "sha512-RZPHBoxXuNnPQO9rvjh5jdkRmVizktkT7TCDkDmQ0W2SwHInKCAV95GRuvdSvA7w4VMwfCjUiPwDi0ZO6Nfe9A=="],
|
||||
|
||||
"@esbuild/linux-ia32": ["@esbuild/linux-ia32@0.27.7", "", { "os": "linux", "cpu": "ia32" }, "sha512-GA48aKNkyQDbd3KtkplYWT102C5sn/EZTY4XROkxONgruHPU72l+gW+FfF8tf2cFjeHaRbWpOYa/uRBz/Xq1Pg=="],
|
||||
|
||||
"@esbuild/linux-loong64": ["@esbuild/linux-loong64@0.27.7", "", { "os": "linux", "cpu": "none" }, "sha512-a4POruNM2oWsD4WKvBSEKGIiWQF8fZOAsycHOt6JBpZ+JN2n2JH9WAv56SOyu9X5IqAjqSIPTaJkqN8F7XOQ5Q=="],
|
||||
|
||||
"@esbuild/linux-mips64el": ["@esbuild/linux-mips64el@0.27.7", "", { "os": "linux", "cpu": "none" }, "sha512-KabT5I6StirGfIz0FMgl1I+R1H73Gp0ofL9A3nG3i/cYFJzKHhouBV5VWK1CSgKvVaG4q1RNpCTR2LuTVB3fIw=="],
|
||||
|
||||
"@esbuild/linux-ppc64": ["@esbuild/linux-ppc64@0.27.7", "", { "os": "linux", "cpu": "ppc64" }, "sha512-gRsL4x6wsGHGRqhtI+ifpN/vpOFTQtnbsupUF5R5YTAg+y/lKelYR1hXbnBdzDjGbMYjVJLJTd2OFmMewAgwlQ=="],
|
||||
|
||||
"@esbuild/linux-riscv64": ["@esbuild/linux-riscv64@0.27.7", "", { "os": "linux", "cpu": "none" }, "sha512-hL25LbxO1QOngGzu2U5xeXtxXcW+/GvMN3ejANqXkxZ/opySAZMrc+9LY/WyjAan41unrR3YrmtTsUpwT66InQ=="],
|
||||
|
||||
"@esbuild/linux-s390x": ["@esbuild/linux-s390x@0.27.7", "", { "os": "linux", "cpu": "s390x" }, "sha512-2k8go8Ycu1Kb46vEelhu1vqEP+UeRVj2zY1pSuPdgvbd5ykAw82Lrro28vXUrRmzEsUV0NzCf54yARIK8r0fdw=="],
|
||||
|
||||
"@esbuild/linux-x64": ["@esbuild/linux-x64@0.27.7", "", { "os": "linux", "cpu": "x64" }, "sha512-hzznmADPt+OmsYzw1EE33ccA+HPdIqiCRq7cQeL1Jlq2gb1+OyWBkMCrYGBJ+sxVzve2ZJEVeePbLM2iEIZSxA=="],
|
||||
|
||||
"@esbuild/netbsd-arm64": ["@esbuild/netbsd-arm64@0.27.7", "", { "os": "none", "cpu": "arm64" }, "sha512-b6pqtrQdigZBwZxAn1UpazEisvwaIDvdbMbmrly7cDTMFnw/+3lVxxCTGOrkPVnsYIosJJXAsILG9XcQS+Yu6w=="],
|
||||
|
||||
"@esbuild/netbsd-x64": ["@esbuild/netbsd-x64@0.27.7", "", { "os": "none", "cpu": "x64" }, "sha512-OfatkLojr6U+WN5EDYuoQhtM+1xco+/6FSzJJnuWiUw5eVcicbyK3dq5EeV/QHT1uy6GoDhGbFpprUiHUYggrw=="],
|
||||
|
||||
"@esbuild/openbsd-arm64": ["@esbuild/openbsd-arm64@0.27.7", "", { "os": "openbsd", "cpu": "arm64" }, "sha512-AFuojMQTxAz75Fo8idVcqoQWEHIXFRbOc1TrVcFSgCZtQfSdc1RXgB3tjOn/krRHENUB4j00bfGjyl2mJrU37A=="],
|
||||
|
||||
"@esbuild/openbsd-x64": ["@esbuild/openbsd-x64@0.27.7", "", { "os": "openbsd", "cpu": "x64" }, "sha512-+A1NJmfM8WNDv5CLVQYJ5PshuRm/4cI6WMZRg1by1GwPIQPCTs1GLEUHwiiQGT5zDdyLiRM/l1G0Pv54gvtKIg=="],
|
||||
|
||||
"@esbuild/openharmony-arm64": ["@esbuild/openharmony-arm64@0.27.7", "", { "os": "none", "cpu": "arm64" }, "sha512-+KrvYb/C8zA9CU/g0sR6w2RBw7IGc5J2BPnc3dYc5VJxHCSF1yNMxTV5LQ7GuKteQXZtspjFbiuW5/dOj7H4Yw=="],
|
||||
|
||||
"@esbuild/sunos-x64": ["@esbuild/sunos-x64@0.27.7", "", { "os": "sunos", "cpu": "x64" }, "sha512-ikktIhFBzQNt/QDyOL580ti9+5mL/YZeUPKU2ivGtGjdTYoqz6jObj6nOMfhASpS4GU4Q/Clh1QtxWAvcYKamA=="],
|
||||
|
||||
"@esbuild/win32-arm64": ["@esbuild/win32-arm64@0.27.7", "", { "os": "win32", "cpu": "arm64" }, "sha512-7yRhbHvPqSpRUV7Q20VuDwbjW5kIMwTHpptuUzV+AA46kiPze5Z7qgt6CLCK3pWFrHeNfDd1VKgyP4O+ng17CA=="],
|
||||
|
||||
"@esbuild/win32-ia32": ["@esbuild/win32-ia32@0.27.7", "", { "os": "win32", "cpu": "ia32" }, "sha512-SmwKXe6VHIyZYbBLJrhOoCJRB/Z1tckzmgTLfFYOfpMAx63BJEaL9ExI8x7v0oAO3Zh6D/Oi1gVxEYr5oUCFhw=="],
|
||||
|
||||
"@esbuild/win32-x64": ["@esbuild/win32-x64@0.27.7", "", { "os": "win32", "cpu": "x64" }, "sha512-56hiAJPhwQ1R4i+21FVF7V8kSD5zZTdHcVuRFMW0hn753vVfQN8xlx4uOPT4xoGH0Z/oVATuR82AiqSTDIpaHg=="],
|
||||
|
||||
"@growthbook/growthbook": ["@growthbook/growthbook@1.6.5", "", { "dependencies": { "dom-mutator": "^0.6.0" } }, "sha512-mUaMsgeUTpRIUOTn33EUXHRK6j7pxBjwqH4WpQyq+pukjd1AIzWlEa6w7i6bInJUcweGgP2beXZmaP6b6UPn7A=="],
|
||||
|
||||
"@grpc/grpc-js": ["@grpc/grpc-js@1.14.3", "", { "dependencies": { "@grpc/proto-loader": "^0.8.0", "@js-sdsl/ordered-map": "^4.4.2" } }, "sha512-Iq8QQQ/7X3Sac15oB6p0FmUg/klxQvXLeileoqrTRGJYLV+/9tubbr9ipz0GKHjmXVsgFPo/+W+2cA8eNcR+XA=="],
|
||||
@@ -183,8 +248,60 @@
|
||||
|
||||
"@hono/node-server": ["@hono/node-server@1.19.12", "", { "peerDependencies": { "hono": "^4" } }, "sha512-txsUW4SQ1iilgE0l9/e9VQWmELXifEFvmdA1j6WFh/aFPj99hIntrSsq/if0UWyGVkmrRPKA1wCeP+UCr1B9Uw=="],
|
||||
|
||||
"@img/colour": ["@img/colour@1.1.0", "", {}, "sha512-Td76q7j57o/tLVdgS746cYARfSyxk8iEfRxewL9h4OMzYhbW4TAcppl0mT4eyqXddh6L/jwoM75mo7ixa/pCeQ=="],
|
||||
|
||||
"@img/sharp-darwin-arm64": ["@img/sharp-darwin-arm64@0.34.5", "", { "optionalDependencies": { "@img/sharp-libvips-darwin-arm64": "1.2.4" }, "os": "darwin", "cpu": "arm64" }, "sha512-imtQ3WMJXbMY4fxb/Ndp6HBTNVtWCUI0WdobyheGf5+ad6xX8VIDO8u2xE4qc/fr08CKG/7dDseFtn6M6g/r3w=="],
|
||||
|
||||
"@img/sharp-darwin-x64": ["@img/sharp-darwin-x64@0.34.5", "", { "optionalDependencies": { "@img/sharp-libvips-darwin-x64": "1.2.4" }, "os": "darwin", "cpu": "x64" }, "sha512-YNEFAF/4KQ/PeW0N+r+aVVsoIY0/qxxikF2SWdp+NRkmMB7y9LBZAVqQ4yhGCm/H3H270OSykqmQMKLBhBJDEw=="],
|
||||
|
||||
"@img/sharp-libvips-darwin-arm64": ["@img/sharp-libvips-darwin-arm64@1.2.4", "", { "os": "darwin", "cpu": "arm64" }, "sha512-zqjjo7RatFfFoP0MkQ51jfuFZBnVE2pRiaydKJ1G/rHZvnsrHAOcQALIi9sA5co5xenQdTugCvtb1cuf78Vf4g=="],
|
||||
|
||||
"@img/sharp-libvips-darwin-x64": ["@img/sharp-libvips-darwin-x64@1.2.4", "", { "os": "darwin", "cpu": "x64" }, "sha512-1IOd5xfVhlGwX+zXv2N93k0yMONvUlANylbJw1eTah8K/Jtpi15KC+WSiaX/nBmbm2HxRM1gZ0nSdjSsrZbGKg=="],
|
||||
|
||||
"@img/sharp-libvips-linux-arm": ["@img/sharp-libvips-linux-arm@1.2.4", "", { "os": "linux", "cpu": "arm" }, "sha512-bFI7xcKFELdiNCVov8e44Ia4u2byA+l3XtsAj+Q8tfCwO6BQ8iDojYdvoPMqsKDkuoOo+X6HZA0s0q11ANMQ8A=="],
|
||||
|
||||
"@img/sharp-libvips-linux-arm64": ["@img/sharp-libvips-linux-arm64@1.2.4", "", { "os": "linux", "cpu": "arm64" }, "sha512-excjX8DfsIcJ10x1Kzr4RcWe1edC9PquDRRPx3YVCvQv+U5p7Yin2s32ftzikXojb1PIFc/9Mt28/y+iRklkrw=="],
|
||||
|
||||
"@img/sharp-libvips-linux-ppc64": ["@img/sharp-libvips-linux-ppc64@1.2.4", "", { "os": "linux", "cpu": "ppc64" }, "sha512-FMuvGijLDYG6lW+b/UvyilUWu5Ayu+3r2d1S8notiGCIyYU/76eig1UfMmkZ7vwgOrzKzlQbFSuQfgm7GYUPpA=="],
|
||||
|
||||
"@img/sharp-libvips-linux-riscv64": ["@img/sharp-libvips-linux-riscv64@1.2.4", "", { "os": "linux", "cpu": "none" }, "sha512-oVDbcR4zUC0ce82teubSm+x6ETixtKZBh/qbREIOcI3cULzDyb18Sr/Wcyx7NRQeQzOiHTNbZFF1UwPS2scyGA=="],
|
||||
|
||||
"@img/sharp-libvips-linux-s390x": ["@img/sharp-libvips-linux-s390x@1.2.4", "", { "os": "linux", "cpu": "s390x" }, "sha512-qmp9VrzgPgMoGZyPvrQHqk02uyjA0/QrTO26Tqk6l4ZV0MPWIW6LTkqOIov+J1yEu7MbFQaDpwdwJKhbJvuRxQ=="],
|
||||
|
||||
"@img/sharp-libvips-linux-x64": ["@img/sharp-libvips-linux-x64@1.2.4", "", { "os": "linux", "cpu": "x64" }, "sha512-tJxiiLsmHc9Ax1bz3oaOYBURTXGIRDODBqhveVHonrHJ9/+k89qbLl0bcJns+e4t4rvaNBxaEZsFtSfAdquPrw=="],
|
||||
|
||||
"@img/sharp-libvips-linuxmusl-arm64": ["@img/sharp-libvips-linuxmusl-arm64@1.2.4", "", { "os": "linux", "cpu": "arm64" }, "sha512-FVQHuwx1IIuNow9QAbYUzJ+En8KcVm9Lk5+uGUQJHaZmMECZmOlix9HnH7n1TRkXMS0pGxIJokIVB9SuqZGGXw=="],
|
||||
|
||||
"@img/sharp-libvips-linuxmusl-x64": ["@img/sharp-libvips-linuxmusl-x64@1.2.4", "", { "os": "linux", "cpu": "x64" }, "sha512-+LpyBk7L44ZIXwz/VYfglaX/okxezESc6UxDSoyo2Ks6Jxc4Y7sGjpgU9s4PMgqgjj1gZCylTieNamqA1MF7Dg=="],
|
||||
|
||||
"@img/sharp-linux-arm": ["@img/sharp-linux-arm@0.34.5", "", { "optionalDependencies": { "@img/sharp-libvips-linux-arm": "1.2.4" }, "os": "linux", "cpu": "arm" }, "sha512-9dLqsvwtg1uuXBGZKsxem9595+ujv0sJ6Vi8wcTANSFpwV/GONat5eCkzQo/1O6zRIkh0m/8+5BjrRr7jDUSZw=="],
|
||||
|
||||
"@img/sharp-linux-arm64": ["@img/sharp-linux-arm64@0.34.5", "", { "optionalDependencies": { "@img/sharp-libvips-linux-arm64": "1.2.4" }, "os": "linux", "cpu": "arm64" }, "sha512-bKQzaJRY/bkPOXyKx5EVup7qkaojECG6NLYswgktOZjaXecSAeCWiZwwiFf3/Y+O1HrauiE3FVsGxFg8c24rZg=="],
|
||||
|
||||
"@img/sharp-linux-ppc64": ["@img/sharp-linux-ppc64@0.34.5", "", { "optionalDependencies": { "@img/sharp-libvips-linux-ppc64": "1.2.4" }, "os": "linux", "cpu": "ppc64" }, "sha512-7zznwNaqW6YtsfrGGDA6BRkISKAAE1Jo0QdpNYXNMHu2+0dTrPflTLNkpc8l7MUP5M16ZJcUvysVWWrMefZquA=="],
|
||||
|
||||
"@img/sharp-linux-riscv64": ["@img/sharp-linux-riscv64@0.34.5", "", { "optionalDependencies": { "@img/sharp-libvips-linux-riscv64": "1.2.4" }, "os": "linux", "cpu": "none" }, "sha512-51gJuLPTKa7piYPaVs8GmByo7/U7/7TZOq+cnXJIHZKavIRHAP77e3N2HEl3dgiqdD/w0yUfiJnII77PuDDFdw=="],
|
||||
|
||||
"@img/sharp-linux-s390x": ["@img/sharp-linux-s390x@0.34.5", "", { "optionalDependencies": { "@img/sharp-libvips-linux-s390x": "1.2.4" }, "os": "linux", "cpu": "s390x" }, "sha512-nQtCk0PdKfho3eC5MrbQoigJ2gd1CgddUMkabUj+rBevs8tZ2cULOx46E7oyX+04WGfABgIwmMC0VqieTiR4jg=="],
|
||||
|
||||
"@img/sharp-linux-x64": ["@img/sharp-linux-x64@0.34.5", "", { "optionalDependencies": { "@img/sharp-libvips-linux-x64": "1.2.4" }, "os": "linux", "cpu": "x64" }, "sha512-MEzd8HPKxVxVenwAa+JRPwEC7QFjoPWuS5NZnBt6B3pu7EG2Ge0id1oLHZpPJdn3OQK+BQDiw9zStiHBTJQQQQ=="],
|
||||
|
||||
"@img/sharp-linuxmusl-arm64": ["@img/sharp-linuxmusl-arm64@0.34.5", "", { "optionalDependencies": { "@img/sharp-libvips-linuxmusl-arm64": "1.2.4" }, "os": "linux", "cpu": "arm64" }, "sha512-fprJR6GtRsMt6Kyfq44IsChVZeGN97gTD331weR1ex1c1rypDEABN6Tm2xa1wE6lYb5DdEnk03NZPqA7Id21yg=="],
|
||||
|
||||
"@img/sharp-linuxmusl-x64": ["@img/sharp-linuxmusl-x64@0.34.5", "", { "optionalDependencies": { "@img/sharp-libvips-linuxmusl-x64": "1.2.4" }, "os": "linux", "cpu": "x64" }, "sha512-Jg8wNT1MUzIvhBFxViqrEhWDGzqymo3sV7z7ZsaWbZNDLXRJZoRGrjulp60YYtV4wfY8VIKcWidjojlLcWrd8Q=="],
|
||||
|
||||
"@img/sharp-wasm32": ["@img/sharp-wasm32@0.34.5", "", { "dependencies": { "@emnapi/runtime": "^1.7.0" }, "cpu": "none" }, "sha512-OdWTEiVkY2PHwqkbBI8frFxQQFekHaSSkUIJkwzclWZe64O1X4UlUjqqqLaPbUpMOQk6FBu/HtlGXNblIs0huw=="],
|
||||
|
||||
"@img/sharp-win32-arm64": ["@img/sharp-win32-arm64@0.34.5", "", { "os": "win32", "cpu": "arm64" }, "sha512-WQ3AgWCWYSb2yt+IG8mnC6Jdk9Whs7O0gxphblsLvdhSpSTtmu69ZG1Gkb6NuvxsNACwiPV6cNSZNzt0KPsw7g=="],
|
||||
|
||||
"@img/sharp-win32-ia32": ["@img/sharp-win32-ia32@0.34.5", "", { "os": "win32", "cpu": "ia32" }, "sha512-FV9m/7NmeCmSHDD5j4+4pNI8Cp3aW+JvLoXcTUo0IqyjSfAZJ8dIUmijx1qaJsIiU+Hosw6xM5KijAWRJCSgNg=="],
|
||||
|
||||
"@img/sharp-win32-x64": ["@img/sharp-win32-x64@0.34.5", "", { "os": "win32", "cpu": "x64" }, "sha512-+29YMsqY2/9eFEiW93eqWnuLcWcufowXewwSNIT6UwZdUUCrM3oFjMWH/Z6/TMmb4hlFenmfAVbpWeup2jryCw=="],
|
||||
|
||||
"@js-sdsl/ordered-map": ["@js-sdsl/ordered-map@4.4.2", "", {}, "sha512-iUKgm52T8HOE/makSxjqoWhe95ZJA1/G1sYsGev2JDKUSS14KAgg1LHb+Ba+IPow0xflbnSkOsZcO08C7w1gYw=="],
|
||||
|
||||
"@mendable/firecrawl-js": ["@mendable/firecrawl-js@4.18.1", "", { "dependencies": { "axios": "1.14.0", "firecrawl": "4.16.0", "typescript-event-target": "^1.1.1", "zod": "^3.23.8", "zod-to-json-schema": "^3.23.0" } }, "sha512-NfmJv+xcHoZthj8I3NP/8KAgO8EWcvOcTvCAvszxqs7/6sCs1CRss6Tum6RycZNSwJkr5RzQossN89IlixRfng=="],
|
||||
|
||||
"@mixmark-io/domino": ["@mixmark-io/domino@2.2.0", "", {}, "sha512-Y28PR25bHXUg88kCV7nivXrP2Nj2RueZ3/l/jdx6J9f8J4nsEGcgX0Qe6lt7Pa+J79+kPiJU3LguR6O/6zrLOw=="],
|
||||
|
||||
"@modelcontextprotocol/sdk": ["@modelcontextprotocol/sdk@1.29.0", "", { "dependencies": { "@hono/node-server": "^1.19.9", "ajv": "^8.17.1", "ajv-formats": "^3.0.1", "content-type": "^1.0.5", "cors": "^2.8.5", "cross-spawn": "^7.0.5", "eventsource": "^3.0.2", "eventsource-parser": "^3.0.0", "express": "^5.2.1", "express-rate-limit": "^8.2.1", "hono": "^4.11.4", "jose": "^6.1.3", "json-schema-typed": "^8.0.2", "pkce-challenge": "^5.0.0", "raw-body": "^3.0.0", "zod": "^3.25 || ^4.0", "zod-to-json-schema": "^3.25.1" }, "peerDependencies": { "@cfworker/json-schema": "^4.1.1" }, "optionalPeers": ["@cfworker/json-schema"] }, "sha512-zo37mZA9hJWpULgkRpowewez1y6ML5GsXJPY8FI0tBBCd77HEvza4jDqRKOXgHNn867PVGCyTdzqpz0izu5ZjQ=="],
|
||||
@@ -345,6 +462,8 @@
|
||||
|
||||
"@types/react": ["@types/react@19.2.14", "", { "dependencies": { "csstype": "^3.2.2" } }, "sha512-ilcTH/UniCkMdtexkoCN0bI7pMcJDvmQFPvuPvmEaYA/NSfFTAgdUSLAoVjaRJm7+6PvcM+q1zYOwS4wTYMF9w=="],
|
||||
|
||||
"@vscode/ripgrep": ["@vscode/ripgrep@1.17.1", "", { "dependencies": { "https-proxy-agent": "^7.0.2", "proxy-from-env": "^1.1.0", "yauzl": "^2.9.2" } }, "sha512-xTs7DGyAO3IsJYOCTBP8LnTvPiYVKEuyv8s0xyJDBXfs8rhBfqnZPvb6xDT+RnwWzcXqW27xLS/aGrkjX7lNWw=="],
|
||||
|
||||
"accepts": ["accepts@2.0.0", "", { "dependencies": { "mime-types": "^3.0.0", "negotiator": "^1.0.0" } }, "sha512-5cvg6CtKwfgdmVqY1WIiXKc3Q1bkRqGLi+2W/6ao+6Y7gu/RCwRuAhGEzh5B4KlszSuTLgZYuqFqo5bImjNKng=="],
|
||||
|
||||
"agent-base": ["agent-base@7.1.4", "", {}, "sha512-MnA+YT8fwfJPgBx3m60MNqakm30XOkyIoH1y6huTQvC0PwZG7ki8NacLBcrPbNoo8vEZy7Jpuk7+jMO+CUovTQ=="],
|
||||
@@ -363,7 +482,7 @@
|
||||
|
||||
"auto-bind": ["auto-bind@5.0.1", "", {}, "sha512-ooviqdwwgfIfNmDwo94wlshcdzfO64XV0Cg6oDsDYBJfITDz1EngD2z7DkbvCWn+XIMsIqW27sEVF6qcpJrRcg=="],
|
||||
|
||||
"axios": ["axios@1.14.0", "", { "dependencies": { "follow-redirects": "^1.15.11", "form-data": "^4.0.5", "proxy-from-env": "^2.1.0" } }, "sha512-3Y8yrqLSwjuzpXuZ0oIYZ/XGgLwUIBU3uLvbcpb0pidD9ctpShJd43KSlEEkVQg6DS0G9NKyzOvBfUtDKEyHvQ=="],
|
||||
"axios": ["axios@1.15.0", "", { "dependencies": { "follow-redirects": "^1.15.11", "form-data": "^4.0.5", "proxy-from-env": "^2.1.0" } }, "sha512-wWyJDlAatxk30ZJer+GeCWS209sA42X+N5jU2jy6oHTp7ufw8uzUTVFBX9+wTfAlhiJXGS0Bq7X6efruWjuK9Q=="],
|
||||
|
||||
"base64-js": ["base64-js@1.5.1", "", {}, "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA=="],
|
||||
|
||||
@@ -375,6 +494,8 @@
|
||||
|
||||
"bowser": ["bowser@2.14.1", "", {}, "sha512-tzPjzCxygAKWFOJP011oxFHs57HzIhOEracIgAePE4pqB3LikALKnSzUyU4MGs9/iCEUuHlAJTjTc5M+u7YEGg=="],
|
||||
|
||||
"buffer-crc32": ["buffer-crc32@0.2.13", "", {}, "sha512-VO9Ht/+p3SN7SKWqcrgEzjGbRSJYTx+Q1pTQC0wrWqHx0vpJraQ6GtHx8tvcg1rlK1byhU5gccxgOgj7B0TDkQ=="],
|
||||
|
||||
"buffer-equal-constant-time": ["buffer-equal-constant-time@1.0.1", "", {}, "sha512-zRpUiDwd/xk6ADqPMATG8vc9VPrkck7T07OIx0gnjmJAnHnTVXNQG3vfvWNuiZIkwu9KrKdA1iJKfsfTVxE6NA=="],
|
||||
|
||||
"bun-types": ["bun-types@1.3.11", "", { "dependencies": { "@types/node": "*" } }, "sha512-1KGPpoxQWl9f6wcZh57LvrPIInQMn2TQ7jsgxqpRzg+l0QPOFvJVH7HmvHo/AiPgwXy+/Thf6Ov3EdVn1vOabg=="],
|
||||
@@ -395,7 +516,7 @@
|
||||
|
||||
"cli-highlight": ["cli-highlight@2.1.11", "", { "dependencies": { "chalk": "^4.0.0", "highlight.js": "^10.7.1", "mz": "^2.4.0", "parse5": "^5.1.1", "parse5-htmlparser2-tree-adapter": "^6.0.0", "yargs": "^16.0.0" }, "bin": { "highlight": "bin/highlight" } }, "sha512-9KDcoEVwyUXrjcJNvHD0NFc/hiwe/WPVYIleQh2O1N2Zro5gWJZ/K+3DGn8w8P/F6FxOgzyC5bxDyHIgCSPhGg=="],
|
||||
|
||||
"cliui": ["cliui@7.0.4", "", { "dependencies": { "string-width": "^4.2.0", "strip-ansi": "^6.0.0", "wrap-ansi": "^7.0.0" } }, "sha512-OcRE68cOsVMXp1Yvonl/fzkQOyjLSu/8bhPDfQt0e0/Eb283TKP20Fs2MqoPsr9SwA595rRCA+QMzYc9nBP+JQ=="],
|
||||
"cliui": ["cliui@8.0.1", "", { "dependencies": { "string-width": "^4.2.0", "strip-ansi": "^6.0.1", "wrap-ansi": "^7.0.0" } }, "sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ=="],
|
||||
|
||||
"code-excerpt": ["code-excerpt@4.0.0", "", { "dependencies": { "convert-to-spaces": "^2.0.1" } }, "sha512-xxodCmBen3iy2i0WtAK8FlFNrRzjUqjRsMfho58xT/wvZU1YTM3fCnRjcy1gJPMepaRlgm/0e6w8SpWHpn3/cA=="],
|
||||
|
||||
@@ -433,12 +554,16 @@
|
||||
|
||||
"depd": ["depd@2.0.0", "", {}, "sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw=="],
|
||||
|
||||
"diff": ["diff@7.0.0", "", {}, "sha512-PJWHUb1RFevKCwaFA9RlG5tCd+FO5iRh9A8HEtkmBH2Li03iJriB6m6JIN4rGz3K3JLawI7/veA1xzRKP6ISBw=="],
|
||||
"detect-libc": ["detect-libc@2.1.2", "", {}, "sha512-Btj2BOOO83o3WyH59e8MgXsxEQVcarkUOpEYrubB0urwnN10yQ364rsiByU11nZlqWYZm05i/of7io4mzihBtQ=="],
|
||||
|
||||
"diff": ["diff@8.0.3", "", {}, "sha512-qejHi7bcSD4hQAZE0tNAawRK1ZtafHDmMTMkrrIGgSLl7hTnQHmKCeB45xAcbfTqK2zowkM3j3bHt/4b/ARbYQ=="],
|
||||
|
||||
"dijkstrajs": ["dijkstrajs@1.0.3", "", {}, "sha512-qiSlmBq9+BCdCA/L46dw8Uy93mloxsPSbwnm5yrKn2vMPiy8KyAskTF6zuV/j5BMsmOGZDPs7KjU+mjb670kfA=="],
|
||||
|
||||
"dom-mutator": ["dom-mutator@0.6.0", "", {}, "sha512-iCt9o0aYfXMUkz/43ZOAUFQYotjGB+GNbYJiJdz4TgXkyToXbbRy5S6FbTp72lRBtfpUMwEc1KmpFEU4CZeoNg=="],
|
||||
|
||||
"duck-duck-scrape": ["duck-duck-scrape@2.2.7", "", { "dependencies": { "html-entities": "^2.3.3", "needle": "^3.2.0" } }, "sha512-BEcglwnfx5puJl90KQfX+Q2q5vCguqyMpZcSRPBWk8OY55qWwV93+E+7DbIkrGDW4qkqPfUvtOUdi0lXz6lEMQ=="],
|
||||
|
||||
"dunder-proto": ["dunder-proto@1.0.1", "", { "dependencies": { "call-bind-apply-helpers": "^1.0.1", "es-errors": "^1.3.0", "gopd": "^1.2.0" } }, "sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A=="],
|
||||
|
||||
"ecdsa-sig-formatter": ["ecdsa-sig-formatter@1.0.11", "", { "dependencies": { "safe-buffer": "^5.0.1" } }, "sha512-nagl3RYrbNv6kQkeJIpt6NJZy8twLB/2vtz6yN9Z4vRKHN4/QZJIEbqohALSgwKdnksuY3k5Addp5lg8sVoVcQ=="],
|
||||
@@ -459,6 +584,8 @@
|
||||
|
||||
"es-set-tostringtag": ["es-set-tostringtag@2.1.0", "", { "dependencies": { "es-errors": "^1.3.0", "get-intrinsic": "^1.2.6", "has-tostringtag": "^1.0.2", "hasown": "^2.0.2" } }, "sha512-j6vWzfrGVfyXxge+O0x5sh6cvxAog0a/4Rdd2K36zCMV5eJ+/+tOAngRO8cODMNWbVRdVlmGZQL2YS3yR8bIUA=="],
|
||||
|
||||
"esbuild": ["esbuild@0.27.7", "", { "optionalDependencies": { "@esbuild/aix-ppc64": "0.27.7", "@esbuild/android-arm": "0.27.7", "@esbuild/android-arm64": "0.27.7", "@esbuild/android-x64": "0.27.7", "@esbuild/darwin-arm64": "0.27.7", "@esbuild/darwin-x64": "0.27.7", "@esbuild/freebsd-arm64": "0.27.7", "@esbuild/freebsd-x64": "0.27.7", "@esbuild/linux-arm": "0.27.7", "@esbuild/linux-arm64": "0.27.7", "@esbuild/linux-ia32": "0.27.7", "@esbuild/linux-loong64": "0.27.7", "@esbuild/linux-mips64el": "0.27.7", "@esbuild/linux-ppc64": "0.27.7", "@esbuild/linux-riscv64": "0.27.7", "@esbuild/linux-s390x": "0.27.7", "@esbuild/linux-x64": "0.27.7", "@esbuild/netbsd-arm64": "0.27.7", "@esbuild/netbsd-x64": "0.27.7", "@esbuild/openbsd-arm64": "0.27.7", "@esbuild/openbsd-x64": "0.27.7", "@esbuild/openharmony-arm64": "0.27.7", "@esbuild/sunos-x64": "0.27.7", "@esbuild/win32-arm64": "0.27.7", "@esbuild/win32-ia32": "0.27.7", "@esbuild/win32-x64": "0.27.7" }, "bin": { "esbuild": "bin/esbuild" } }, "sha512-IxpibTjyVnmrIQo5aqNpCgoACA/dTKLTlhMHihVHhdkxKyPO1uBBthumT0rdHmcsk9uMonIWS0m4FljWzILh3w=="],
|
||||
|
||||
"escalade": ["escalade@3.2.0", "", {}, "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA=="],
|
||||
|
||||
"escape-html": ["escape-html@1.0.3", "", {}, "sha512-NiSupZ4OeuGwr68lGIeym/ksIZMJodUGOSCZ/FSnTxcrekbvqrgdUxlJOMpijaKZVjAJrWrGs/6Jy8OMuyj9ow=="],
|
||||
@@ -487,6 +614,8 @@
|
||||
|
||||
"fast-xml-parser": ["fast-xml-parser@5.5.8", "", { "dependencies": { "fast-xml-builder": "^1.1.4", "path-expression-matcher": "^1.2.0", "strnum": "^2.2.0" }, "bin": { "fxparser": "src/cli/cli.js" } }, "sha512-Z7Fh2nVQSb2d+poDViM063ix2ZGt9jmY1nWhPfHBOK2Hgnb/OW3P4Et3P/81SEej0J7QbWtJqxO05h8QYfK7LQ=="],
|
||||
|
||||
"fd-slicer": ["fd-slicer@1.1.0", "", { "dependencies": { "pend": "~1.2.0" } }, "sha512-cE1qsB/VwyQozZ+q1dGxR8LBYNZeofhEdUNGSMbQD3Gw2lAzX9Zb3uIU6Ebc/Fmyjo9AWWfnn0AUCHqtevs/8g=="],
|
||||
|
||||
"fflate": ["fflate@0.8.2", "", {}, "sha512-cPJU47OaAoCbg0pBvzsgpTPhmhqI5eJjh/JIu8tPj5q+T7iLvW/JAYUqmE7KOB4R1ZyEhzBaIQpQpardBF5z8A=="],
|
||||
|
||||
"figures": ["figures@6.1.0", "", { "dependencies": { "is-unicode-supported": "^2.0.0" } }, "sha512-d+l3qxjSesT4V7v2fh+QnmFnUWv9lSpjarhShNTgBOfA0ttejbQUAlHLitbjkoRiDulW0OPoQPYIGhIC8ohejg=="],
|
||||
@@ -495,6 +624,8 @@
|
||||
|
||||
"find-up": ["find-up@4.1.0", "", { "dependencies": { "locate-path": "^5.0.0", "path-exists": "^4.0.0" } }, "sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw=="],
|
||||
|
||||
"firecrawl": ["firecrawl@4.16.0", "", { "dependencies": { "axios": "^1.13.5", "typescript-event-target": "^1.1.1", "zod": "^3.23.8", "zod-to-json-schema": "^3.23.0" } }, "sha512-7SJ/FWhZBtW2gTCE/BsvU+gbfIpfTq+D9IH82l9MacauLVptaY6EdYAhrK3YSMC9yr5NxvxRcpZKcXG/nqjiiQ=="],
|
||||
|
||||
"follow-redirects": ["follow-redirects@1.15.11", "", {}, "sha512-deG2P0JfjrTxl50XGCDyfI97ZGVCxIpfKYmfyrQ54n5FO/0gfIES8C/Psl6kWVDolizcaaxZJnTS0QSMxvnsBQ=="],
|
||||
|
||||
"form-data": ["form-data@4.0.5", "", { "dependencies": { "asynckit": "^0.4.0", "combined-stream": "^1.0.8", "es-set-tostringtag": "^2.1.0", "hasown": "^2.0.2", "mime-types": "^2.1.12" } }, "sha512-8RipRLol37bNs2bhoV67fiTEvdTrbMUYcFTiy3+wuuOnUog2QBHCZWXDRijWQfAkhBj2Uf5UnVaiWwA5vdd82w=="],
|
||||
@@ -503,6 +634,8 @@
|
||||
|
||||
"fresh": ["fresh@2.0.0", "", {}, "sha512-Rx/WycZ60HOaqLKAi6cHRKKI7zxWbJ31MhntmtwMoaTeF7XFH9hhBp8vITaMidfljRQ6eYWCKkaTK+ykVJHP2A=="],
|
||||
|
||||
"fsevents": ["fsevents@2.3.3", "", { "os": "darwin" }, "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw=="],
|
||||
|
||||
"function-bind": ["function-bind@1.1.2", "", {}, "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA=="],
|
||||
|
||||
"fuse.js": ["fuse.js@7.1.0", "", {}, "sha512-trLf4SzuuUxfusZADLINj+dE8clK1frKdmqiJNb1Es75fmI5oY6X2mxLVUciLLjxqw/xr72Dhy+lER6dGd02FQ=="],
|
||||
@@ -521,6 +654,8 @@
|
||||
|
||||
"get-stream": ["get-stream@9.0.1", "", { "dependencies": { "@sec-ant/readable-stream": "^0.4.1", "is-stream": "^4.0.1" } }, "sha512-kVCxPF3vQM/N0B1PmoqVUqgHP+EeVjmZSQn+1oCRPxd2P21P2F19lIgbR3HBosbB1PUhOAoctJnfEn2GbN2eZA=="],
|
||||
|
||||
"get-tsconfig": ["get-tsconfig@4.13.7", "", { "dependencies": { "resolve-pkg-maps": "^1.0.0" } }, "sha512-7tN6rFgBlMgpBML5j8typ92BKFi2sFQvIdpAqLA2beia5avZDrMs0FLZiM5etShWq5irVyGcGMEA1jcDaK7A/Q=="],
|
||||
|
||||
"google-auth-library": ["google-auth-library@9.15.1", "", { "dependencies": { "base64-js": "^1.3.0", "ecdsa-sig-formatter": "^1.0.11", "gaxios": "^6.1.1", "gcp-metadata": "^6.1.0", "gtoken": "^7.0.0", "jws": "^4.0.0" } }, "sha512-Jb6Z0+nvECVz+2lzSMt9u98UsoakXxA2HGHMCxh+so3n90XgYWkq5dur19JAJV7ONiJY22yBTyJB1TSkvPq9Ng=="],
|
||||
|
||||
"google-logging-utils": ["google-logging-utils@0.0.2", "", {}, "sha512-NEgUnEcBiP5HrPzufUkBzJOD/Sxsco3rLNo1F1TNf7ieU8ryUzBhqba8r756CjLX7rn3fHl6iLEwPYuqpoKgQQ=="],
|
||||
@@ -543,6 +678,8 @@
|
||||
|
||||
"hono": ["hono@4.12.9", "", {}, "sha512-wy3T8Zm2bsEvxKZM5w21VdHDDcwVS1yUFFY6i8UobSsKfFceT7TOwhbhfKsDyx7tYQlmRM5FLpIuYvNFyjctiA=="],
|
||||
|
||||
"html-entities": ["html-entities@2.6.0", "", {}, "sha512-kig+rMn/QOVRvr7c86gQ8lWXq+Hkv6CbAH1hLu+RG338StTpE8Z0b44SDVaqVu7HGKf27frdmUYEs9hTUX/cLQ=="],
|
||||
|
||||
"http-errors": ["http-errors@2.0.1", "", { "dependencies": { "depd": "~2.0.0", "inherits": "~2.0.4", "setprototypeof": "~1.2.0", "statuses": "~2.0.2", "toidentifier": "~1.0.1" } }, "sha512-4FbRdAX+bSdmo4AUFuS0WNiPz8NgFt+r8ThgNWmlrjQjt1Q7ZR9+zTlce2859x4KSXrwIsaeTqDoKQmtP8pLmQ=="],
|
||||
|
||||
"https-proxy-agent": ["https-proxy-agent@7.0.6", "", { "dependencies": { "agent-base": "^7.1.2", "debug": "4" } }, "sha512-vK9P5/iUfdl95AI+JVyUuIcVtd4ofvtrOr3HNtM2yxC9bnMbEdp3x01OhQNnjb8IJYi38VlTE3mBXwcfvywuSw=="],
|
||||
@@ -591,7 +728,7 @@
|
||||
|
||||
"locate-path": ["locate-path@5.0.0", "", { "dependencies": { "p-locate": "^4.1.0" } }, "sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g=="],
|
||||
|
||||
"lodash-es": ["lodash-es@4.17.23", "", {}, "sha512-kVI48u3PZr38HdYz98UmfPnXl2DXrpdctLrFLCd3kOx1xUkOmpFPx7gCWWM5MPkL/fD8zb+Ph0QzjGFs4+hHWg=="],
|
||||
"lodash-es": ["lodash-es@4.18.1", "", {}, "sha512-J8xewKD/Gk22OZbhpOVSwcs60zhd95ESDwezOFuA3/099925PdHJ7OFHNTGtajL3AlZkykD32HykiMo+BIBI8A=="],
|
||||
|
||||
"lodash.camelcase": ["lodash.camelcase@4.3.0", "", {}, "sha512-TwuEnCnxbc3rAvhf/LbG7tJUDzhqXyFnv3dtzLOPgCG/hODL7WFnsbwktkD7yUV0RrreP/l1PALq/YSg6VvjlA=="],
|
||||
|
||||
@@ -617,6 +754,8 @@
|
||||
|
||||
"mz": ["mz@2.7.0", "", { "dependencies": { "any-promise": "^1.0.0", "object-assign": "^4.0.1", "thenify-all": "^1.0.0" } }, "sha512-z81GNO7nnYMEhrGh9LeymoE4+Yr0Wn5McHIZMK5cfQCl+NDX08sCZgUc9/6MHni9IWuFLm1Z3HTCXu2z9fN62Q=="],
|
||||
|
||||
"needle": ["needle@3.5.0", "", { "dependencies": { "iconv-lite": "^0.6.3", "sax": "^1.2.4" }, "bin": { "needle": "bin/needle" } }, "sha512-jaQyPKKk2YokHrEg+vFDYxXIHTCBgiZwSHOoVx/8V3GIBS8/VN6NdVRmg8q1ERtPkMvmOvebsgga4sAj5hls/w=="],
|
||||
|
||||
"negotiator": ["negotiator@1.0.0", "", {}, "sha512-8Ofs/AUQh8MaEcrlq5xOX0CQ9ypTF5dl78mjlMNfOK08fzpgTHQRQPBxcPlEtIw0yRpws+Zo/3r+5WRby7u3Gg=="],
|
||||
|
||||
"node-fetch": ["node-fetch@2.7.0", "", { "dependencies": { "whatwg-url": "^5.0.0" }, "peerDependencies": { "encoding": "^0.1.0" }, "optionalPeers": ["encoding"] }, "sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A=="],
|
||||
@@ -655,6 +794,8 @@
|
||||
|
||||
"path-to-regexp": ["path-to-regexp@8.4.1", "", {}, "sha512-fvU78fIjZ+SBM9YwCknCvKOUKkLVqtWDVctl0s7xIqfmfb38t2TT4ZU2gHm+Z8xGwgW+QWEU3oQSAzIbo89Ggw=="],
|
||||
|
||||
"pend": ["pend@1.2.0", "", {}, "sha512-F3asv42UuXchdzt+xXqfW1OGlVBe+mxa2mqI0pg5yAHZPvFmY3Y6drSf/GQ1A86WgWEN9Kzh/WrgKa6iGcHXLg=="],
|
||||
|
||||
"picomatch": ["picomatch@4.0.4", "", {}, "sha512-QP88BAKvMam/3NxH6vj2o21R6MjxZUAd6nlwAS/pnGvN9IVLocLHxGYIzFhg6fUQ+5th6P4dv4eW9jX3DSIj7A=="],
|
||||
|
||||
"pkce-challenge": ["pkce-challenge@5.0.1", "", {}, "sha512-wQ0b/W4Fr01qtpHlqSqspcj3EhBvimsdh0KlHhH8HRZnMsEa0ea2fTULOXOS9ccQr3om+GcGRk4e+isrZWV8qQ=="],
|
||||
@@ -669,7 +810,7 @@
|
||||
|
||||
"proxy-addr": ["proxy-addr@2.0.7", "", { "dependencies": { "forwarded": "0.2.0", "ipaddr.js": "1.9.1" } }, "sha512-llQsMLSUDUPT44jdrU/O37qlnifitDP+ZwrmmZcoSKyLKvtZxpyV0n2/bD/N4tBAAZ/gJEdZU7KMraoK1+XYAg=="],
|
||||
|
||||
"proxy-from-env": ["proxy-from-env@2.1.0", "", {}, "sha512-cJ+oHTW1VAEa8cJslgmUZrc+sjRKgAKl3Zyse6+PV38hZe/V6Z14TbCuXcan9F9ghlz4QrFr2c92TNF82UkYHA=="],
|
||||
"proxy-from-env": ["proxy-from-env@1.1.0", "", {}, "sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg=="],
|
||||
|
||||
"qrcode": ["qrcode@1.5.4", "", { "dependencies": { "dijkstrajs": "^1.0.1", "pngjs": "^5.0.0", "yargs": "^15.3.1" }, "bin": { "qrcode": "bin/qrcode" } }, "sha512-1ca71Zgiu6ORjHqFBDpnSMTR2ReToX4l1Au1VFLyVeBTFavzQnv5JxMFr3ukHVKpSrSA2MCk0lNJSykjUfz7Zg=="],
|
||||
|
||||
@@ -693,6 +834,8 @@
|
||||
|
||||
"require-main-filename": ["require-main-filename@2.0.0", "", {}, "sha512-NKN5kMDylKuldxYLSUfrbo5Tuzh4hd+2E8NPPX02mZtn1VuREQToYe/ZdlJy+J3uCpfaiGF05e7B8W0iXbQHmg=="],
|
||||
|
||||
"resolve-pkg-maps": ["resolve-pkg-maps@1.0.0", "", {}, "sha512-seS2Tj26TBVOC2NIc2rOe2y2ZO7efxITtLZcGSOnHHNOQ7CkiUBfw0Iw2ck6xkIhPwLhKNLS8BO+hEpngQlqzw=="],
|
||||
|
||||
"retry": ["retry@0.12.0", "", {}, "sha512-9LkiTwjUh6rT555DtE9rTX+BKByPfrMzEAtnlEtdEwr3Nkffwiihqe2bWADg+OQRjt9gl6ICdmB/ZFDCGAtSow=="],
|
||||
|
||||
"router": ["router@2.2.0", "", { "dependencies": { "debug": "^4.4.0", "depd": "^2.0.0", "is-promise": "^4.0.0", "parseurl": "^1.3.3", "path-to-regexp": "^8.0.0" } }, "sha512-nLTrUKm2UyiL7rlhapu/Zl45FwNgkZGaCpZbIHajDYgwlJCOzLSk+cIPAnsEqV955GjILJnKbdQC1nVPz+gAYQ=="],
|
||||
@@ -701,6 +844,8 @@
|
||||
|
||||
"safer-buffer": ["safer-buffer@2.1.2", "", {}, "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg=="],
|
||||
|
||||
"sax": ["sax@1.6.0", "", {}, "sha512-6R3J5M4AcbtLUdZmRv2SygeVaM7IhrLXu9BmnOGmmACak8fiUtOsYNWUS4uK7upbmHIBbLBeFeI//477BKLBzA=="],
|
||||
|
||||
"scheduler": ["scheduler@0.27.0", "", {}, "sha512-eNv+WrVbKu1f3vbYJT/xtiF5syA5HPIMtf9IgY/nKg0sWqzAUEvqY/xm7OcZc/qafLx/iO9FgOmeSAp4v5ti/Q=="],
|
||||
|
||||
"semver": ["semver@7.7.4", "", { "bin": { "semver": "bin/semver.js" } }, "sha512-vFKC2IEtQnVhpT78h1Yp8wzwrf8CM+MzKMHGJZfBtzhZNycRFnXsHk6E5TxIkkMsgNS7mdX3AGB7x2QM2di4lA=="],
|
||||
@@ -713,6 +858,8 @@
|
||||
|
||||
"setprototypeof": ["setprototypeof@1.2.0", "", {}, "sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw=="],
|
||||
|
||||
"sharp": ["sharp@0.34.5", "", { "dependencies": { "@img/colour": "^1.0.0", "detect-libc": "^2.1.2", "semver": "^7.7.3" }, "optionalDependencies": { "@img/sharp-darwin-arm64": "0.34.5", "@img/sharp-darwin-x64": "0.34.5", "@img/sharp-libvips-darwin-arm64": "1.2.4", "@img/sharp-libvips-darwin-x64": "1.2.4", "@img/sharp-libvips-linux-arm": "1.2.4", "@img/sharp-libvips-linux-arm64": "1.2.4", "@img/sharp-libvips-linux-ppc64": "1.2.4", "@img/sharp-libvips-linux-riscv64": "1.2.4", "@img/sharp-libvips-linux-s390x": "1.2.4", "@img/sharp-libvips-linux-x64": "1.2.4", "@img/sharp-libvips-linuxmusl-arm64": "1.2.4", "@img/sharp-libvips-linuxmusl-x64": "1.2.4", "@img/sharp-linux-arm": "0.34.5", "@img/sharp-linux-arm64": "0.34.5", "@img/sharp-linux-ppc64": "0.34.5", "@img/sharp-linux-riscv64": "0.34.5", "@img/sharp-linux-s390x": "0.34.5", "@img/sharp-linux-x64": "0.34.5", "@img/sharp-linuxmusl-arm64": "0.34.5", "@img/sharp-linuxmusl-x64": "0.34.5", "@img/sharp-wasm32": "0.34.5", "@img/sharp-win32-arm64": "0.34.5", "@img/sharp-win32-ia32": "0.34.5", "@img/sharp-win32-x64": "0.34.5" } }, "sha512-Ou9I5Ft9WNcCbXrU9cMgPBcCK8LiwLqcbywW3t4oDV37n1pzpuNLsYiAV8eODnjbtQlSDwZ2cUEeQz4E54Hltg=="],
|
||||
|
||||
"shebang-command": ["shebang-command@2.0.0", "", { "dependencies": { "shebang-regex": "^3.0.0" } }, "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA=="],
|
||||
|
||||
"shebang-regex": ["shebang-regex@3.0.0", "", {}, "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A=="],
|
||||
@@ -759,6 +906,8 @@
|
||||
|
||||
"tslib": ["tslib@1.14.1", "", {}, "sha512-Xni35NKzjgMrwevysHTCArtLDpPvye8zV/0E4EyYn43P7/7qvQwPh9BGkHewbMulVntbigmcT7rdX3BNo9wRJg=="],
|
||||
|
||||
"tsx": ["tsx@4.21.0", "", { "dependencies": { "esbuild": "~0.27.0", "get-tsconfig": "^4.7.5" }, "optionalDependencies": { "fsevents": "~2.3.3" }, "bin": { "tsx": "dist/cli.mjs" } }, "sha512-5C1sg4USs1lfG0GFb2RLXsdpXqBSEhAaA/0kPL01wxzpMqLILNxIxIOKiILz+cdg/pLnOUxFYOR5yhHU666wbw=="],
|
||||
|
||||
"turndown": ["turndown@7.2.2", "", { "dependencies": { "@mixmark-io/domino": "^2.2.0" } }, "sha512-1F7db8BiExOKxjSMU2b7if62D/XOyQyZbPKq/nUwopfgnHlqXHqQ0lvfUTeUIr1lZJzOPFn43dODyMSIfvWRKQ=="],
|
||||
|
||||
"type-fest": ["type-fest@4.41.0", "", {}, "sha512-TeTSQ6H5YHvpqVwBRcnLDCBnDOHWYu7IvGbHT6N8AOymcr9PJGjc1GTtiWZTYg0NCgYwvnYWEkVChQAr9bjfwA=="],
|
||||
@@ -767,6 +916,8 @@
|
||||
|
||||
"typescript": ["typescript@5.9.3", "", { "bin": { "tsc": "bin/tsc", "tsserver": "bin/tsserver" } }, "sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw=="],
|
||||
|
||||
"typescript-event-target": ["typescript-event-target@1.1.2", "", {}, "sha512-TvkrTUpv7gCPlcnSoEwUVUBwsdheKm+HF5u2tPAKubkIGMfovdSizCTaZRY/NhR8+Ijy8iZZUapbVQAsNrkFrw=="],
|
||||
|
||||
"undici": ["undici@7.24.6", "", {}, "sha512-Xi4agocCbRzt0yYMZGMA6ApD7gvtUFaxm4ZmeacWI4cZxaF6C+8I8QfofC20NAePiB/IcvZmzkJ7XPa471AEtA=="],
|
||||
|
||||
"undici-types": ["undici-types@7.18.2", "", {}, "sha512-AsuCzffGHJybSaRrmr5eHr81mwJU3kjw6M+uprWvCXiNeN9SOGwQ3Jn8jb8m3Z6izVgknn1R0FTCEAP2QrLY/w=="],
|
||||
@@ -807,9 +958,11 @@
|
||||
|
||||
"yaml": ["yaml@2.8.3", "", { "bin": { "yaml": "bin.mjs" } }, "sha512-AvbaCLOO2Otw/lW5bmh9d/WEdcDFdQp2Z2ZUH3pX9U2ihyUY0nvLv7J6TrWowklRGPYbB/IuIMfYgxaCPg5Bpg=="],
|
||||
|
||||
"yargs": ["yargs@16.2.0", "", { "dependencies": { "cliui": "^7.0.2", "escalade": "^3.1.1", "get-caller-file": "^2.0.5", "require-directory": "^2.1.1", "string-width": "^4.2.0", "y18n": "^5.0.5", "yargs-parser": "^20.2.2" } }, "sha512-D1mvvtDG0L5ft/jGWkLpG1+m0eQxOfaBvTNELraWj22wSVUMWxZUvYgJYcKh6jGGIkJFhH4IZPQhR4TKpc8mBw=="],
|
||||
"yargs": ["yargs@17.7.2", "", { "dependencies": { "cliui": "^8.0.1", "escalade": "^3.1.1", "get-caller-file": "^2.0.5", "require-directory": "^2.1.1", "string-width": "^4.2.3", "y18n": "^5.0.5", "yargs-parser": "^21.1.1" } }, "sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w=="],
|
||||
|
||||
"yargs-parser": ["yargs-parser@20.2.9", "", {}, "sha512-y11nGElTIV+CT3Zv9t7VKl+Q3hTQoT9a1Qzezhhl6Rp21gJ/IVTW7Z3y9EWXhuUBC2Shnf+DX0antecpAwSP8w=="],
|
||||
"yargs-parser": ["yargs-parser@21.1.1", "", {}, "sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw=="],
|
||||
|
||||
"yauzl": ["yauzl@2.10.0", "", { "dependencies": { "buffer-crc32": "~0.2.3", "fd-slicer": "~1.1.0" } }, "sha512-p4a9I6X6nu6IhoGmBqAcbJy1mlC4j27vEPZX9F4L4/vZT3Lyq1VkFHw/V/PUcB9Buo+DG3iHkT0x3Qya58zc3g=="],
|
||||
|
||||
"yoctocolors": ["yoctocolors@2.1.2", "", {}, "sha512-CzhO+pFNo8ajLM2d2IW/R93ipy99LWjtwblvC1RsoSUMZgyLbYFr221TnSNT7GjGdYui6P459mw9JH/g/zW2ug=="],
|
||||
|
||||
@@ -1007,7 +1160,9 @@
|
||||
|
||||
"@aws-sdk/xml-builder/tslib": ["tslib@2.8.1", "", {}, "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w=="],
|
||||
|
||||
"@grpc/proto-loader/yargs": ["yargs@17.7.2", "", { "dependencies": { "cliui": "^8.0.1", "escalade": "^3.1.1", "get-caller-file": "^2.0.5", "require-directory": "^2.1.1", "string-width": "^4.2.3", "y18n": "^5.0.5", "yargs-parser": "^21.1.1" } }, "sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w=="],
|
||||
"@emnapi/runtime/tslib": ["tslib@2.8.1", "", {}, "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w=="],
|
||||
|
||||
"@mendable/firecrawl-js/axios": ["axios@1.14.0", "", { "dependencies": { "follow-redirects": "^1.15.11", "form-data": "^4.0.5", "proxy-from-env": "^2.1.0" } }, "sha512-3Y8yrqLSwjuzpXuZ0oIYZ/XGgLwUIBU3uLvbcpb0pidD9ctpShJd43KSlEEkVQg6DS0G9NKyzOvBfUtDKEyHvQ=="],
|
||||
|
||||
"@opentelemetry/exporter-trace-otlp-grpc/@opentelemetry/core": ["@opentelemetry/core@1.30.1", "", { "dependencies": { "@opentelemetry/semantic-conventions": "1.28.0" }, "peerDependencies": { "@opentelemetry/api": ">=1.0.0 <1.10.0" } }, "sha512-OOCM2C/QIURhJMuKaekP3TRBxBKxG/TWWA0TL2J6nXUtDnuCtccy49LUJF8xPFXMX+0LMcxFpCo8M9cGY1W6rQ=="],
|
||||
|
||||
@@ -1225,18 +1380,26 @@
|
||||
|
||||
"@smithy/uuid/tslib": ["tslib@2.8.1", "", {}, "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w=="],
|
||||
|
||||
"axios/proxy-from-env": ["proxy-from-env@2.1.0", "", {}, "sha512-cJ+oHTW1VAEa8cJslgmUZrc+sjRKgAKl3Zyse6+PV38hZe/V6Z14TbCuXcan9F9ghlz4QrFr2c92TNF82UkYHA=="],
|
||||
|
||||
"cli-highlight/chalk": ["chalk@4.1.2", "", { "dependencies": { "ansi-styles": "^4.1.0", "supports-color": "^7.1.0" } }, "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA=="],
|
||||
|
||||
"cli-highlight/yargs": ["yargs@16.2.0", "", { "dependencies": { "cliui": "^7.0.2", "escalade": "^3.1.1", "get-caller-file": "^2.0.5", "require-directory": "^2.1.1", "string-width": "^4.2.0", "y18n": "^5.0.5", "yargs-parser": "^20.2.2" } }, "sha512-D1mvvtDG0L5ft/jGWkLpG1+m0eQxOfaBvTNELraWj22wSVUMWxZUvYgJYcKh6jGGIkJFhH4IZPQhR4TKpc8mBw=="],
|
||||
|
||||
"cliui/string-width": ["string-width@4.2.3", "", { "dependencies": { "emoji-regex": "^8.0.0", "is-fullwidth-code-point": "^3.0.0", "strip-ansi": "^6.0.1" } }, "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g=="],
|
||||
|
||||
"cliui/strip-ansi": ["strip-ansi@6.0.1", "", { "dependencies": { "ansi-regex": "^5.0.1" } }, "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A=="],
|
||||
|
||||
"cliui/wrap-ansi": ["wrap-ansi@7.0.0", "", { "dependencies": { "ansi-styles": "^4.0.0", "string-width": "^4.1.0", "strip-ansi": "^6.0.0" } }, "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q=="],
|
||||
|
||||
"firecrawl/axios": ["axios@1.14.0", "", { "dependencies": { "follow-redirects": "^1.15.11", "form-data": "^4.0.5", "proxy-from-env": "^2.1.0" } }, "sha512-3Y8yrqLSwjuzpXuZ0oIYZ/XGgLwUIBU3uLvbcpb0pidD9ctpShJd43KSlEEkVQg6DS0G9NKyzOvBfUtDKEyHvQ=="],
|
||||
|
||||
"form-data/mime-types": ["mime-types@2.1.35", "", { "dependencies": { "mime-db": "1.52.0" } }, "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw=="],
|
||||
|
||||
"gaxios/is-stream": ["is-stream@2.0.1", "", {}, "sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg=="],
|
||||
|
||||
"needle/iconv-lite": ["iconv-lite@0.6.3", "", { "dependencies": { "safer-buffer": ">= 2.1.2 < 3.0.0" } }, "sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw=="],
|
||||
|
||||
"npm-run-path/path-key": ["path-key@4.0.0", "", {}, "sha512-haREypq7xkM7ErfgIyA0z+Bj4AGKlMSdlQE2jvJo6huWD1EdkKYV+G/T4nq0YEF2vgTT8kqMFKo1uHn950r4SQ=="],
|
||||
|
||||
"parse5-htmlparser2-tree-adapter/parse5": ["parse5@6.0.1", "", {}, "sha512-Ofn/CTFzRGTTxwpNEs9PP93gXShHcTq255nzRYSKe8AkVpZY7e1fpmTfOyoIvjP5HG7Z2ZM7VS9PPhQGW2pOpw=="],
|
||||
@@ -1279,11 +1442,7 @@
|
||||
|
||||
"@aws-sdk/nested-clients/@smithy/util-base64/@smithy/util-buffer-from": ["@smithy/util-buffer-from@4.2.2", "", { "dependencies": { "@smithy/is-array-buffer": "^4.2.2", "tslib": "^2.6.2" } }, "sha512-FDXD7cvUoFWwN6vtQfEta540Y/YBe5JneK3SoZg9bThSoOAC/eGeYEua6RkBgKjGa/sz6Y+DuBZj3+YEY21y4Q=="],
|
||||
|
||||
"@grpc/proto-loader/yargs/cliui": ["cliui@8.0.1", "", { "dependencies": { "string-width": "^4.2.0", "strip-ansi": "^6.0.1", "wrap-ansi": "^7.0.0" } }, "sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ=="],
|
||||
|
||||
"@grpc/proto-loader/yargs/string-width": ["string-width@4.2.3", "", { "dependencies": { "emoji-regex": "^8.0.0", "is-fullwidth-code-point": "^3.0.0", "strip-ansi": "^6.0.1" } }, "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g=="],
|
||||
|
||||
"@grpc/proto-loader/yargs/yargs-parser": ["yargs-parser@21.1.1", "", {}, "sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw=="],
|
||||
"@mendable/firecrawl-js/axios/proxy-from-env": ["proxy-from-env@2.1.0", "", {}, "sha512-cJ+oHTW1VAEa8cJslgmUZrc+sjRKgAKl3Zyse6+PV38hZe/V6Z14TbCuXcan9F9ghlz4QrFr2c92TNF82UkYHA=="],
|
||||
|
||||
"@opentelemetry/exporter-trace-otlp-grpc/@opentelemetry/core/@opentelemetry/semantic-conventions": ["@opentelemetry/semantic-conventions@1.28.0", "", {}, "sha512-lp4qAiMTD4sNWW4DbKLBkfiMZ4jbAboJIGOQr5DvciMRI494OapieI9qiODpOt0XBr1LjIDy1xAGAnVs5supTA=="],
|
||||
|
||||
@@ -1351,6 +1510,12 @@
|
||||
|
||||
"cli-highlight/chalk/ansi-styles": ["ansi-styles@4.3.0", "", { "dependencies": { "color-convert": "^2.0.1" } }, "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg=="],
|
||||
|
||||
"cli-highlight/yargs/cliui": ["cliui@7.0.4", "", { "dependencies": { "string-width": "^4.2.0", "strip-ansi": "^6.0.0", "wrap-ansi": "^7.0.0" } }, "sha512-OcRE68cOsVMXp1Yvonl/fzkQOyjLSu/8bhPDfQt0e0/Eb283TKP20Fs2MqoPsr9SwA595rRCA+QMzYc9nBP+JQ=="],
|
||||
|
||||
"cli-highlight/yargs/string-width": ["string-width@4.2.3", "", { "dependencies": { "emoji-regex": "^8.0.0", "is-fullwidth-code-point": "^3.0.0", "strip-ansi": "^6.0.1" } }, "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g=="],
|
||||
|
||||
"cli-highlight/yargs/yargs-parser": ["yargs-parser@20.2.9", "", {}, "sha512-y11nGElTIV+CT3Zv9t7VKl+Q3hTQoT9a1Qzezhhl6Rp21gJ/IVTW7Z3y9EWXhuUBC2Shnf+DX0antecpAwSP8w=="],
|
||||
|
||||
"cliui/string-width/emoji-regex": ["emoji-regex@8.0.0", "", {}, "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A=="],
|
||||
|
||||
"cliui/string-width/is-fullwidth-code-point": ["is-fullwidth-code-point@3.0.0", "", {}, "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg=="],
|
||||
@@ -1359,6 +1524,8 @@
|
||||
|
||||
"cliui/wrap-ansi/ansi-styles": ["ansi-styles@4.3.0", "", { "dependencies": { "color-convert": "^2.0.1" } }, "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg=="],
|
||||
|
||||
"firecrawl/axios/proxy-from-env": ["proxy-from-env@2.1.0", "", {}, "sha512-cJ+oHTW1VAEa8cJslgmUZrc+sjRKgAKl3Zyse6+PV38hZe/V6Z14TbCuXcan9F9ghlz4QrFr2c92TNF82UkYHA=="],
|
||||
|
||||
"form-data/mime-types/mime-db": ["mime-db@1.52.0", "", {}, "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg=="],
|
||||
|
||||
"qrcode/yargs/cliui": ["cliui@6.0.0", "", { "dependencies": { "string-width": "^4.2.0", "strip-ansi": "^6.0.0", "wrap-ansi": "^6.2.0" } }, "sha512-t6wbgtoCXvAzst7QgXxJYqPt0usEfbgQdftEPbLL/cvv6HPE5VgvqCuAIDR0NgU52ds6rFwqrgakNLrHEjCbrQ=="],
|
||||
@@ -1391,16 +1558,6 @@
|
||||
|
||||
"@aws-sdk/nested-clients/@smithy/util-base64/@smithy/util-buffer-from/@smithy/is-array-buffer": ["@smithy/is-array-buffer@4.2.2", "", { "dependencies": { "tslib": "^2.6.2" } }, "sha512-n6rQ4N8Jj4YTQO3YFrlgZuwKodf4zUFs7EJIWH86pSCWBaAtAGBFfCM7Wx6D2bBJ2xqFNxGBSrUWswT3M0VJow=="],
|
||||
|
||||
"@grpc/proto-loader/yargs/cliui/strip-ansi": ["strip-ansi@6.0.1", "", { "dependencies": { "ansi-regex": "^5.0.1" } }, "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A=="],
|
||||
|
||||
"@grpc/proto-loader/yargs/cliui/wrap-ansi": ["wrap-ansi@7.0.0", "", { "dependencies": { "ansi-styles": "^4.0.0", "string-width": "^4.1.0", "strip-ansi": "^6.0.0" } }, "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q=="],
|
||||
|
||||
"@grpc/proto-loader/yargs/string-width/emoji-regex": ["emoji-regex@8.0.0", "", {}, "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A=="],
|
||||
|
||||
"@grpc/proto-loader/yargs/string-width/is-fullwidth-code-point": ["is-fullwidth-code-point@3.0.0", "", {}, "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg=="],
|
||||
|
||||
"@grpc/proto-loader/yargs/string-width/strip-ansi": ["strip-ansi@6.0.1", "", { "dependencies": { "ansi-regex": "^5.0.1" } }, "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A=="],
|
||||
|
||||
"@opentelemetry/otlp-grpc-exporter-base/@opentelemetry/otlp-transformer/@opentelemetry/resources/@opentelemetry/semantic-conventions": ["@opentelemetry/semantic-conventions@1.28.0", "", {}, "sha512-lp4qAiMTD4sNWW4DbKLBkfiMZ4jbAboJIGOQr5DvciMRI494OapieI9qiODpOt0XBr1LjIDy1xAGAnVs5supTA=="],
|
||||
|
||||
"@opentelemetry/otlp-grpc-exporter-base/@opentelemetry/otlp-transformer/@opentelemetry/sdk-trace-base/@opentelemetry/semantic-conventions": ["@opentelemetry/semantic-conventions@1.28.0", "", {}, "sha512-lp4qAiMTD4sNWW4DbKLBkfiMZ4jbAboJIGOQr5DvciMRI494OapieI9qiODpOt0XBr1LjIDy1xAGAnVs5supTA=="],
|
||||
@@ -1421,6 +1578,16 @@
|
||||
|
||||
"@smithy/smithy-client/@smithy/util-stream/@smithy/node-http-handler/@smithy/querystring-builder": ["@smithy/querystring-builder@2.2.0", "", { "dependencies": { "@smithy/types": "^2.12.0", "@smithy/util-uri-escape": "^2.2.0", "tslib": "^2.6.2" } }, "sha512-L1kSeviUWL+emq3CUVSgdogoM/D9QMFaqxL/dd0X7PCNWmPXqt+ExtrBjqT0V7HLN03Vs9SuiLrG3zy3JGnE5A=="],
|
||||
|
||||
"cli-highlight/yargs/cliui/strip-ansi": ["strip-ansi@6.0.1", "", { "dependencies": { "ansi-regex": "^5.0.1" } }, "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A=="],
|
||||
|
||||
"cli-highlight/yargs/cliui/wrap-ansi": ["wrap-ansi@7.0.0", "", { "dependencies": { "ansi-styles": "^4.0.0", "string-width": "^4.1.0", "strip-ansi": "^6.0.0" } }, "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q=="],
|
||||
|
||||
"cli-highlight/yargs/string-width/emoji-regex": ["emoji-regex@8.0.0", "", {}, "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A=="],
|
||||
|
||||
"cli-highlight/yargs/string-width/is-fullwidth-code-point": ["is-fullwidth-code-point@3.0.0", "", {}, "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg=="],
|
||||
|
||||
"cli-highlight/yargs/string-width/strip-ansi": ["strip-ansi@6.0.1", "", { "dependencies": { "ansi-regex": "^5.0.1" } }, "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A=="],
|
||||
|
||||
"qrcode/yargs/cliui/strip-ansi": ["strip-ansi@6.0.1", "", { "dependencies": { "ansi-regex": "^5.0.1" } }, "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A=="],
|
||||
|
||||
"qrcode/yargs/cliui/wrap-ansi": ["wrap-ansi@6.2.0", "", { "dependencies": { "ansi-styles": "^4.0.0", "string-width": "^4.1.0", "strip-ansi": "^6.0.0" } }, "sha512-r6lPcBGxZXlIcymEu7InxDMhdW0KDxpLgoFLcguasxCaJ/SOIZwINatK9KY/tf+ZrlywOKU0UDj3ATXUBfxJXA=="],
|
||||
@@ -1433,16 +1600,16 @@
|
||||
|
||||
"yargs/string-width/strip-ansi/ansi-regex": ["ansi-regex@5.0.1", "", {}, "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ=="],
|
||||
|
||||
"@grpc/proto-loader/yargs/cliui/strip-ansi/ansi-regex": ["ansi-regex@5.0.1", "", {}, "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ=="],
|
||||
|
||||
"@grpc/proto-loader/yargs/cliui/wrap-ansi/ansi-styles": ["ansi-styles@4.3.0", "", { "dependencies": { "color-convert": "^2.0.1" } }, "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg=="],
|
||||
|
||||
"@grpc/proto-loader/yargs/string-width/strip-ansi/ansi-regex": ["ansi-regex@5.0.1", "", {}, "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ=="],
|
||||
|
||||
"@smithy/smithy-client/@smithy/util-stream/@smithy/fetch-http-handler/@smithy/querystring-builder/@smithy/util-uri-escape": ["@smithy/util-uri-escape@2.2.0", "", { "dependencies": { "tslib": "^2.6.2" } }, "sha512-jtmJMyt1xMD/d8OtbVJ2gFZOSKc+ueYJZPW20ULW1GOp/q/YIM0wNh+u8ZFao9UaIGz4WoPW8hC64qlWLIfoDA=="],
|
||||
|
||||
"@smithy/smithy-client/@smithy/util-stream/@smithy/node-http-handler/@smithy/querystring-builder/@smithy/util-uri-escape": ["@smithy/util-uri-escape@2.2.0", "", { "dependencies": { "tslib": "^2.6.2" } }, "sha512-jtmJMyt1xMD/d8OtbVJ2gFZOSKc+ueYJZPW20ULW1GOp/q/YIM0wNh+u8ZFao9UaIGz4WoPW8hC64qlWLIfoDA=="],
|
||||
|
||||
"cli-highlight/yargs/cliui/strip-ansi/ansi-regex": ["ansi-regex@5.0.1", "", {}, "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ=="],
|
||||
|
||||
"cli-highlight/yargs/cliui/wrap-ansi/ansi-styles": ["ansi-styles@4.3.0", "", { "dependencies": { "color-convert": "^2.0.1" } }, "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg=="],
|
||||
|
||||
"cli-highlight/yargs/string-width/strip-ansi/ansi-regex": ["ansi-regex@5.0.1", "", {}, "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ=="],
|
||||
|
||||
"qrcode/yargs/cliui/strip-ansi/ansi-regex": ["ansi-regex@5.0.1", "", {}, "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ=="],
|
||||
|
||||
"qrcode/yargs/cliui/wrap-ansi/ansi-styles": ["ansi-styles@4.3.0", "", { "dependencies": { "color-convert": "^2.0.1" } }, "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg=="],
|
||||
|
||||
276
docs/advanced-setup.md
Normal file
276
docs/advanced-setup.md
Normal file
@@ -0,0 +1,276 @@
|
||||
# OpenClaude Advanced Setup
|
||||
|
||||
This guide is for users who want source builds, Bun workflows, provider profiles, diagnostics, or more control over runtime behavior.
|
||||
|
||||
## Install Options
|
||||
|
||||
### Option A: npm
|
||||
|
||||
```bash
|
||||
npm install -g @gitlawb/openclaude
|
||||
```
|
||||
|
||||
### Option B: From source with Bun
|
||||
|
||||
Use Bun `1.3.11` or newer for source builds on Windows. Older Bun versions can fail during `bun run build`.
|
||||
|
||||
```bash
|
||||
git clone https://node.gitlawb.com/z6MkqDnb7Siv3Cwj7pGJq4T5EsUisECqR8KpnDLwcaZq5TPr/openclaude.git
|
||||
cd openclaude
|
||||
|
||||
bun install
|
||||
bun run build
|
||||
npm link
|
||||
```
|
||||
|
||||
### Option C: Run directly with Bun
|
||||
|
||||
```bash
|
||||
git clone https://node.gitlawb.com/z6MkqDnb7Siv3Cwj7pGJq4T5EsUisECqR8KpnDLwcaZq5TPr/openclaude.git
|
||||
cd openclaude
|
||||
|
||||
bun install
|
||||
bun run dev
|
||||
```
|
||||
|
||||
## Provider Examples
|
||||
|
||||
### OpenAI
|
||||
|
||||
```bash
|
||||
export CLAUDE_CODE_USE_OPENAI=1
|
||||
export OPENAI_API_KEY=sk-...
|
||||
export OPENAI_MODEL=gpt-4o
|
||||
```
|
||||
|
||||
### Codex via ChatGPT auth
|
||||
|
||||
`codexplan` maps to GPT-5.4 on the Codex backend with high reasoning.
|
||||
`codexspark` maps to GPT-5.3 Codex Spark for faster loops.
|
||||
|
||||
If you use the in-app provider wizard, choose `Codex OAuth` to open ChatGPT sign-in in your browser and let OpenClaude store Codex credentials securely.
|
||||
|
||||
If you already use the Codex CLI, OpenClaude reads `~/.codex/auth.json` automatically. You can also point it elsewhere with `CODEX_AUTH_JSON_PATH` or override the token directly with `CODEX_API_KEY`.
|
||||
|
||||
```bash
|
||||
export CLAUDE_CODE_USE_OPENAI=1
|
||||
export OPENAI_MODEL=codexplan
|
||||
|
||||
# optional if you do not already have ~/.codex/auth.json
|
||||
export CODEX_API_KEY=...
|
||||
|
||||
openclaude
|
||||
```
|
||||
|
||||
### DeepSeek
|
||||
|
||||
```bash
|
||||
export CLAUDE_CODE_USE_OPENAI=1
|
||||
export OPENAI_API_KEY=sk-...
|
||||
export OPENAI_BASE_URL=https://api.deepseek.com/v1
|
||||
export OPENAI_MODEL=deepseek-v4-flash
|
||||
```
|
||||
|
||||
Use `deepseek-v4-pro` when you want the stronger model. `deepseek-chat` and `deepseek-reasoner` remain available as DeepSeek's legacy API aliases.
|
||||
|
||||
### Google Gemini via OpenRouter
|
||||
|
||||
```bash
|
||||
export CLAUDE_CODE_USE_OPENAI=1
|
||||
export OPENAI_API_KEY=sk-or-...
|
||||
export OPENAI_BASE_URL=https://openrouter.ai/api/v1
|
||||
export OPENAI_MODEL=google/gemini-2.0-flash-001
|
||||
```
|
||||
|
||||
OpenRouter model availability changes over time. If a model stops working, try another current OpenRouter model before assuming the integration is broken.
|
||||
|
||||
### Ollama
|
||||
|
||||
Using `ollama launch` (recommended if you have Ollama installed):
|
||||
|
||||
```bash
|
||||
ollama launch openclaude --model llama3.3:70b
|
||||
```
|
||||
|
||||
This handles all environment setup automatically — no env vars needed. Works with any local or cloud model available in your Ollama instance.
|
||||
|
||||
Using environment variables manually:
|
||||
|
||||
```bash
|
||||
ollama pull llama3.3:70b
|
||||
|
||||
export CLAUDE_CODE_USE_OPENAI=1
|
||||
export OPENAI_BASE_URL=http://localhost:11434/v1
|
||||
export OPENAI_MODEL=llama3.3:70b
|
||||
```
|
||||
|
||||
### Atomic Chat (local, Apple Silicon)
|
||||
|
||||
```bash
|
||||
export CLAUDE_CODE_USE_OPENAI=1
|
||||
export OPENAI_BASE_URL=http://127.0.0.1:1337/v1
|
||||
export OPENAI_MODEL=your-model-name
|
||||
```
|
||||
|
||||
No API key is needed for Atomic Chat local models.
|
||||
|
||||
Or use the profile launcher:
|
||||
|
||||
```bash
|
||||
bun run dev:atomic-chat
|
||||
```
|
||||
|
||||
Download Atomic Chat from [atomic.chat](https://atomic.chat/). The app must be running with a model loaded before launching.
|
||||
|
||||
### LM Studio
|
||||
|
||||
```bash
|
||||
export CLAUDE_CODE_USE_OPENAI=1
|
||||
export OPENAI_BASE_URL=http://localhost:1234/v1
|
||||
export OPENAI_MODEL=your-model-name
|
||||
```
|
||||
|
||||
### Together AI
|
||||
|
||||
```bash
|
||||
export CLAUDE_CODE_USE_OPENAI=1
|
||||
export OPENAI_API_KEY=...
|
||||
export OPENAI_BASE_URL=https://api.together.xyz/v1
|
||||
export OPENAI_MODEL=meta-llama/Llama-3.3-70B-Instruct-Turbo
|
||||
```
|
||||
|
||||
### Groq
|
||||
|
||||
```bash
|
||||
export CLAUDE_CODE_USE_OPENAI=1
|
||||
export OPENAI_API_KEY=gsk_...
|
||||
export OPENAI_BASE_URL=https://api.groq.com/openai/v1
|
||||
export OPENAI_MODEL=llama-3.3-70b-versatile
|
||||
```
|
||||
|
||||
### Mistral
|
||||
|
||||
```bash
|
||||
export CLAUDE_CODE_USE_MISTRAL=1
|
||||
export MISTRAL_API_KEY=...
|
||||
export MISTRAL_MODEL=mistral-large-latest
|
||||
```
|
||||
|
||||
### Azure OpenAI
|
||||
|
||||
```bash
|
||||
export CLAUDE_CODE_USE_OPENAI=1
|
||||
export OPENAI_API_KEY=your-azure-key
|
||||
export OPENAI_BASE_URL=https://your-resource.openai.azure.com/openai/deployments/your-deployment/v1
|
||||
export OPENAI_MODEL=gpt-4o
|
||||
```
|
||||
|
||||
## Environment Variables
|
||||
|
||||
| Variable | Required | Description |
|
||||
|----------|----------|-------------|
|
||||
| `CLAUDE_CODE_USE_OPENAI` | Yes | Set to `1` to enable the OpenAI provider |
|
||||
| `OPENAI_API_KEY` | Yes* | Your API key (`*` not needed for local models like Ollama or Atomic Chat) |
|
||||
| `OPENAI_MODEL` | Yes | Model name such as `gpt-4o`, `deepseek-v4-flash`, or `llama3.3:70b` |
|
||||
| `OPENAI_BASE_URL` | No | API endpoint, defaulting to `https://api.openai.com/v1` |
|
||||
| `CODEX_API_KEY` | Codex only | Codex or ChatGPT access token override |
|
||||
| `CODEX_AUTH_JSON_PATH` | Codex only | Path to a Codex CLI `auth.json` file |
|
||||
| `CODEX_HOME` | Codex only | Alternative Codex home directory |
|
||||
| `OPENCLAUDE_DISABLE_CO_AUTHORED_BY` | No | Suppress the default `Co-Authored-By` trailer in generated git commits |
|
||||
| `OPENCLAUDE_LOG_TOKEN_USAGE` | No | When truthy (e.g. `verbose`), emits one JSON line on stderr per API request with input/output/cache tokens and the resolved provider. **User-facing debug output** — complements the REPL display controlled by `/config showCacheStats`. Distinct from `CLAUDE_CODE_ENABLE_TOKEN_USAGE_ATTACHMENT`, which is **model-facing** (injects context usage info into the prompt itself). Both can run together. |
|
||||
|
||||
You can also use `ANTHROPIC_MODEL` to override the model name. `OPENAI_MODEL` takes priority.
|
||||
|
||||
## Runtime Hardening
|
||||
|
||||
Use these commands to validate your setup and catch mistakes early:
|
||||
|
||||
```bash
|
||||
# quick startup sanity check
|
||||
bun run smoke
|
||||
|
||||
# validate provider env + reachability
|
||||
bun run doctor:runtime
|
||||
|
||||
# print machine-readable runtime diagnostics
|
||||
bun run doctor:runtime:json
|
||||
|
||||
# persist a diagnostics report to reports/doctor-runtime.json
|
||||
bun run doctor:report
|
||||
|
||||
# full local hardening check (smoke + runtime doctor)
|
||||
bun run hardening:check
|
||||
|
||||
# strict hardening (includes project-wide typecheck)
|
||||
bun run hardening:strict
|
||||
```
|
||||
|
||||
Notes:
|
||||
|
||||
- `doctor:runtime` fails fast if `CLAUDE_CODE_USE_OPENAI=1` with a placeholder key or a missing key for non-local providers.
|
||||
- Local providers such as `http://localhost:11434/v1`, `http://10.0.0.1:11434/v1`, and `http://127.0.0.1:1337/v1` can run without `OPENAI_API_KEY`.
|
||||
- Codex profiles validate `CODEX_API_KEY` or the Codex CLI auth file and probe `POST /responses` instead of `GET /models`.
|
||||
|
||||
## Provider Launch Profiles
|
||||
|
||||
Use profile launchers to avoid repeated environment setup:
|
||||
|
||||
```bash
|
||||
# one-time profile bootstrap (prefer viable local Ollama, otherwise OpenAI)
|
||||
bun run profile:init
|
||||
|
||||
# preview the best provider/model for your goal
|
||||
bun run profile:recommend -- --goal coding --benchmark
|
||||
|
||||
# auto-apply the best available local/openai provider/model for your goal
|
||||
bun run profile:auto -- --goal latency
|
||||
|
||||
# codex bootstrap (defaults to codexplan and ~/.codex/auth.json)
|
||||
bun run profile:codex
|
||||
|
||||
# openai bootstrap with explicit key
|
||||
bun run profile:init -- --provider openai --api-key sk-...
|
||||
|
||||
# ollama bootstrap with custom model
|
||||
bun run profile:init -- --provider ollama --model llama3.1:8b
|
||||
|
||||
# ollama bootstrap with intelligent model auto-selection
|
||||
bun run profile:init -- --provider ollama --goal coding
|
||||
|
||||
# atomic-chat bootstrap (auto-detects running model)
|
||||
bun run profile:init -- --provider atomic-chat
|
||||
|
||||
# codex bootstrap with a fast model alias
|
||||
bun run profile:init -- --provider codex --model codexspark
|
||||
|
||||
# launch using persisted profile (.openclaude-profile.json)
|
||||
bun run dev:profile
|
||||
|
||||
# codex profile (uses CODEX_API_KEY or ~/.codex/auth.json)
|
||||
bun run dev:codex
|
||||
|
||||
# OpenAI profile (requires OPENAI_API_KEY in your shell)
|
||||
bun run dev:openai
|
||||
|
||||
# Ollama profile (defaults: localhost:11434, llama3.1:8b)
|
||||
bun run dev:ollama
|
||||
|
||||
# Atomic Chat profile (Apple Silicon local LLMs at 127.0.0.1:1337)
|
||||
bun run dev:atomic-chat
|
||||
```
|
||||
|
||||
`profile:recommend` ranks installed Ollama models for `latency`, `balanced`, or `coding`, and `profile:auto` can persist the recommendation directly.
|
||||
|
||||
If no profile exists yet, `dev:profile` uses the same goal-aware defaults when picking the initial model.
|
||||
|
||||
Use `--provider ollama` when you want a local-only path. Auto mode falls back to OpenAI when no viable local chat model is installed.
|
||||
|
||||
Use `--provider atomic-chat` when you want Atomic Chat as the local Apple Silicon provider.
|
||||
|
||||
Use `profile:codex` or `--provider codex` when you want the ChatGPT Codex backend.
|
||||
|
||||
`dev:openai`, `dev:ollama`, `dev:atomic-chat`, and `dev:codex` run `doctor:runtime` first and only launch the app if checks pass.
|
||||
|
||||
For `dev:ollama`, make sure Ollama is running locally before launch.
|
||||
|
||||
For `dev:atomic-chat`, make sure Atomic Chat is running with a model loaded before launch.
|
||||
333
docs/hook-chains.md
Normal file
333
docs/hook-chains.md
Normal file
@@ -0,0 +1,333 @@
|
||||
# Hook Chains (Self-Healing Agent Mesh MVP)
|
||||
|
||||
Hook Chains provide an event-driven recovery layer for important workflow failures.
|
||||
When a matching hook event occurs, OpenClaude evaluates declarative rules and can dispatch remediation actions such as:
|
||||
|
||||
- `spawn_fallback_agent`
|
||||
- `notify_team`
|
||||
- `warm_remote_capacity`
|
||||
|
||||
## Disabled-By-Default Rollout
|
||||
|
||||
> **Rollout recommendation:** keep Hook Chains disabled until you validate rules in your environment.
|
||||
>
|
||||
> - Set top-level config to `"enabled": false` initially.
|
||||
> - Enable per environment when ready.
|
||||
> - Dispatch is gated by `feature('HOOK_CHAINS')`.
|
||||
> - Env gate defaults to off unless `CLAUDE_CODE_ENABLE_HOOK_CHAINS=1` is set.
|
||||
|
||||
This keeps existing workflows unchanged while you tune guard windows and action behavior.
|
||||
|
||||
## Feature Overview
|
||||
|
||||
Hook Chains are loaded from a deterministic config file and evaluated on dispatched hook events.
|
||||
|
||||
MVP runtime trigger wiring:
|
||||
|
||||
- `PostToolUseFailure` hooks dispatch Hook Chains with outcome `failed`.
|
||||
- `TaskCompleted` hooks dispatch Hook Chains with outcome:
|
||||
- `success` when completion hooks did not block.
|
||||
- `failed` when completion hooks returned blocking errors or prevented continuation.
|
||||
|
||||
Default config path:
|
||||
|
||||
- `.openclaude/hook-chains.json`
|
||||
|
||||
Override path:
|
||||
|
||||
- `CLAUDE_CODE_HOOK_CHAINS_CONFIG_PATH=/abs/or/relative/path/to/hook-chains.json`
|
||||
|
||||
Global gate:
|
||||
|
||||
- `feature('HOOK_CHAINS')` must be enabled in the build
|
||||
- `CLAUDE_CODE_ENABLE_HOOK_CHAINS=0|1` (defaults to disabled when unset)
|
||||
|
||||
## Safety Guarantees
|
||||
|
||||
The runtime is intentionally conservative:
|
||||
|
||||
- **Depth guard:** chain dispatch is blocked when `chainDepth >= maxChainDepth`.
|
||||
- **Rule cooldown:** each rule can only re-fire after cooldown expires.
|
||||
- **Dedup window:** identical event/action combinations are suppressed for a window.
|
||||
- **Abort-safe behavior:** if the current signal is aborted, actions skip safely.
|
||||
- **Policy-aware remote warm:** `warm_remote_capacity` skips when remote sessions are policy denied.
|
||||
- **Bridge inactive no-op:** `warm_remote_capacity` safely skips when no active bridge handle exists.
|
||||
- **Missing team context safety:** `notify_team` skips with structured reason if no team context/team file is available.
|
||||
- **Fallback launcher safety:** `spawn_fallback_agent` fails with a structured reason when launch permissions/context are unavailable.
|
||||
|
||||
## Configuration Schema Reference
|
||||
|
||||
Top-level object:
|
||||
|
||||
```json
|
||||
{
|
||||
"version": 1,
|
||||
"enabled": true,
|
||||
"maxChainDepth": 2,
|
||||
"defaultCooldownMs": 30000,
|
||||
"defaultDedupWindowMs": 30000,
|
||||
"rules": []
|
||||
}
|
||||
```
|
||||
|
||||
### Top-Level Fields
|
||||
|
||||
| Field | Type | Required | Notes |
|
||||
|---|---|---:|---|
|
||||
| `version` | `1` | No | Defaults to `1`. |
|
||||
| `enabled` | `boolean` | No | Global feature switch for this config file. |
|
||||
| `maxChainDepth` | `integer` | No | Global depth guard (default `2`, max `10`). |
|
||||
| `defaultCooldownMs` | `integer` | No | Default rule cooldown in ms (default `30000`). |
|
||||
| `defaultDedupWindowMs` | `integer` | No | Default action dedup window in ms (default `30000`). |
|
||||
| `rules` | `HookChainRule[]` | No | Defaults to `[]`. May be omitted or empty; when no rules are present, dispatch is a no-op and returns `enabled: false`. |
|
||||
|
||||
> **Note:** An empty ruleset is valid and can be used to keep Hook Chains configured but effectively disabled until rules are added.
|
||||
### Rule Object (`HookChainRule`)
|
||||
|
||||
```json
|
||||
{
|
||||
"id": "task-failure-recovery",
|
||||
"enabled": true,
|
||||
"trigger": {
|
||||
"event": "TaskCompleted",
|
||||
"outcome": "failed"
|
||||
},
|
||||
"condition": {
|
||||
"toolNames": ["Edit"],
|
||||
"taskStatuses": ["failed"],
|
||||
"errorIncludes": ["timeout", "permission denied"],
|
||||
"eventFieldEquals": {
|
||||
"meta.source": "scheduler"
|
||||
}
|
||||
},
|
||||
"cooldownMs": 60000,
|
||||
"dedupWindowMs": 30000,
|
||||
"maxDepth": 2,
|
||||
"actions": []
|
||||
}
|
||||
```
|
||||
|
||||
| Field | Type | Required | Notes |
|
||||
|---|---|---:|---|
|
||||
| `id` | `string` | Yes | Stable identifier used in telemetry/guards. |
|
||||
| `enabled` | `boolean` | No | Per-rule switch. |
|
||||
| `trigger.event` | `HookEvent` | Yes | Event name to match. |
|
||||
| `trigger.outcome` | `"success"|"failed"|"timeout"|"unknown"` | No | Single outcome matcher. |
|
||||
| `trigger.outcomes` | `Outcome[]` | No | Multi-outcome matcher. Use either `outcome` or `outcomes`. |
|
||||
| `condition` | `object` | No | Optional extra matching constraints. |
|
||||
| `cooldownMs` | `integer` | No | Overrides global cooldown for this rule. |
|
||||
| `dedupWindowMs` | `integer` | No | Overrides global dedup for this rule. |
|
||||
| `maxDepth` | `integer` | No | Per-rule depth cap. |
|
||||
| `actions` | `HookChainAction[]` | Yes | One or more actions to execute in order. |
|
||||
|
||||
### Condition Fields
|
||||
|
||||
| Field | Type | Notes |
|
||||
|---|---|---|
|
||||
| `toolNames` | `string[]` | Matches `tool_name` / `toolName` in event payload. |
|
||||
| `taskStatuses` | `string[]` | Matches `task_status` / `taskStatus` / `status`. |
|
||||
| `errorIncludes` | `string[]` | Case-insensitive substring match against `error` / `reason` / `message`. |
|
||||
| `eventFieldEquals` | `Record<string, string\|number\|boolean>` | Dot-path equality against payload (example: `"meta.source": "scheduler"`). |
|
||||
|
||||
### Actions
|
||||
|
||||
#### `spawn_fallback_agent`
|
||||
|
||||
```json
|
||||
{
|
||||
"type": "spawn_fallback_agent",
|
||||
"id": "fallback-1",
|
||||
"enabled": true,
|
||||
"dedupWindowMs": 30000,
|
||||
"description": "Fallback recovery for failed task",
|
||||
"promptTemplate": "Recover task ${TASK_SUBJECT}. Event=${EVENT_NAME}, outcome=${OUTCOME}, error=${ERROR}. Payload=${PAYLOAD_JSON}",
|
||||
"agentType": "general-purpose",
|
||||
"model": "sonnet"
|
||||
}
|
||||
```
|
||||
|
||||
#### `notify_team`
|
||||
|
||||
```json
|
||||
{
|
||||
"type": "notify_team",
|
||||
"id": "notify-ops",
|
||||
"enabled": true,
|
||||
"dedupWindowMs": 30000,
|
||||
"teamName": "mesh-team",
|
||||
"recipients": ["*"],
|
||||
"summary": "Hook chain ${RULE_ID} fired",
|
||||
"messageTemplate": "Event=${EVENT_NAME} outcome=${OUTCOME}\nTask=${TASK_ID}\nError=${ERROR}\nPayload=${PAYLOAD_JSON}"
|
||||
}
|
||||
```
|
||||
|
||||
#### `warm_remote_capacity`
|
||||
|
||||
```json
|
||||
{
|
||||
"type": "warm_remote_capacity",
|
||||
"id": "warm-bridge",
|
||||
"enabled": true,
|
||||
"dedupWindowMs": 60000,
|
||||
"createDefaultEnvironmentIfMissing": false
|
||||
}
|
||||
```
|
||||
|
||||
## Complete Example Configs
|
||||
|
||||
### 1) Retry via Fallback Agent
|
||||
|
||||
```json
|
||||
{
|
||||
"version": 1,
|
||||
"enabled": true,
|
||||
"maxChainDepth": 2,
|
||||
"defaultCooldownMs": 30000,
|
||||
"defaultDedupWindowMs": 30000,
|
||||
"rules": [
|
||||
{
|
||||
"id": "retry-task-via-fallback",
|
||||
"trigger": {
|
||||
"event": "TaskCompleted",
|
||||
"outcome": "failed"
|
||||
},
|
||||
"cooldownMs": 60000,
|
||||
"actions": [
|
||||
{
|
||||
"type": "spawn_fallback_agent",
|
||||
"id": "spawn-retry-agent",
|
||||
"description": "Retry failed task with fallback agent",
|
||||
"promptTemplate": "A task failed. Recover it safely.\nTask=${TASK_SUBJECT}\nDescription=${TASK_DESCRIPTION}\nError=${ERROR}\nPayload=${PAYLOAD_JSON}",
|
||||
"agentType": "general-purpose",
|
||||
"model": "sonnet"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
### 2) Notify Only
|
||||
|
||||
```json
|
||||
{
|
||||
"version": 1,
|
||||
"enabled": true,
|
||||
"maxChainDepth": 2,
|
||||
"defaultCooldownMs": 30000,
|
||||
"defaultDedupWindowMs": 30000,
|
||||
"rules": [
|
||||
{
|
||||
"id": "notify-on-tool-failure",
|
||||
"trigger": {
|
||||
"event": "PostToolUseFailure",
|
||||
"outcome": "failed"
|
||||
},
|
||||
"condition": {
|
||||
"toolNames": ["Edit", "Write", "Bash"]
|
||||
},
|
||||
"actions": [
|
||||
{
|
||||
"type": "notify_team",
|
||||
"id": "notify-team-failure",
|
||||
"recipients": ["*"],
|
||||
"summary": "Tool failure detected",
|
||||
"messageTemplate": "Tool failure detected.\nEvent=${EVENT_NAME} outcome=${OUTCOME}\nError=${ERROR}\nPayload=${PAYLOAD_JSON}"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
### 3) Combined Fallback + Notify + Bridge Warm
|
||||
|
||||
```json
|
||||
{
|
||||
"version": 1,
|
||||
"enabled": true,
|
||||
"maxChainDepth": 2,
|
||||
"defaultCooldownMs": 45000,
|
||||
"defaultDedupWindowMs": 30000,
|
||||
"rules": [
|
||||
{
|
||||
"id": "full-recovery-chain",
|
||||
"trigger": {
|
||||
"event": "TaskCompleted",
|
||||
"outcomes": ["failed", "timeout"]
|
||||
},
|
||||
"condition": {
|
||||
"errorIncludes": ["timeout", "capacity", "connection"]
|
||||
},
|
||||
"cooldownMs": 90000,
|
||||
"actions": [
|
||||
{
|
||||
"type": "spawn_fallback_agent",
|
||||
"id": "fallback-agent",
|
||||
"description": "Recover failed task execution",
|
||||
"promptTemplate": "Recover failed task and produce a concise fix summary.\nTask=${TASK_SUBJECT}\nError=${ERROR}\nPayload=${PAYLOAD_JSON}"
|
||||
},
|
||||
{
|
||||
"type": "notify_team",
|
||||
"id": "notify-team",
|
||||
"recipients": ["*"],
|
||||
"summary": "Recovery chain triggered",
|
||||
"messageTemplate": "Recovery chain ${RULE_ID} fired.\nOutcome=${OUTCOME}\nTask=${TASK_SUBJECT}\nError=${ERROR}"
|
||||
},
|
||||
{
|
||||
"type": "warm_remote_capacity",
|
||||
"id": "warm-capacity",
|
||||
"createDefaultEnvironmentIfMissing": false
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
## Template Variables
|
||||
|
||||
The following placeholders are supported by `promptTemplate`, `summary`, and `messageTemplate`:
|
||||
|
||||
- `${EVENT_NAME}`
|
||||
- `${OUTCOME}`
|
||||
- `${RULE_ID}`
|
||||
- `${TASK_SUBJECT}`
|
||||
- `${TASK_DESCRIPTION}`
|
||||
- `${TASK_ID}`
|
||||
- `${ERROR}`
|
||||
- `${PAYLOAD_JSON}`
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Rule never triggers
|
||||
|
||||
- Verify `trigger.event` and `trigger.outcome`/`trigger.outcomes` exactly match dispatched event data.
|
||||
- Check `condition` filters (especially `toolNames` and `eventFieldEquals` dot-path keys).
|
||||
- Confirm the config file is valid JSON and schema-valid.
|
||||
|
||||
### Actions show as skipped
|
||||
|
||||
Common skip reasons:
|
||||
|
||||
- `action disabled`
|
||||
- `rule cooldown active ...`
|
||||
- `dedup window active ...`
|
||||
- `max chain depth reached ...`
|
||||
- `No team context is available ...`
|
||||
- `Team file not found ...`
|
||||
- `Remote sessions are blocked by policy`
|
||||
- `Bridge is not active; warm_remote_capacity is a safe no-op`
|
||||
- `No fallback agent launcher is registered in runtime context`
|
||||
|
||||
### Config changes not reflected
|
||||
|
||||
- Loader uses memoization by file mtime/size.
|
||||
- Ensure your editor writes the file fully and updates mtime.
|
||||
- If needed, force reload from the caller side with `forceReloadConfig: true`.
|
||||
|
||||
### Existing workflows changed unexpectedly
|
||||
|
||||
- Set `"enabled": false` at top-level.
|
||||
- Or globally disable with `CLAUDE_CODE_ENABLE_HOOK_CHAINS=0`.
|
||||
- Re-enable gradually after validating one rule at a time.
|
||||
144
docs/litellm-setup.md
Normal file
144
docs/litellm-setup.md
Normal file
@@ -0,0 +1,144 @@
|
||||
# LiteLLM Setup
|
||||
|
||||
OpenClaude can connect to LiteLLM through LiteLLM's OpenAI-compatible proxy.
|
||||
|
||||
## Overview
|
||||
|
||||
LiteLLM is an open-source LLM gateway that provides a unified API to 100+ model providers. By running the LiteLLM Proxy, you can route OpenClaude requests through LiteLLM to access any of its supported providers — all while using OpenClaude's existing OpenAI-compatible provider path.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- LiteLLM installed (`pip install litellm[proxy]`)
|
||||
- A `litellm_config.yaml` or equivalent LiteLLM configuration
|
||||
- LiteLLM Proxy running on a local or remote port
|
||||
|
||||
## 1. Start the LiteLLM Proxy
|
||||
|
||||
### Basic installation
|
||||
|
||||
```bash
|
||||
pip install litellm[proxy]
|
||||
```
|
||||
|
||||
### Configure LiteLLM
|
||||
|
||||
Create a `litellm_config.yaml` with your desired model aliases:
|
||||
|
||||
```yaml
|
||||
model_list:
|
||||
- model_name: gpt-4o
|
||||
litellm_params:
|
||||
model: openai/gpt-4o
|
||||
api_key: os.environ/OPENAI_API_KEY
|
||||
|
||||
- model_name: claude-sonnet-4
|
||||
litellm_params:
|
||||
model: anthropic/claude-sonnet-4-5-20250929
|
||||
api_key: os.environ/ANTHROPIC_API_KEY
|
||||
|
||||
- model_name: gemini-2.5-flash
|
||||
litellm_params:
|
||||
model: gemini/gemini-2.5-flash
|
||||
api_key: os.environ/GEMINI_API_KEY
|
||||
|
||||
- model_name: llama-3.3-70b
|
||||
litellm_params:
|
||||
model: together_ai/meta-llama/Llama-3.3-70B-Instruct-Turbo
|
||||
api_key: os.environ/TOGETHER_API_KEY
|
||||
```
|
||||
|
||||
### Run the proxy
|
||||
|
||||
```bash
|
||||
litellm --config litellm_config.yaml --port 4000
|
||||
```
|
||||
|
||||
The proxy will start at `http://localhost:4000` by default.
|
||||
|
||||
## 2. Point OpenClaude to LiteLLM
|
||||
|
||||
### Option A: Environment Variables
|
||||
|
||||
```bash
|
||||
export CLAUDE_CODE_USE_OPENAI=1
|
||||
export OPENAI_BASE_URL=http://localhost:4000
|
||||
export OPENAI_API_KEY=<your-master-key-or-placeholder>
|
||||
export OPENAI_MODEL=<your-litellm-model-alias>
|
||||
openclaude
|
||||
```
|
||||
|
||||
Replace `<your-litellm-model-alias>` with a model name from your `litellm_config.yaml` (e.g., `gpt-4o`, `claude-sonnet-4`, `gemini-2.5-flash`).
|
||||
|
||||
### Option B: Using /provider
|
||||
|
||||
1. Run `openclaude`
|
||||
2. Type `/provider` to open the provider setup flow
|
||||
3. Choose the **OpenAI-compatible** option
|
||||
4. When prompted for the API key, enter the key required by your LiteLLM proxy
|
||||
If your local LiteLLM setup does not enforce auth, you may still need to enter a placeholder value
|
||||
5. When prompted for the base URL, enter `http://localhost:4000`
|
||||
6. When prompted for the model, enter the LiteLLM model name or alias you configured
|
||||
7. Save the provider configuration
|
||||
|
||||
## 3. Example LiteLLM Configs
|
||||
|
||||
### Multi-provider routing with spend tracking
|
||||
|
||||
```yaml
|
||||
model_list:
|
||||
- model_name: gpt-4o
|
||||
litellm_params:
|
||||
model: openai/gpt-4o
|
||||
api_key: os.environ/OPENAI_API_KEY
|
||||
|
||||
- model_name: claude-sonnet-4
|
||||
litellm_params:
|
||||
model: anthropic/claude-sonnet-4-5-20250929
|
||||
api_key: os.environ/ANTHROPIC_API_KEY
|
||||
|
||||
- model_name: deepseek-chat
|
||||
litellm_params:
|
||||
model: deepseek/deepseek-chat
|
||||
api_key: os.environ/DEEPSEEK_API_KEY
|
||||
|
||||
litellm_settings:
|
||||
set_verbose: false
|
||||
num_retries: 3
|
||||
```
|
||||
|
||||
### With a master key for auth
|
||||
|
||||
```bash
|
||||
# Start proxy with a master key
|
||||
litellm --config litellm_config.yaml --port 4000 --master_key sk-my-master-key
|
||||
|
||||
# Connect OpenClaude
|
||||
export CLAUDE_CODE_USE_OPENAI=1
|
||||
export OPENAI_BASE_URL=http://localhost:4000
|
||||
export OPENAI_API_KEY=sk-my-master-key
|
||||
export OPENAI_MODEL=gpt-4o
|
||||
openclaude
|
||||
```
|
||||
|
||||
## 4. Notes
|
||||
|
||||
- `OPENAI_MODEL` must match the **LiteLLM model alias** defined in your config, not the upstream raw provider model name.
|
||||
- If your proxy requires authentication, use the proxy key (or `master_key`) in `OPENAI_API_KEY`.
|
||||
- LiteLLM's OpenAI-compatible endpoint accepts the same request format as OpenAI, so OpenClaude works without any code changes.
|
||||
- You can switch between any provider configured in LiteLLM by simply changing the `OPENAI_MODEL` value — no need to reconfigure OpenClaude.
|
||||
|
||||
## 5. Troubleshooting
|
||||
|
||||
| Issue | Likely Cause | Fix |
|
||||
|-------|--------------|-----|
|
||||
| 404 or Model Not Found | Model alias doesn't exist in LiteLLM config | Verify the `model_name` in `litellm_config.yaml` matches `OPENAI_MODEL` |
|
||||
| Connection Refused | LiteLLM proxy isn't running | Start the proxy with `litellm --config litellm_config.yaml --port 4000` |
|
||||
| Auth Failed | Missing or wrong `master_key` | Set the correct key in `OPENAI_API_KEY` |
|
||||
| Upstream provider error | The backend provider key is missing or invalid | Ensure the upstream API key (e.g., `OPENAI_API_KEY`) is set in your LiteLLM proxy process environment |
|
||||
| Tools fail but chat works | The selected model has weak function/tool calling support | Switch to a model with strong tool support (e.g., GPT-4o, Claude Sonnet) |
|
||||
|
||||
## 6. Resources
|
||||
|
||||
- [LiteLLM Proxy Docs](https://docs.litellm.ai/docs/proxy/quick_start)
|
||||
- [LiteLLM Provider List](https://docs.litellm.ai/docs/providers)
|
||||
- [LiteLLM OpenAI-Compatible Endpoints](https://docs.litellm.ai/docs/proxy/openai_compatible_proxy)
|
||||
116
docs/non-technical-setup.md
Normal file
116
docs/non-technical-setup.md
Normal file
@@ -0,0 +1,116 @@
|
||||
# OpenClaude for Non-Technical Users
|
||||
|
||||
This guide is for people who want the easiest setup path.
|
||||
|
||||
You do not need to build from source. You do not need Bun. You do not need to understand the full codebase.
|
||||
|
||||
If you can copy and paste commands into a terminal, you can set this up.
|
||||
|
||||
## What OpenClaude Does
|
||||
|
||||
OpenClaude lets you use an AI coding assistant with different model providers such as:
|
||||
|
||||
- OpenAI
|
||||
- DeepSeek
|
||||
- Gemini
|
||||
- Ollama
|
||||
- Codex
|
||||
|
||||
For most first-time users, OpenAI is the easiest option.
|
||||
|
||||
## Before You Start
|
||||
|
||||
You need:
|
||||
|
||||
1. Node.js 20 or newer installed
|
||||
2. A terminal window
|
||||
3. An API key from your provider, unless you are using a local model like Ollama
|
||||
|
||||
## Fastest Path
|
||||
|
||||
1. Install OpenClaude with npm
|
||||
2. Set 3 environment variables
|
||||
3. Run `openclaude`
|
||||
|
||||
## Choose Your Operating System
|
||||
|
||||
- Windows: [Windows Quick Start](quick-start-windows.md)
|
||||
- macOS / Linux: [macOS / Linux Quick Start](quick-start-mac-linux.md)
|
||||
|
||||
## Which Provider Should You Choose?
|
||||
|
||||
### OpenAI
|
||||
|
||||
Choose this if:
|
||||
|
||||
- you want the easiest setup
|
||||
- you already have an OpenAI API key
|
||||
|
||||
### Ollama
|
||||
|
||||
Choose this if:
|
||||
|
||||
- you want to run models locally
|
||||
- you do not want to depend on a cloud API for testing
|
||||
|
||||
### Codex
|
||||
|
||||
Choose this if:
|
||||
|
||||
- you already use the Codex CLI
|
||||
- you already have Codex or ChatGPT auth configured
|
||||
|
||||
## What Success Looks Like
|
||||
|
||||
After you run `openclaude`, the CLI should start and wait for your prompt.
|
||||
|
||||
At that point, you can ask it to:
|
||||
|
||||
- explain code
|
||||
- edit files
|
||||
- run commands
|
||||
- review changes
|
||||
|
||||
## Common Problems
|
||||
|
||||
### `openclaude` command not found
|
||||
|
||||
Cause:
|
||||
|
||||
- npm installed the package, but your terminal has not refreshed yet
|
||||
|
||||
Fix:
|
||||
|
||||
1. Close the terminal
|
||||
2. Open a new terminal
|
||||
3. Run `openclaude` again
|
||||
|
||||
### Invalid API key
|
||||
|
||||
Cause:
|
||||
|
||||
- the key is wrong, expired, or copied incorrectly
|
||||
|
||||
Fix:
|
||||
|
||||
1. Get a fresh key from your provider
|
||||
2. Paste it again carefully
|
||||
3. Re-run `openclaude`
|
||||
|
||||
### Ollama not working
|
||||
|
||||
Cause:
|
||||
|
||||
- Ollama is not installed or not running
|
||||
|
||||
Fix:
|
||||
|
||||
1. Install Ollama from `https://ollama.com/download`
|
||||
2. Start Ollama
|
||||
3. Try again
|
||||
|
||||
## Want More Control?
|
||||
|
||||
If you want source builds, advanced provider profiles, diagnostics, or Bun-based workflows, use:
|
||||
|
||||
- [Advanced Setup](advanced-setup.md)
|
||||
145
docs/quick-start-mac-linux.md
Normal file
145
docs/quick-start-mac-linux.md
Normal file
@@ -0,0 +1,145 @@
|
||||
# OpenClaude Quick Start for macOS and Linux
|
||||
|
||||
This guide uses a standard shell such as Terminal, iTerm, bash, or zsh.
|
||||
|
||||
## 1. Install Node.js
|
||||
|
||||
Install Node.js 20 or newer from:
|
||||
|
||||
- `https://nodejs.org/`
|
||||
|
||||
Then check it:
|
||||
|
||||
```bash
|
||||
node --version
|
||||
npm --version
|
||||
```
|
||||
|
||||
## 2. Install OpenClaude
|
||||
|
||||
```bash
|
||||
npm install -g @gitlawb/openclaude
|
||||
```
|
||||
|
||||
## 3. Pick One Provider
|
||||
|
||||
### Option A: OpenAI
|
||||
|
||||
Replace `sk-your-key-here` with your real key.
|
||||
|
||||
```bash
|
||||
export CLAUDE_CODE_USE_OPENAI=1
|
||||
export OPENAI_API_KEY=sk-your-key-here
|
||||
export OPENAI_MODEL=gpt-4o
|
||||
|
||||
openclaude
|
||||
```
|
||||
|
||||
### Option B: DeepSeek
|
||||
|
||||
```bash
|
||||
export CLAUDE_CODE_USE_OPENAI=1
|
||||
export OPENAI_API_KEY=sk-your-key-here
|
||||
export OPENAI_BASE_URL=https://api.deepseek.com/v1
|
||||
export OPENAI_MODEL=deepseek-v4-flash
|
||||
|
||||
openclaude
|
||||
```
|
||||
|
||||
Use `deepseek-v4-pro` when you want the stronger model. `deepseek-chat` and `deepseek-reasoner` still work as DeepSeek's legacy API aliases.
|
||||
|
||||
### Option C: Ollama
|
||||
|
||||
Install Ollama first from:
|
||||
|
||||
- `https://ollama.com/download`
|
||||
|
||||
Then run:
|
||||
|
||||
```bash
|
||||
ollama pull llama3.1:8b
|
||||
|
||||
export CLAUDE_CODE_USE_OPENAI=1
|
||||
export OPENAI_BASE_URL=http://localhost:11434/v1
|
||||
export OPENAI_MODEL=llama3.1:8b
|
||||
|
||||
openclaude
|
||||
```
|
||||
|
||||
No API key is needed for Ollama local models.
|
||||
|
||||
### Option D: LM Studio
|
||||
|
||||
Install LM Studio first from:
|
||||
|
||||
- `https://lmstudio.ai/`
|
||||
|
||||
Then in LM Studio:
|
||||
|
||||
1. Download a model (e.g., Llama 3.1 8B, Mistral 7B)
|
||||
2. Go to the "Developer" tab
|
||||
3. Select your model and enable the server via the toggle
|
||||
|
||||
Then run:
|
||||
|
||||
```bash
|
||||
export CLAUDE_CODE_USE_OPENAI=1
|
||||
export OPENAI_BASE_URL=http://localhost:1234/v1
|
||||
export OPENAI_MODEL=your-model-name
|
||||
# export OPENAI_API_KEY=lmstudio # optional: some users need a dummy key
|
||||
|
||||
openclaude
|
||||
```
|
||||
|
||||
Replace `your-model-name` with the model name shown in LM Studio.
|
||||
|
||||
No API key is needed for LM Studio local models (but uncomment the `OPENAI_API_KEY` line if you hit auth errors).
|
||||
|
||||
## 4. If `openclaude` Is Not Found
|
||||
|
||||
Close the terminal, open a new one, and try again:
|
||||
|
||||
```bash
|
||||
openclaude
|
||||
```
|
||||
|
||||
## 5. If Your Provider Fails
|
||||
|
||||
Check the basics:
|
||||
|
||||
### For OpenAI or DeepSeek
|
||||
|
||||
- make sure the key is real
|
||||
- make sure you copied it fully
|
||||
|
||||
### For Ollama
|
||||
|
||||
- make sure Ollama is installed
|
||||
- make sure Ollama is running
|
||||
- make sure the model was pulled successfully
|
||||
|
||||
### For LM Studio
|
||||
|
||||
- make sure LM Studio is installed
|
||||
- make sure LM Studio is running
|
||||
- make sure the server is enabled (toggle on in the "Developer" tab)
|
||||
- make sure a model is loaded in LM Studio
|
||||
- make sure the model name matches what you set in `OPENAI_MODEL`
|
||||
|
||||
## 6. Updating OpenClaude
|
||||
|
||||
```bash
|
||||
npm install -g @gitlawb/openclaude@latest
|
||||
```
|
||||
|
||||
## 7. Uninstalling OpenClaude
|
||||
|
||||
```bash
|
||||
npm uninstall -g @gitlawb/openclaude
|
||||
```
|
||||
|
||||
## Need Advanced Setup?
|
||||
|
||||
Use:
|
||||
|
||||
- [Advanced Setup](advanced-setup.md)
|
||||
145
docs/quick-start-windows.md
Normal file
145
docs/quick-start-windows.md
Normal file
@@ -0,0 +1,145 @@
|
||||
# OpenClaude Quick Start for Windows
|
||||
|
||||
This guide uses Windows PowerShell.
|
||||
|
||||
## 1. Install Node.js
|
||||
|
||||
Install Node.js 20 or newer from:
|
||||
|
||||
- `https://nodejs.org/`
|
||||
|
||||
Then open PowerShell and check it:
|
||||
|
||||
```powershell
|
||||
node --version
|
||||
npm --version
|
||||
```
|
||||
|
||||
## 2. Install OpenClaude
|
||||
|
||||
```powershell
|
||||
npm install -g @gitlawb/openclaude
|
||||
```
|
||||
|
||||
## 3. Pick One Provider
|
||||
|
||||
### Option A: OpenAI
|
||||
|
||||
Replace `sk-your-key-here` with your real key.
|
||||
|
||||
```powershell
|
||||
$env:CLAUDE_CODE_USE_OPENAI="1"
|
||||
$env:OPENAI_API_KEY="sk-your-key-here"
|
||||
$env:OPENAI_MODEL="gpt-4o"
|
||||
|
||||
openclaude
|
||||
```
|
||||
|
||||
### Option B: DeepSeek
|
||||
|
||||
```powershell
|
||||
$env:CLAUDE_CODE_USE_OPENAI="1"
|
||||
$env:OPENAI_API_KEY="sk-your-key-here"
|
||||
$env:OPENAI_BASE_URL="https://api.deepseek.com/v1"
|
||||
$env:OPENAI_MODEL="deepseek-v4-flash"
|
||||
|
||||
openclaude
|
||||
```
|
||||
|
||||
Use `deepseek-v4-pro` when you want the stronger model. `deepseek-chat` and `deepseek-reasoner` still work as DeepSeek's legacy API aliases.
|
||||
|
||||
### Option C: Ollama
|
||||
|
||||
Install Ollama first from:
|
||||
|
||||
- `https://ollama.com/download/windows`
|
||||
|
||||
Then run:
|
||||
|
||||
```powershell
|
||||
ollama pull llama3.1:8b
|
||||
|
||||
$env:CLAUDE_CODE_USE_OPENAI="1"
|
||||
$env:OPENAI_BASE_URL="http://localhost:11434/v1"
|
||||
$env:OPENAI_MODEL="llama3.1:8b"
|
||||
|
||||
openclaude
|
||||
```
|
||||
|
||||
No API key is needed for Ollama local models.
|
||||
|
||||
### Option D: LM Studio
|
||||
|
||||
Install LM Studio first from:
|
||||
|
||||
- `https://lmstudio.ai/`
|
||||
|
||||
Then in LM Studio:
|
||||
|
||||
1. Download a model (e.g., Llama 3.1 8B, Mistral 7B)
|
||||
2. Go to the "Developer" tab
|
||||
3. Select your model and enable the server via the toggle
|
||||
|
||||
Then run:
|
||||
|
||||
```powershell
|
||||
$env:CLAUDE_CODE_USE_OPENAI="1"
|
||||
$env:OPENAI_BASE_URL="http://localhost:1234/v1"
|
||||
$env:OPENAI_MODEL="your-model-name"
|
||||
# $env:OPENAI_API_KEY="lmstudio" # optional: some users need a dummy key
|
||||
|
||||
openclaude
|
||||
```
|
||||
|
||||
Replace `your-model-name` with the model name shown in LM Studio.
|
||||
|
||||
No API key is needed for LM Studio local models (but uncomment the `OPENAI_API_KEY` line if you hit auth errors).
|
||||
|
||||
## 4. If `openclaude` Is Not Found
|
||||
|
||||
Close PowerShell, open a new one, and try again:
|
||||
|
||||
```powershell
|
||||
openclaude
|
||||
```
|
||||
|
||||
## 5. If Your Provider Fails
|
||||
|
||||
Check the basics:
|
||||
|
||||
### For OpenAI or DeepSeek
|
||||
|
||||
- make sure the key is real
|
||||
- make sure you copied it fully
|
||||
|
||||
### For Ollama
|
||||
|
||||
- make sure Ollama is installed
|
||||
- make sure Ollama is running
|
||||
- make sure the model was pulled successfully
|
||||
|
||||
### For LM Studio
|
||||
|
||||
- make sure LM Studio is installed
|
||||
- make sure LM Studio is running
|
||||
- make sure the server is enabled (toggle on in the "Developer" tab)
|
||||
- make sure a model is loaded in LM Studio
|
||||
- make sure the model name matches what you set in `OPENAI_MODEL`
|
||||
|
||||
## 6. Updating OpenClaude
|
||||
|
||||
```powershell
|
||||
npm install -g @gitlawb/openclaude@latest
|
||||
```
|
||||
|
||||
## 7. Uninstalling OpenClaude
|
||||
|
||||
```powershell
|
||||
npm uninstall -g @gitlawb/openclaude
|
||||
```
|
||||
|
||||
## Need Advanced Setup?
|
||||
|
||||
Use:
|
||||
|
||||
- [Advanced Setup](advanced-setup.md)
|
||||
178
package.json
178
package.json
@@ -1,7 +1,7 @@
|
||||
{
|
||||
"name": "@gitlawb/openclaude",
|
||||
"version": "0.1.6",
|
||||
"description": "Claude Code opened to any LLM — OpenAI, Gemini, DeepSeek, Ollama, and 200+ models",
|
||||
"version": "0.7.0",
|
||||
"description": "OpenClaude opens coding-agent workflows to any LLM — OpenAI, Gemini, DeepSeek, Ollama, and 200+ models",
|
||||
"type": "module",
|
||||
"bin": {
|
||||
"openclaude": "./bin/openclaude"
|
||||
@@ -21,6 +21,7 @@
|
||||
"dev:gemini": "bun run scripts/provider-launch.ts gemini",
|
||||
"dev:ollama": "bun run scripts/provider-launch.ts ollama",
|
||||
"dev:ollama:fast": "bun run scripts/provider-launch.ts ollama --fast --bare",
|
||||
"dev:atomic-chat": "bun run scripts/provider-launch.ts atomic-chat",
|
||||
"profile:init": "bun run scripts/provider-bootstrap.ts",
|
||||
"profile:recommend": "bun run scripts/provider-recommend.ts",
|
||||
"profile:auto": "bun run scripts/provider-recommend.ts --apply",
|
||||
@@ -29,10 +30,18 @@
|
||||
"profile:code": "bun run profile:init -- --provider ollama --model qwen2.5-coder:7b",
|
||||
"dev:fast": "bun run profile:fast && bun run dev:ollama:fast",
|
||||
"dev:code": "bun run profile:code && bun run dev:profile",
|
||||
"dev:grpc": "bun run scripts/start-grpc.ts",
|
||||
"dev:grpc:cli": "bun run scripts/grpc-cli.ts",
|
||||
"start": "node dist/cli.mjs",
|
||||
"test:provider-recommendation": "node --test --experimental-strip-types src/utils/providerRecommendation.test.ts src/utils/providerProfile.test.ts",
|
||||
"test": "bun test",
|
||||
"test:coverage": "bun test --coverage --coverage-reporter=lcov --coverage-dir=coverage --max-concurrency=1 && bun run scripts/render-coverage-heatmap.ts",
|
||||
"test:coverage:ui": "bun run scripts/render-coverage-heatmap.ts",
|
||||
"security:pr-scan": "bun run scripts/pr-intent-scan.ts",
|
||||
"test:provider-recommendation": "bun test src/utils/providerRecommendation.test.ts src/utils/providerProfile.test.ts",
|
||||
"typecheck": "tsc --noEmit",
|
||||
"smoke": "bun run build && node dist/cli.mjs --version",
|
||||
"verify:privacy": "bun run scripts/verify-no-phone-home.ts",
|
||||
"build:verified": "bun run build && bun run verify:privacy",
|
||||
"test:provider": "bun test src/services/api/*.test.ts src/utils/context.test.ts",
|
||||
"doctor:runtime": "bun run scripts/system-check.ts",
|
||||
"doctor:runtime:json": "bun run scripts/system-check.ts --json",
|
||||
@@ -42,89 +51,97 @@
|
||||
"prepack": "npm run build"
|
||||
},
|
||||
"dependencies": {
|
||||
"@alcalzone/ansi-tokenize": "^0.3.0",
|
||||
"@anthropic-ai/bedrock-sdk": "^0.26.0",
|
||||
"@anthropic-ai/foundry-sdk": "^0.2.0",
|
||||
"@anthropic-ai/sandbox-runtime": "^0.0.46",
|
||||
"@anthropic-ai/sdk": "^0.81.0",
|
||||
"@anthropic-ai/vertex-sdk": "^0.14.0",
|
||||
"@commander-js/extra-typings": "^12.0.0",
|
||||
"@growthbook/growthbook": "^1.3.0",
|
||||
"@modelcontextprotocol/sdk": "^1.12.0",
|
||||
"@opentelemetry/api": "^1.9.1",
|
||||
"@opentelemetry/api-logs": "^0.214.0",
|
||||
"@opentelemetry/core": "^2.6.1",
|
||||
"@opentelemetry/exporter-logs-otlp-http": "^0.214.0",
|
||||
"@opentelemetry/exporter-trace-otlp-grpc": "^0.57.0",
|
||||
"@opentelemetry/resources": "^2.6.1",
|
||||
"@opentelemetry/sdk-logs": "^0.214.0",
|
||||
"@opentelemetry/sdk-metrics": "^2.6.1",
|
||||
"@opentelemetry/sdk-trace-base": "^2.6.1",
|
||||
"@opentelemetry/sdk-trace-node": "^2.6.1",
|
||||
"@opentelemetry/semantic-conventions": "^1.40.0",
|
||||
"ajv": "^8.17.0",
|
||||
"auto-bind": "^5.0.1",
|
||||
"axios": "^1.14.0",
|
||||
"bidi-js": "^1.0.3",
|
||||
"chalk": "^5.4.0",
|
||||
"chokidar": "^4.0.0",
|
||||
"cli-boxes": "^3.0.0",
|
||||
"cli-highlight": "^2.1.0",
|
||||
"code-excerpt": "^4.0.0",
|
||||
"commander": "^12.0.0",
|
||||
"diff": "^7.0.0",
|
||||
"emoji-regex": "^10.4.0",
|
||||
"env-paths": "^3.0.0",
|
||||
"execa": "^9.5.0",
|
||||
"fflate": "^0.8.2",
|
||||
"figures": "^6.1.0",
|
||||
"fuse.js": "^7.1.0",
|
||||
"get-east-asian-width": "^1.3.0",
|
||||
"google-auth-library": "^9.15.0",
|
||||
"https-proxy-agent": "^7.0.6",
|
||||
"ignore": "^7.0.0",
|
||||
"indent-string": "^5.0.0",
|
||||
"jsonc-parser": "^3.3.1",
|
||||
"lodash-es": "^4.17.21",
|
||||
"lru-cache": "^11.0.0",
|
||||
"marked": "^15.0.0",
|
||||
"p-map": "^7.0.3",
|
||||
"picomatch": "^4.0.0",
|
||||
"proper-lockfile": "^4.1.2",
|
||||
"qrcode": "^1.5.4",
|
||||
"react": "^19.2.4",
|
||||
"react-compiler-runtime": "^1.0.0",
|
||||
"react-reconciler": "^0.33.0",
|
||||
"semver": "^7.6.3",
|
||||
"shell-quote": "^1.8.2",
|
||||
"signal-exit": "^4.1.0",
|
||||
"stack-utils": "^2.0.6",
|
||||
"strip-ansi": "^7.1.0",
|
||||
"supports-hyperlinks": "^3.1.0",
|
||||
"tree-kill": "^1.2.2",
|
||||
"turndown": "^7.2.0",
|
||||
"type-fest": "^4.30.0",
|
||||
"undici": "^7.3.0",
|
||||
"usehooks-ts": "^3.1.1",
|
||||
"vscode-languageserver-protocol": "^3.17.5",
|
||||
"wrap-ansi": "^9.0.0",
|
||||
"ws": "^8.18.0",
|
||||
"xss": "^1.0.15",
|
||||
"yaml": "^2.7.0",
|
||||
"zod": "^3.24.0"
|
||||
"@alcalzone/ansi-tokenize": "0.3.0",
|
||||
"@anthropic-ai/bedrock-sdk": "0.26.4",
|
||||
"@anthropic-ai/foundry-sdk": "0.2.3",
|
||||
"@anthropic-ai/sandbox-runtime": "0.0.46",
|
||||
"@anthropic-ai/sdk": "0.81.0",
|
||||
"@anthropic-ai/vertex-sdk": "0.14.4",
|
||||
"@commander-js/extra-typings": "12.1.0",
|
||||
"@growthbook/growthbook": "1.6.5",
|
||||
"@grpc/grpc-js": "^1.14.3",
|
||||
"@grpc/proto-loader": "^0.8.0",
|
||||
"@mendable/firecrawl-js": "4.18.1",
|
||||
"@modelcontextprotocol/sdk": "1.29.0",
|
||||
"@opentelemetry/api": "1.9.1",
|
||||
"@opentelemetry/api-logs": "0.214.0",
|
||||
"@opentelemetry/core": "2.6.1",
|
||||
"@opentelemetry/exporter-logs-otlp-http": "0.214.0",
|
||||
"@opentelemetry/exporter-trace-otlp-grpc": "0.57.2",
|
||||
"@opentelemetry/resources": "2.6.1",
|
||||
"@opentelemetry/sdk-logs": "0.214.0",
|
||||
"@opentelemetry/sdk-metrics": "2.6.1",
|
||||
"@opentelemetry/sdk-trace-base": "2.6.1",
|
||||
"@opentelemetry/sdk-trace-node": "2.6.1",
|
||||
"@opentelemetry/semantic-conventions": "1.40.0",
|
||||
"@vscode/ripgrep": "^1.17.1",
|
||||
"ajv": "8.18.0",
|
||||
"auto-bind": "5.0.1",
|
||||
"axios": "1.15.0",
|
||||
"bidi-js": "1.0.3",
|
||||
"chalk": "5.6.2",
|
||||
"chokidar": "4.0.3",
|
||||
"cli-boxes": "3.0.0",
|
||||
"cli-highlight": "2.1.11",
|
||||
"code-excerpt": "4.0.0",
|
||||
"commander": "12.1.0",
|
||||
"cross-spawn": "7.0.6",
|
||||
"diff": "8.0.3",
|
||||
"duck-duck-scrape": "^2.2.7",
|
||||
"emoji-regex": "10.6.0",
|
||||
"env-paths": "3.0.0",
|
||||
"execa": "9.6.1",
|
||||
"fflate": "0.8.2",
|
||||
"figures": "6.1.0",
|
||||
"fuse.js": "7.1.0",
|
||||
"get-east-asian-width": "1.5.0",
|
||||
"google-auth-library": "9.15.1",
|
||||
"https-proxy-agent": "7.0.6",
|
||||
"ignore": "7.0.5",
|
||||
"indent-string": "5.0.0",
|
||||
"jsonc-parser": "3.3.1",
|
||||
"lodash-es": "4.18.1",
|
||||
"lru-cache": "11.2.7",
|
||||
"marked": "15.0.12",
|
||||
"p-map": "7.0.4",
|
||||
"picomatch": "4.0.4",
|
||||
"proper-lockfile": "4.1.2",
|
||||
"qrcode": "1.5.4",
|
||||
"react": "19.2.4",
|
||||
"react-compiler-runtime": "1.0.0",
|
||||
"react-reconciler": "0.33.0",
|
||||
"semver": "7.7.4",
|
||||
"sharp": "^0.34.5",
|
||||
"shell-quote": "1.8.3",
|
||||
"signal-exit": "4.1.0",
|
||||
"stack-utils": "2.0.6",
|
||||
"strip-ansi": "7.2.0",
|
||||
"supports-hyperlinks": "3.2.0",
|
||||
"tree-kill": "1.2.2",
|
||||
"turndown": "7.2.2",
|
||||
"type-fest": "4.41.0",
|
||||
"undici": "7.24.6",
|
||||
"usehooks-ts": "3.1.1",
|
||||
"vscode-languageserver-protocol": "3.17.5",
|
||||
"wrap-ansi": "9.0.2",
|
||||
"ws": "8.20.0",
|
||||
"xss": "1.0.15",
|
||||
"yaml": "2.8.3",
|
||||
"zod": "3.25.76"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@types/bun": "^1.2.0",
|
||||
"@types/node": "^25.5.0",
|
||||
"@types/react": "^19.2.14",
|
||||
"typescript": "^5.7.0"
|
||||
"@types/bun": "1.3.11",
|
||||
"@types/node": "25.5.0",
|
||||
"@types/react": "19.2.14",
|
||||
"tsx": "^4.21.0",
|
||||
"typescript": "5.9.3"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=20.0.0"
|
||||
},
|
||||
"repository": {
|
||||
"type": "git",
|
||||
"url": "https://gitlawb.com/z6MkqDnb7Siv3Cwj7pGJq4T5EsUisECqR8KpnDLwcaZq5TPr/openclaude"
|
||||
"url": "https://github.com/Gitlawb/openclaude.git"
|
||||
},
|
||||
"keywords": [
|
||||
"claude-code",
|
||||
@@ -136,8 +153,11 @@
|
||||
"ollama",
|
||||
"gemini"
|
||||
],
|
||||
"license": "MIT",
|
||||
"license": "SEE LICENSE FILE",
|
||||
"publishConfig": {
|
||||
"access": "public"
|
||||
},
|
||||
"overrides": {
|
||||
"lodash-es": "4.18.1"
|
||||
}
|
||||
}
|
||||
|
||||
1
python/__init__.py
Normal file
1
python/__init__.py
Normal file
@@ -0,0 +1 @@
|
||||
# Python helper package for standalone provider-side utilities.
|
||||
146
python/atomic_chat_provider.py
Normal file
146
python/atomic_chat_provider.py
Normal file
@@ -0,0 +1,146 @@
|
||||
"""
|
||||
atomic_chat_provider.py
|
||||
-----------------------
|
||||
Adds native Atomic Chat support to openclaude.
|
||||
Lets Claude Code route requests to any locally-running model via
|
||||
Atomic Chat (Apple Silicon only) at 127.0.0.1:1337.
|
||||
|
||||
Atomic Chat exposes an OpenAI-compatible API, so messages are forwarded
|
||||
directly without translation.
|
||||
|
||||
Usage (.env):
|
||||
PREFERRED_PROVIDER=atomic-chat
|
||||
ATOMIC_CHAT_BASE_URL=http://127.0.0.1:1337
|
||||
"""
|
||||
|
||||
import httpx
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
from typing import AsyncIterator
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
ATOMIC_CHAT_BASE_URL = os.getenv("ATOMIC_CHAT_BASE_URL", "http://127.0.0.1:1337")
|
||||
|
||||
|
||||
def _api_url(path: str) -> str:
    """Build a full Atomic Chat URL by joining *path* onto the /v1 API prefix."""
    prefix = f"{ATOMIC_CHAT_BASE_URL}/v1"
    return f"{prefix}{path}"
|
||||
|
||||
|
||||
async def check_atomic_chat_running() -> bool:
    """Return True when the local Atomic Chat server answers its /models endpoint.

    Any network or HTTP-client failure is treated as "not running" rather
    than propagated, since this is only a liveness probe.
    """
    try:
        # Short timeout: this is a quick health check, not a real request.
        async with httpx.AsyncClient(timeout=3.0) as client:
            response = await client.get(_api_url("/models"))
    except Exception:
        return False
    return response.status_code == 200
|
||||
|
||||
|
||||
async def list_atomic_chat_models() -> list[str]:
    """Return the ids of the models currently served by Atomic Chat.

    On any failure (server down, HTTP error, malformed response) a warning
    is logged and an empty list is returned instead of raising.
    """
    try:
        async with httpx.AsyncClient(timeout=5.0) as client:
            response = await client.get(_api_url("/models"))
            response.raise_for_status()
            body = response.json()
            # Kept inside the try-block so a malformed entry (missing "id")
            # also falls back to the empty-list path.
            return [entry["id"] for entry in body.get("data", [])]
    except Exception as exc:
        logger.warning(f"Could not list Atomic Chat models: {exc}")
        return []
|
||||
|
||||
|
||||
async def atomic_chat(
    model: str,
    messages: list[dict],
    system: str | None = None,
    max_tokens: int = 4096,
    temperature: float = 1.0,
) -> dict:
    """Send a non-streaming chat request to Atomic Chat and return the reply
    as an Anthropic-style message dict.

    Args:
        model: Model id as reported by the Atomic Chat /models endpoint.
        messages: OpenAI-style chat messages (list of {"role", "content"}).
        system: Optional system prompt, prepended as a system message.
        max_tokens: Upper bound on generated tokens.
        temperature: Sampling temperature forwarded to the server.

    Returns:
        An Anthropic Messages-shaped dict with "content", "usage", etc.

    Raises:
        httpx.HTTPStatusError: If the server responds with a non-2xx status.
    """
    # Copy so the caller's list is not mutated by the system-message insert.
    chat_messages = list(messages)
    if system:
        chat_messages.insert(0, {"role": "system", "content": system})

    payload = {
        "model": model,
        "messages": chat_messages,
        "max_tokens": max_tokens,
        "temperature": temperature,
        "stream": False,
    }

    # Generous timeout: local models can be slow to produce long completions.
    async with httpx.AsyncClient(timeout=120.0) as client:
        resp = await client.post(_api_url("/chat/completions"), json=payload)
        resp.raise_for_status()
        data = resp.json()

    # Guard against "choices": [] or a missing key — indexing [0] directly
    # would raise IndexError on an empty list even with a .get() default.
    choices = data.get("choices") or [{}]
    choice = choices[0]
    # "content" can be explicitly null in OpenAI-compatible responses
    # (e.g. tool-call replies); coerce to "" so the text block stays valid.
    assistant_text = choice.get("message", {}).get("content") or ""
    # "usage" can likewise be null; fall back to an empty dict.
    usage = data.get("usage") or {}

    return {
        "id": data.get("id", "msg_atomic_chat"),
        "type": "message",
        "role": "assistant",
        "content": [{"type": "text", "text": assistant_text}],
        "model": model,
        "stop_reason": "end_turn",
        "stop_sequence": None,
        "usage": {
            "input_tokens": usage.get("prompt_tokens", 0),
            "output_tokens": usage.get("completion_tokens", 0),
        },
    }
|
||||
|
||||
|
||||
async def atomic_chat_stream(
    model: str,
    messages: list[dict],
    system: str | None = None,
    max_tokens: int = 4096,
    temperature: float = 1.0,
) -> AsyncIterator[str]:
    """Stream a chat completion from Atomic Chat as Anthropic-style SSE lines.

    Reads the server's OpenAI-compatible SSE stream and re-emits it as
    Anthropic Messages streaming events (message_start, content_block_start,
    content_block_delta, content_block_stop, message_delta, message_stop),
    each yielded as a raw "event: ..." / "data: ..." text line.

    Args:
        model: Model id to request; echoed into the message_start event.
        messages: OpenAI-style chat messages.
        system: Optional system prompt, prepended as a system message.
        max_tokens: Upper bound on generated tokens.
        temperature: Sampling temperature forwarded to the server.

    Yields:
        SSE-formatted text fragments, in Anthropic event order.

    NOTE(review): if the upstream stream ends (or "[DONE]" arrives) before a
    chunk carries a finish_reason, the terminating content_block_stop /
    message_stop events are never emitted — confirm downstream consumers
    tolerate a truncated event sequence.
    """
    # Copy so the caller's list is not mutated by the system-message insert.
    chat_messages = list(messages)
    if system:
        chat_messages.insert(0, {"role": "system", "content": system})

    payload = {
        "model": model,
        "messages": chat_messages,
        "max_tokens": max_tokens,
        "temperature": temperature,
        "stream": True,
    }

    # Emit the fixed Anthropic stream preamble before contacting the server:
    # an empty assistant message plus the opening of text block index 0.
    yield "event: message_start\n"
    yield f'data: {json.dumps({"type": "message_start", "message": {"id": "msg_atomic_chat_stream", "type": "message", "role": "assistant", "content": [], "model": model, "stop_reason": None, "usage": {"input_tokens": 0, "output_tokens": 0}}})}\n\n'
    yield "event: content_block_start\n"
    yield f'data: {json.dumps({"type": "content_block_start", "index": 0, "content_block": {"type": "text", "text": ""}})}\n\n'

    async with httpx.AsyncClient(timeout=120.0) as client:
        async with client.stream("POST", _api_url("/chat/completions"), json=payload) as resp:
            resp.raise_for_status()
            async for line in resp.aiter_lines():
                # Skip keep-alives and anything that is not an SSE data line.
                if not line or not line.startswith("data: "):
                    continue
                raw = line[len("data: "):]
                # OpenAI-style streams end with a literal "[DONE]" sentinel.
                if raw.strip() == "[DONE]":
                    break
                try:
                    chunk = json.loads(raw)
                    delta = chunk.get("choices", [{}])[0].get("delta", {})
                    delta_text = delta.get("content", "")
                    if delta_text:
                        # Forward each text fragment as a text_delta on block 0.
                        yield "event: content_block_delta\n"
                        yield f'data: {json.dumps({"type": "content_block_delta", "index": 0, "delta": {"type": "text_delta", "text": delta_text}})}\n\n'

                    finish_reason = chunk.get("choices", [{}])[0].get("finish_reason")
                    if finish_reason:
                        # Final chunk: close the text block, report usage, and
                        # end the message, then stop reading the stream.
                        usage = chunk.get("usage", {})
                        yield "event: content_block_stop\n"
                        yield f'data: {json.dumps({"type": "content_block_stop", "index": 0})}\n\n'
                        yield "event: message_delta\n"
                        yield f'data: {json.dumps({"type": "message_delta", "delta": {"stop_reason": "end_turn", "stop_sequence": None}, "usage": {"output_tokens": usage.get("completion_tokens", 0)}})}\n\n'
                        yield "event: message_stop\n"
                        yield f'data: {json.dumps({"type": "message_stop"})}\n\n'
                        break
                except json.JSONDecodeError:
                    # Ignore malformed SSE payloads and keep streaming.
                    continue
|
||||
@@ -49,6 +49,18 @@ def normalize_ollama_model(model_name: str) -> str:
|
||||
return model_name
|
||||
|
||||
|
||||
def _extract_ollama_image_data(block: dict) -> str | None:
|
||||
source = block.get("source")
|
||||
if not isinstance(source, dict):
|
||||
return None
|
||||
if source.get("type") != "base64":
|
||||
return None
|
||||
data = source.get("data")
|
||||
if isinstance(data, str) and data:
|
||||
return data
|
||||
return None
|
||||
|
||||
|
||||
def anthropic_to_ollama_messages(messages: list[dict]) -> list[dict]:
|
||||
ollama_messages = []
|
||||
for msg in messages:
|
||||
@@ -58,15 +70,23 @@ def anthropic_to_ollama_messages(messages: list[dict]) -> list[dict]:
|
||||
ollama_messages.append({"role": role, "content": content})
|
||||
elif isinstance(content, list):
|
||||
text_parts = []
|
||||
image_parts = []
|
||||
for block in content:
|
||||
if isinstance(block, dict):
|
||||
if block.get("type") == "text":
|
||||
text_parts.append(block.get("text", ""))
|
||||
elif block.get("type") == "image":
|
||||
text_parts.append("[image]")
|
||||
image_data = _extract_ollama_image_data(block)
|
||||
if image_data:
|
||||
image_parts.append(image_data)
|
||||
else:
|
||||
text_parts.append("[image]")
|
||||
elif isinstance(block, str):
|
||||
text_parts.append(block)
|
||||
ollama_messages.append({"role": role, "content": "\n".join(text_parts)})
|
||||
ollama_message = {"role": role, "content": "\n".join(text_parts)}
|
||||
if image_parts:
|
||||
ollama_message["images"] = image_parts
|
||||
ollama_messages.append(ollama_message)
|
||||
return ollama_messages
|
||||
|
||||
|
||||
3
python/requirements.txt
Normal file
3
python/requirements.txt
Normal file
@@ -0,0 +1,3 @@
|
||||
pytest==7.4.4
|
||||
pytest-asyncio==0.23.3
|
||||
httpx==0.25.2
|
||||
@@ -57,8 +57,8 @@ class Provider:
|
||||
@property
|
||||
def is_configured(self) -> bool:
|
||||
"""True if the provider has an API key set."""
|
||||
if self.name == "ollama":
|
||||
return True # Ollama needs no API key
|
||||
if self.name in ("ollama", "atomic-chat"):
|
||||
return True # Local providers need no API key
|
||||
return bool(self.api_key)
|
||||
|
||||
@property
|
||||
@@ -93,6 +93,7 @@ def build_default_providers() -> list[Provider]:
|
||||
big = os.getenv("BIG_MODEL", "gpt-4.1")
|
||||
small = os.getenv("SMALL_MODEL", "gpt-4.1-mini")
|
||||
ollama_url = os.getenv("OLLAMA_BASE_URL", "http://localhost:11434")
|
||||
atomic_chat_url = os.getenv("ATOMIC_CHAT_BASE_URL", "http://127.0.0.1:1337")
|
||||
|
||||
return [
|
||||
Provider(
|
||||
@@ -111,6 +112,14 @@ def build_default_providers() -> list[Provider]:
|
||||
big_model=big if "gemini" in big else "gemini-2.5-pro",
|
||||
small_model=small if "gemini" in small else "gemini-2.0-flash",
|
||||
),
|
||||
Provider(
|
||||
name="mistral",
|
||||
ping_url="",
|
||||
api_key_env="MISTRAL_API_KEY",
|
||||
cost_per_1k_tokens=0.0001,
|
||||
big_model=big if "mistral" in big else "devstral-latest",
|
||||
small_model=small if "small" in small else "ministral-3b-latest",
|
||||
),
|
||||
Provider(
|
||||
name="ollama",
|
||||
ping_url=f"{ollama_url}/api/tags",
|
||||
@@ -119,6 +128,14 @@ def build_default_providers() -> list[Provider]:
|
||||
big_model=big if "gemini" not in big and "gpt" not in big else "llama3:8b",
|
||||
small_model=small if "gemini" not in small and "gpt" not in small else "llama3:8b",
|
||||
),
|
||||
Provider(
|
||||
name="atomic-chat",
|
||||
ping_url=f"{atomic_chat_url}/v1/models",
|
||||
api_key_env="",
|
||||
cost_per_1k_tokens=0.0, # free — local (Apple Silicon)
|
||||
big_model=big if "gemini" not in big and "gpt" not in big else "llama3:8b",
|
||||
small_model=small if "gemini" not in small and "gpt" not in small else "llama3:8b",
|
||||
),
|
||||
]
|
||||
|
||||
|
||||
@@ -219,9 +236,14 @@ class SmartRouter:
|
||||
return min(available, key=lambda p: p.score(self.strategy))
|
||||
|
||||
def get_model_for_provider(
|
||||
self, provider: Provider, claude_model: str
|
||||
self,
|
||||
provider: Provider,
|
||||
claude_model: str,
|
||||
is_large_request: bool = False,
|
||||
) -> str:
|
||||
"""Map a Claude model name to the provider's actual model."""
|
||||
if is_large_request:
|
||||
return provider.big_model
|
||||
is_large = any(
|
||||
keyword in claude_model.lower()
|
||||
for keyword in ["opus", "sonnet", "large", "big"]
|
||||
@@ -280,7 +302,11 @@ class SmartRouter:
|
||||
)
|
||||
|
||||
provider = min(available, key=lambda p: p.score(self.strategy))
|
||||
model = self.get_model_for_provider(provider, claude_model)
|
||||
model = self.get_model_for_provider(
|
||||
provider,
|
||||
claude_model,
|
||||
is_large_request=large,
|
||||
)
|
||||
|
||||
logger.debug(
|
||||
f"SmartRouter: routing to {provider.name}/{model} "
|
||||
1
python/tests/__init__.py
Normal file
1
python/tests/__init__.py
Normal file
@@ -0,0 +1 @@
|
||||
# Pytest package marker for the Python helper test suite.
|
||||
5
python/tests/conftest.py
Normal file
5
python/tests/conftest.py
Normal file
@@ -0,0 +1,5 @@
|
||||
from pathlib import Path
|
||||
import sys
|
||||
|
||||
# Make the sibling `python/` helper modules importable from this test package.
|
||||
sys.path.insert(0, str(Path(__file__).resolve().parents[1]))
|
||||
130
python/tests/test_atomic_chat_provider.py
Normal file
130
python/tests/test_atomic_chat_provider.py
Normal file
@@ -0,0 +1,130 @@
|
||||
"""
|
||||
test_atomic_chat_provider.py
|
||||
Run: pytest python/tests/test_atomic_chat_provider.py -v
|
||||
"""
|
||||
|
||||
import pytest
|
||||
from unittest.mock import AsyncMock, MagicMock, patch
|
||||
from atomic_chat_provider import (
|
||||
atomic_chat,
|
||||
list_atomic_chat_models,
|
||||
check_atomic_chat_running,
|
||||
)
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_atomic_chat_running_true():
|
||||
mock_response = MagicMock()
|
||||
mock_response.status_code = 200
|
||||
with patch("atomic_chat_provider.httpx.AsyncClient") as MockClient:
|
||||
MockClient.return_value.__aenter__.return_value.get = AsyncMock(return_value=mock_response)
|
||||
result = await check_atomic_chat_running()
|
||||
assert result is True
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_atomic_chat_running_false_on_exception():
|
||||
with patch("atomic_chat_provider.httpx.AsyncClient") as MockClient:
|
||||
MockClient.return_value.__aenter__.return_value.get = AsyncMock(side_effect=Exception("refused"))
|
||||
result = await check_atomic_chat_running()
|
||||
assert result is False
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_list_models_returns_ids():
|
||||
mock_response = MagicMock()
|
||||
mock_response.status_code = 200
|
||||
mock_response.json.return_value = {
|
||||
"data": [{"id": "llama-3.1-8b"}, {"id": "mistral-7b"}],
|
||||
}
|
||||
mock_response.raise_for_status = MagicMock()
|
||||
with patch("atomic_chat_provider.httpx.AsyncClient") as MockClient:
|
||||
MockClient.return_value.__aenter__.return_value.get = AsyncMock(return_value=mock_response)
|
||||
models = await list_atomic_chat_models()
|
||||
assert "llama-3.1-8b" in models
|
||||
assert "mistral-7b" in models
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_list_models_empty_on_failure():
|
||||
with patch("atomic_chat_provider.httpx.AsyncClient") as MockClient:
|
||||
MockClient.return_value.__aenter__.return_value.get = AsyncMock(side_effect=Exception("down"))
|
||||
models = await list_atomic_chat_models()
|
||||
assert models == []
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_atomic_chat_returns_anthropic_format():
|
||||
mock_response = MagicMock()
|
||||
mock_response.raise_for_status = MagicMock()
|
||||
mock_response.json.return_value = {
|
||||
"id": "chatcmpl-abc123",
|
||||
"choices": [{"message": {"content": "42 is the answer."}}],
|
||||
"usage": {"prompt_tokens": 10, "completion_tokens": 8},
|
||||
}
|
||||
with patch("atomic_chat_provider.httpx.AsyncClient") as MockClient:
|
||||
MockClient.return_value.__aenter__.return_value.post = AsyncMock(return_value=mock_response)
|
||||
result = await atomic_chat(
|
||||
model="llama-3.1-8b",
|
||||
messages=[{"role": "user", "content": "What is 6*7?"}],
|
||||
)
|
||||
assert result["type"] == "message"
|
||||
assert result["role"] == "assistant"
|
||||
assert "42" in result["content"][0]["text"]
|
||||
assert result["usage"]["input_tokens"] == 10
|
||||
assert result["usage"]["output_tokens"] == 8
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_atomic_chat_prepends_system():
|
||||
captured = {}
|
||||
|
||||
async def mock_post(url, json=None, **kwargs):
|
||||
captured.update(json or {})
|
||||
m = MagicMock()
|
||||
m.raise_for_status = MagicMock()
|
||||
m.json.return_value = {
|
||||
"id": "chatcmpl-xyz",
|
||||
"choices": [{"message": {"content": "ok"}}],
|
||||
"usage": {"prompt_tokens": 1, "completion_tokens": 1},
|
||||
}
|
||||
return m
|
||||
|
||||
with patch("atomic_chat_provider.httpx.AsyncClient") as MockClient:
|
||||
MockClient.return_value.__aenter__.return_value.post = mock_post
|
||||
await atomic_chat(
|
||||
model="llama-3.1-8b",
|
||||
messages=[{"role": "user", "content": "Hi"}],
|
||||
system="Be helpful.",
|
||||
)
|
||||
assert captured["messages"][0]["role"] == "system"
|
||||
assert "helpful" in captured["messages"][0]["content"]
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_atomic_chat_sends_correct_payload():
|
||||
captured = {}
|
||||
|
||||
async def mock_post(url, json=None, **kwargs):
|
||||
captured.update(json or {})
|
||||
m = MagicMock()
|
||||
m.raise_for_status = MagicMock()
|
||||
m.json.return_value = {
|
||||
"id": "chatcmpl-xyz",
|
||||
"choices": [{"message": {"content": "ok"}}],
|
||||
"usage": {"prompt_tokens": 1, "completion_tokens": 1},
|
||||
}
|
||||
return m
|
||||
|
||||
with patch("atomic_chat_provider.httpx.AsyncClient") as MockClient:
|
||||
MockClient.return_value.__aenter__.return_value.post = mock_post
|
||||
await atomic_chat(
|
||||
model="test-model",
|
||||
messages=[{"role": "user", "content": "Test"}],
|
||||
max_tokens=2048,
|
||||
temperature=0.5,
|
||||
)
|
||||
assert captured["model"] == "test-model"
|
||||
assert captured["max_tokens"] == 2048
|
||||
assert captured["temperature"] == 0.5
|
||||
assert captured["stream"] is False
|
||||
@@ -1,6 +1,6 @@
|
||||
"""
|
||||
test_ollama_provider.py
|
||||
Run: pytest test_ollama_provider.py -v
|
||||
Run: pytest python/tests/test_ollama_provider.py -v
|
||||
"""
|
||||
|
||||
import pytest
|
||||
@@ -13,31 +13,57 @@ from ollama_provider import (
|
||||
check_ollama_running,
|
||||
)
|
||||
|
||||
|
||||
def test_normalize_strips_prefix():
|
||||
assert normalize_ollama_model("ollama/llama3:8b") == "llama3:8b"
|
||||
|
||||
|
||||
def test_normalize_no_prefix():
|
||||
assert normalize_ollama_model("codellama:34b") == "codellama:34b"
|
||||
|
||||
|
||||
def test_normalize_empty():
|
||||
assert normalize_ollama_model("") == ""
|
||||
|
||||
|
||||
def test_converts_string_content():
|
||||
messages = [{"role": "user", "content": "Hello!"}]
|
||||
result = anthropic_to_ollama_messages(messages)
|
||||
assert result == [{"role": "user", "content": "Hello!"}]
|
||||
|
||||
|
||||
def test_converts_text_block_list():
|
||||
messages = [{"role": "user", "content": [{"type": "text", "text": "What is Python?"}]}]
|
||||
result = anthropic_to_ollama_messages(messages)
|
||||
assert result[0]["content"] == "What is Python?"
|
||||
|
||||
|
||||
def test_converts_image_block_to_placeholder():
|
||||
messages = [{"role": "user", "content": [{"type": "image", "source": {}}, {"type": "text", "text": "Describe this"}]}]
|
||||
result = anthropic_to_ollama_messages(messages)
|
||||
assert "[image]" in result[0]["content"]
|
||||
assert "Describe this" in result[0]["content"]
|
||||
|
||||
|
||||
def test_converts_base64_image_block_to_ollama_images():
|
||||
messages = [{
|
||||
"role": "user",
|
||||
"content": [
|
||||
{
|
||||
"type": "image",
|
||||
"source": {
|
||||
"type": "base64",
|
||||
"media_type": "image/png",
|
||||
"data": "YWJjMTIz",
|
||||
},
|
||||
},
|
||||
{"type": "text", "text": "Describe this"},
|
||||
],
|
||||
}]
|
||||
result = anthropic_to_ollama_messages(messages)
|
||||
assert result[0]["images"] == ["YWJjMTIz"]
|
||||
assert "Describe this" in result[0]["content"]
|
||||
|
||||
def test_converts_multi_turn():
|
||||
messages = [
|
||||
{"role": "user", "content": "Hi"},
|
||||
@@ -48,6 +74,7 @@ def test_converts_multi_turn():
|
||||
assert len(result) == 3
|
||||
assert result[1]["role"] == "assistant"
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_ollama_running_true():
|
||||
mock_response = MagicMock()
|
||||
@@ -57,6 +84,7 @@ async def test_ollama_running_true():
|
||||
result = await check_ollama_running()
|
||||
assert result is True
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_ollama_running_false_on_exception():
|
||||
with patch("ollama_provider.httpx.AsyncClient") as MockClient:
|
||||
@@ -64,6 +92,7 @@ async def test_ollama_running_false_on_exception():
|
||||
result = await check_ollama_running()
|
||||
assert result is False
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_list_models_returns_names():
|
||||
mock_response = MagicMock()
|
||||
@@ -75,6 +104,7 @@ async def test_list_models_returns_names():
|
||||
models = await list_ollama_models()
|
||||
assert "llama3:8b" in models
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_ollama_chat_returns_anthropic_format():
|
||||
mock_response = MagicMock()
|
||||
@@ -95,9 +125,11 @@ async def test_ollama_chat_returns_anthropic_format():
|
||||
assert result["role"] == "assistant"
|
||||
assert "42" in result["content"][0]["text"]
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_ollama_chat_prepends_system():
|
||||
captured = {}
|
||||
|
||||
async def mock_post(url, json=None, **kwargs):
|
||||
captured.update(json or {})
|
||||
m = MagicMock()
|
||||
@@ -114,7 +146,47 @@ async def test_ollama_chat_prepends_system():
|
||||
await ollama_chat(
|
||||
model="llama3:8b",
|
||||
messages=[{"role": "user", "content": "Hi"}],
|
||||
system="Be helpful."
|
||||
system="Be helpful.",
|
||||
)
|
||||
assert captured["messages"][0]["role"] == "system"
|
||||
assert "helpful" in captured["messages"][0]["content"]
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_ollama_chat_includes_base64_images_in_payload():
|
||||
captured = {}
|
||||
|
||||
async def mock_post(url, json=None, **kwargs):
|
||||
captured.update(json or {})
|
||||
m = MagicMock()
|
||||
m.raise_for_status = MagicMock()
|
||||
m.json.return_value = {
|
||||
"message": {"content": "ok"},
|
||||
"created_at": "",
|
||||
"prompt_eval_count": 1,
|
||||
"eval_count": 1,
|
||||
}
|
||||
return m
|
||||
|
||||
with patch("ollama_provider.httpx.AsyncClient") as MockClient:
|
||||
MockClient.return_value.__aenter__.return_value.post = mock_post
|
||||
await ollama_chat(
|
||||
model="llama3:8b",
|
||||
messages=[{
|
||||
"role": "user",
|
||||
"content": [
|
||||
{
|
||||
"type": "image",
|
||||
"source": {
|
||||
"type": "base64",
|
||||
"media_type": "image/jpeg",
|
||||
"data": "ZHVtbXk=",
|
||||
},
|
||||
},
|
||||
{"type": "text", "text": "What is in this image?"},
|
||||
],
|
||||
}],
|
||||
)
|
||||
|
||||
assert captured["messages"][0]["images"] == ["ZHVtbXk="]
|
||||
assert "What is in this image?" in captured["messages"][0]["content"]
|
||||
@@ -2,7 +2,7 @@
|
||||
test_smart_router.py
|
||||
--------------------
|
||||
Tests for the SmartRouter.
|
||||
Run: pytest test_smart_router.py -v
|
||||
Run: pytest python/tests/test_smart_router.py -v
|
||||
"""
|
||||
|
||||
import pytest
|
||||
@@ -13,6 +13,12 @@ from smart_router import SmartRouter, Provider
|
||||
|
||||
# ── Fixtures ──────────────────────────────────────────────────────────────────
|
||||
|
||||
|
||||
@pytest.fixture(autouse=True)
|
||||
def fake_api_key(monkeypatch):
|
||||
monkeypatch.setenv("FAKE_KEY", "test-key")
|
||||
|
||||
|
||||
def make_provider(name, healthy=True, configured=True,
|
||||
latency=100.0, cost=0.002, errors=0, requests=0):
|
||||
p = Provider(
|
||||
@@ -28,7 +34,7 @@ def make_provider(name, healthy=True, configured=True,
|
||||
p.error_count = errors
|
||||
p.request_count = requests
|
||||
if not configured:
|
||||
p.api_key_env = "" # makes is_configured False for non-ollama
|
||||
p.api_key_env = "" # makes is_configured False for non-local providers
|
||||
return p
|
||||
|
||||
|
||||
@@ -122,6 +128,13 @@ def test_get_model_large_request():
|
||||
assert model == "openai-big"
|
||||
|
||||
|
||||
def test_get_model_large_message_overrides_claude_label():
|
||||
p = make_provider("openai")
|
||||
r = make_router()
|
||||
model = r.get_model_for_provider(p, "claude-haiku", is_large_request=True)
|
||||
assert model == "openai-big"
|
||||
|
||||
|
||||
def test_get_model_small_request():
|
||||
p = make_provider("openai")
|
||||
r = make_router()
|
||||
@@ -140,6 +153,16 @@ async def test_route_returns_best_provider():
|
||||
assert result["provider"] == "cheap"
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_route_uses_big_model_for_large_message_bodies():
|
||||
p = make_provider("openai")
|
||||
r = make_router(providers=[p])
|
||||
result = await r.route([
|
||||
{"role": "user", "content": "x" * 3001},
|
||||
], "claude-haiku")
|
||||
assert result["model"] == "openai-big"
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_route_raises_when_no_providers():
|
||||
p = make_provider("a", healthy=False)
|
||||
11
release-please-config.json
Normal file
11
release-please-config.json
Normal file
@@ -0,0 +1,11 @@
|
||||
{
|
||||
"$schema": "https://raw.githubusercontent.com/googleapis/release-please/main/schemas/config.json",
|
||||
"packages": {
|
||||
".": {
|
||||
"release-type": "node",
|
||||
"package-name": "@gitlawb/openclaude",
|
||||
"bump-minor-pre-major": true,
|
||||
"include-v-in-tag": true
|
||||
}
|
||||
}
|
||||
}
|
||||
287
scripts/build.ts
287
scripts/build.ts
@@ -3,44 +3,122 @@
|
||||
* distributable JS file using Bun's bundler.
|
||||
*
|
||||
* Handles:
|
||||
* - bun:bundle feature() flags → all false (disables internal-only features)
|
||||
* - bun:bundle feature() flags for the open build
|
||||
* - MACRO.* globals → inlined version/build-time constants
|
||||
* - src/ path aliases
|
||||
*/
|
||||
|
||||
import { readFileSync } from 'fs'
|
||||
import { readFileSync, readdirSync, writeFileSync } from 'fs'
|
||||
import { join } from 'path'
|
||||
import { noTelemetryPlugin } from './no-telemetry-plugin'
|
||||
|
||||
const pkg = JSON.parse(readFileSync('./package.json', 'utf-8'))
|
||||
const version = pkg.version
|
||||
|
||||
// Feature flags — all disabled for the open build.
|
||||
// These gate Anthropic-internal features (voice, proactive, kairos, etc.)
|
||||
// Feature flags for the open build.
|
||||
// Most Anthropic-internal features stay off; open-build features can be
|
||||
// selectively enabled here when their full source exists in the mirror.
|
||||
const featureFlags: Record<string, boolean> = {
|
||||
VOICE_MODE: false,
|
||||
PROACTIVE: false,
|
||||
KAIROS: false,
|
||||
BRIDGE_MODE: false,
|
||||
DAEMON: false,
|
||||
AGENT_TRIGGERS: false,
|
||||
MONITOR_TOOL: false,
|
||||
ABLATION_BASELINE: false,
|
||||
DUMP_SYSTEM_PROMPT: false,
|
||||
CACHED_MICROCOMPACT: false,
|
||||
COORDINATOR_MODE: false,
|
||||
CONTEXT_COLLAPSE: false,
|
||||
COMMIT_ATTRIBUTION: false,
|
||||
TEAMMEM: false,
|
||||
UDS_INBOX: false,
|
||||
BG_SESSIONS: false,
|
||||
AWAY_SUMMARY: false,
|
||||
TRANSCRIPT_CLASSIFIER: false,
|
||||
WEB_BROWSER_TOOL: false,
|
||||
MESSAGE_ACTIONS: false,
|
||||
BUDDY: false,
|
||||
CHICAGO_MCP: false,
|
||||
COWORKER_TYPE_TELEMETRY: false,
|
||||
// ── Disabled: require Anthropic infrastructure or missing source ─────
|
||||
VOICE_MODE: false, // Push-to-talk STT via claude.ai OAuth endpoint
|
||||
PROACTIVE: false, // Autonomous agent mode (missing proactive/ module)
|
||||
KAIROS: false, // Persistent assistant/session mode (cloud backend)
|
||||
BRIDGE_MODE: false, // Remote desktop bridge via CCR infrastructure
|
||||
DAEMON: false, // Background daemon process (stubbed in open build)
|
||||
AGENT_TRIGGERS: false, // Scheduled remote agent triggers
|
||||
ABLATION_BASELINE: false, // A/B testing harness for eval experiments
|
||||
CONTEXT_COLLAPSE: false, // Context collapsing optimization (stubbed)
|
||||
COMMIT_ATTRIBUTION: false, // Co-Authored-By metadata in git commits
|
||||
UDS_INBOX: false, // Unix Domain Socket inter-session messaging
|
||||
BG_SESSIONS: false, // Background sessions via tmux (stubbed)
|
||||
WEB_BROWSER_TOOL: false, // Built-in browser automation (source not mirrored)
|
||||
CHICAGO_MCP: false, // Computer-use MCP (native Swift modules stubbed)
|
||||
COWORKER_TYPE_TELEMETRY: false, // Telemetry for agent/coworker type classification
|
||||
MCP_SKILLS: false, // Dynamic MCP skill discovery (src/skills/mcpSkills.ts not mirrored; enabling this causes "fetchMcpSkillsForClient is not a function" when MCP servers with resources connect — see #856)
|
||||
|
||||
// ── Enabled: upstream defaults ──────────────────────────────────────
|
||||
COORDINATOR_MODE: true, // Multi-agent coordinator with worker delegation
|
||||
BUILTIN_EXPLORE_PLAN_AGENTS: true, // Built-in Explore/Plan specialized subagents
|
||||
BUDDY: true, // Buddy mode for paired programming
|
||||
MONITOR_TOOL: true, // MCP server monitoring/streaming tool
|
||||
TEAMMEM: true, // Team memory management
|
||||
MESSAGE_ACTIONS: true, // Message action buttons in the UI
|
||||
|
||||
// ── Enabled: new activations ────────────────────────────────────────
|
||||
DUMP_SYSTEM_PROMPT: true, // --dump-system-prompt CLI flag for debugging
|
||||
CACHED_MICROCOMPACT: true, // Cache-aware tool result truncation optimization
|
||||
AWAY_SUMMARY: true, // "While you were away" recap after 5min blur
|
||||
TRANSCRIPT_CLASSIFIER: true, // Auto-approval classifier for safe tool uses
|
||||
ULTRATHINK: true, // Deep thinking mode — type "ultrathink" to boost reasoning
|
||||
TOKEN_BUDGET: true, // Token budget tracking with usage warnings
|
||||
HISTORY_PICKER: true, // Enhanced interactive prompt history picker
|
||||
QUICK_SEARCH: true, // Ctrl+G quick search across prompts
|
||||
SHOT_STATS: true, // Shot distribution stats in session summary
|
||||
EXTRACT_MEMORIES: true, // Auto-extract durable memories from conversations
|
||||
FORK_SUBAGENT: true, // Implicit context-forking when omitting subagent_type
|
||||
VERIFICATION_AGENT: true, // Built-in read-only agent for test/verification
|
||||
PROMPT_CACHE_BREAK_DETECTION: true, // Detect & log unexpected prompt cache invalidations
|
||||
HOOK_PROMPTS: true, // Allow tools to request interactive user prompts
|
||||
}
|
||||
|
||||
// ── Pre-process: replace feature() calls with boolean literals ──────
|
||||
// Bun v1.3.9+ resolves `import { feature } from 'bun:bundle'` natively
|
||||
// before plugins can intercept it via onResolve. The bun: namespace is
|
||||
// handled by Bun's C++ resolver which runs before the JS plugin phase,
|
||||
// so the previous onResolve/onLoad shim was silently ineffective — ALL
|
||||
// feature() calls evaluated to false regardless of the featureFlags map.
|
||||
//
|
||||
// Fix: pre-process source files to strip the bun:bundle import and
|
||||
// replace feature('FLAG') calls with their boolean literal. Files are
|
||||
// modified in-place before Bun.build() and restored in a finally block.
|
||||
|
||||
// Match feature('FLAG') calls, including multi-line: feature(\n 'FLAG',\n)
|
||||
const featureCallRe = /\bfeature\(\s*['"](\w+)['"][,\s]*\)/gs
|
||||
const featureImportRe = /import\s*\{[^}]*\bfeature\b[^}]*\}\s*from\s*['"]bun:bundle['"];?\s*\n?/g
|
||||
const modifiedFiles = new Map<string, string>() // path → original content
|
||||
|
||||
function preProcessFeatureFlags(dir: string) {
|
||||
for (const ent of readdirSync(dir, { withFileTypes: true })) {
|
||||
const full = join(dir, ent.name)
|
||||
if (ent.isDirectory()) { preProcessFeatureFlags(full); continue }
|
||||
if (!/\.(ts|tsx)$/.test(ent.name)) continue
|
||||
|
||||
const raw = readFileSync(full, 'utf-8')
|
||||
if (!raw.includes('feature(')) continue
|
||||
|
||||
let contents = raw
|
||||
contents = contents.replace(featureImportRe, '')
|
||||
contents = contents.replace(featureCallRe, (_match, name) =>
|
||||
String((featureFlags as Record<string, boolean>)[name] ?? false),
|
||||
)
|
||||
|
||||
if (contents !== raw) {
|
||||
modifiedFiles.set(full, raw)
|
||||
writeFileSync(full, contents)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
function restoreModifiedFiles() {
|
||||
for (const [path, original] of modifiedFiles) {
|
||||
writeFileSync(path, original)
|
||||
}
|
||||
modifiedFiles.clear()
|
||||
}
|
||||
|
||||
preProcessFeatureFlags(join(import.meta.dir, '..', 'src'))
|
||||
const numModified = modifiedFiles.size
|
||||
|
||||
// Restore source files on abrupt termination (Ctrl+C, kill, etc.)
|
||||
for (const signal of ['SIGINT', 'SIGTERM'] as const) {
|
||||
process.on(signal, () => {
|
||||
restoreModifiedFiles()
|
||||
process.exit(signal === 'SIGINT' ? 130 : 143)
|
||||
})
|
||||
}
|
||||
|
||||
try {
|
||||
|
||||
const result = await Bun.build({
|
||||
entrypoints: ['./src/entrypoints/cli.tsx'],
|
||||
outdir: './dist',
|
||||
@@ -64,6 +142,7 @@ const result = await Bun.build({
|
||||
'MACRO.NATIVE_PACKAGE_URL': 'undefined',
|
||||
},
|
||||
plugins: [
|
||||
noTelemetryPlugin,
|
||||
{
|
||||
name: 'bun-bundle-shim',
|
||||
setup(build) {
|
||||
@@ -100,18 +179,11 @@ export async function handleBgFlag() { throw new Error("Background sessions are
|
||||
],
|
||||
] as const)
|
||||
|
||||
// Resolve `import { feature } from 'bun:bundle'` to a shim
|
||||
build.onResolve({ filter: /^bun:bundle$/ }, () => ({
|
||||
path: 'bun:bundle',
|
||||
namespace: 'bun-bundle-shim',
|
||||
}))
|
||||
build.onLoad(
|
||||
{ filter: /.*/, namespace: 'bun-bundle-shim' },
|
||||
() => ({
|
||||
contents: `export function feature(name) { return false; }`,
|
||||
loader: 'js',
|
||||
}),
|
||||
)
|
||||
// bun:bundle feature() replacement is handled by the source
|
||||
// pre-processing step above (see preProcessFeatureFlags).
|
||||
// The previous onResolve/onLoad shim was ineffective in Bun
|
||||
// v1.3.9+ because the bun: namespace is resolved natively
|
||||
// before the JS plugin phase runs.
|
||||
|
||||
build.onResolve(
|
||||
{ filter: /^\.\.\/(daemon\/workerRegistry|daemon\/main|cli\/bg|cli\/handlers\/templateJobs|environment-runner\/main|self-hosted-runner\/main)\.js$/ },
|
||||
@@ -156,7 +228,6 @@ export async function handleBgFlag() { throw new Error("Background sessions are
|
||||
'modifiers-napi',
|
||||
'url-handler-napi',
|
||||
'color-diff-napi',
|
||||
'sharp',
|
||||
'@anthropic-ai/mcpb',
|
||||
'@ant/claude-for-chrome-mcp',
|
||||
'@anthropic-ai/sandbox-runtime',
|
||||
@@ -249,6 +320,125 @@ export const SeverityNumber = {};
|
||||
loader: 'js',
|
||||
}),
|
||||
)
|
||||
|
||||
// Pre-scan: find all missing modules that need stubbing
|
||||
// (Bun's onResolve corrupts module graph even when returning null,
|
||||
// so we use exact-match resolvers instead of catch-all patterns)
|
||||
const fs = require('fs')
|
||||
const pathMod = require('path')
|
||||
const srcDir = pathMod.resolve(__dirname, '..', 'src')
|
||||
const missingModules = new Set<string>()
|
||||
const missingModuleExports = new Map<string, Set<string>>()
|
||||
|
||||
// Known missing external packages
|
||||
for (const pkg of [
|
||||
'@ant/computer-use-mcp',
|
||||
'@ant/computer-use-mcp/sentinelApps',
|
||||
'@ant/computer-use-mcp/types',
|
||||
'@ant/computer-use-swift',
|
||||
'@ant/computer-use-input',
|
||||
]) {
|
||||
missingModules.add(pkg)
|
||||
}
|
||||
|
||||
// Scan source to find imports that can't resolve
|
||||
function scanForMissingImports() {
|
||||
function checkAndRegister(specifier: string, fileDir: string, namedPart: string) {
|
||||
const names = namedPart.split(',')
|
||||
.map((s: string) => s.trim().replace(/^type\s+/, ''))
|
||||
.filter((s: string) => s && !s.startsWith('type '))
|
||||
|
||||
// Check src/tasks/ non-relative imports
|
||||
if (specifier.startsWith('src/tasks/')) {
|
||||
const resolved = pathMod.resolve(__dirname, '..', specifier)
|
||||
const candidates = [
|
||||
resolved,
|
||||
`${resolved}.ts`, `${resolved}.tsx`,
|
||||
resolved.replace(/\.js$/, '.ts'), resolved.replace(/\.js$/, '.tsx'),
|
||||
pathMod.join(resolved, 'index.ts'), pathMod.join(resolved, 'index.tsx'),
|
||||
]
|
||||
if (!candidates.some((c: string) => fs.existsSync(c))) {
|
||||
missingModules.add(specifier)
|
||||
}
|
||||
}
|
||||
// Check relative .js imports
|
||||
else if (specifier.endsWith('.js') && (specifier.startsWith('./') || specifier.startsWith('../'))) {
|
||||
const resolved = pathMod.resolve(fileDir, specifier)
|
||||
const tsVariant = resolved.replace(/\.js$/, '.ts')
|
||||
const tsxVariant = resolved.replace(/\.js$/, '.tsx')
|
||||
if (!fs.existsSync(resolved) && !fs.existsSync(tsVariant) && !fs.existsSync(tsxVariant)) {
|
||||
missingModules.add(specifier)
|
||||
}
|
||||
}
|
||||
|
||||
// Track named exports for missing modules
|
||||
if (names.length > 0) {
|
||||
if (!missingModuleExports.has(specifier)) missingModuleExports.set(specifier, new Set())
|
||||
for (const n of names) missingModuleExports.get(specifier)!.add(n)
|
||||
}
|
||||
}
|
||||
|
||||
function walk(dir: string) {
|
||||
for (const ent of fs.readdirSync(dir, { withFileTypes: true })) {
|
||||
const full = pathMod.join(dir, ent.name)
|
||||
if (ent.isDirectory()) { walk(full); continue }
|
||||
if (!/\.(ts|tsx)$/.test(ent.name)) continue
|
||||
const rawCode: string = fs.readFileSync(full, 'utf-8')
|
||||
const fileDir = pathMod.dirname(full)
|
||||
|
||||
// Strip comments before scanning for imports/requires.
|
||||
// The regex scanner matches require()/import() patterns
|
||||
// inside JSDoc comments, causing false-positive missing
|
||||
// module detection that breaks the build with noop stubs.
|
||||
const code = rawCode
|
||||
.replace(/\/\*[\s\S]*?\*\//g, '') // block comments
|
||||
.replace(/\/\/.*$/gm, '') // line comments
|
||||
|
||||
// Collect static imports: import { X } from '...'
|
||||
for (const m of code.matchAll(/import\s+(?:\{([^}]*)\}|(\w+))?\s*(?:,\s*\{([^}]*)\})?\s*from\s+['"](.*?)['"]/g)) {
|
||||
checkAndRegister(m[4], fileDir, m[1] || m[3] || '')
|
||||
}
|
||||
|
||||
// Collect dynamic requires: require('...') — these are used
|
||||
// behind feature() gates and become live when flags are enabled.
|
||||
for (const m of code.matchAll(/require\(\s*['"](\.\.?\/[^'"]+)['"]\s*\)/g)) {
|
||||
checkAndRegister(m[1], fileDir, '')
|
||||
}
|
||||
|
||||
// Collect dynamic imports: import('...')
|
||||
for (const m of code.matchAll(/import\(\s*['"](\.\.?\/[^'"]+)['"]\s*\)/g)) {
|
||||
checkAndRegister(m[1], fileDir, '')
|
||||
}
|
||||
}
|
||||
}
|
||||
walk(srcDir)
|
||||
}
|
||||
scanForMissingImports()
|
||||
|
||||
// Register exact-match resolvers for each missing module
|
||||
for (const mod of missingModules) {
|
||||
const escaped = mod.replace(/[.*+?^${}()|[\]\\]/g, '\\$&')
|
||||
build.onResolve({ filter: new RegExp(`^${escaped}$`) }, () => ({
|
||||
path: mod,
|
||||
namespace: 'missing-module-stub',
|
||||
}))
|
||||
}
|
||||
|
||||
build.onLoad(
|
||||
{ filter: /.*/, namespace: 'missing-module-stub' },
|
||||
(args) => {
|
||||
const names = missingModuleExports.get(args.path) ?? new Set()
|
||||
const exports = [...names].map(n => `export const ${n} = noop;`).join('\n')
|
||||
return {
|
||||
contents: `
|
||||
const noop = () => null;
|
||||
export default noop;
|
||||
${exports}
|
||||
`,
|
||||
loader: 'js',
|
||||
}
|
||||
},
|
||||
)
|
||||
},
|
||||
},
|
||||
],
|
||||
@@ -273,6 +463,8 @@ export const SeverityNumber = {};
|
||||
'@opentelemetry/sdk-logs',
|
||||
'@opentelemetry/sdk-metrics',
|
||||
'@opentelemetry/semantic-conventions',
|
||||
// Native image processing
|
||||
'sharp',
|
||||
// Cloud provider SDKs
|
||||
'@aws-sdk/client-bedrock',
|
||||
'@aws-sdk/client-bedrock-runtime',
|
||||
@@ -280,6 +472,11 @@ export const SeverityNumber = {};
|
||||
'@aws-sdk/credential-providers',
|
||||
'@azure/identity',
|
||||
'google-auth-library',
|
||||
// @vscode/ripgrep ships a platform-specific binary alongside its
|
||||
// index.js and resolves the path via __dirname at runtime. Bundling
|
||||
// would freeze the build host's absolute path into dist/cli.mjs, so we
|
||||
// keep it external and rely on the npm package being installed.
|
||||
'@vscode/ripgrep',
|
||||
],
|
||||
})
|
||||
|
||||
@@ -288,7 +485,13 @@ if (!result.success) {
|
||||
for (const log of result.logs) {
|
||||
console.error(log)
|
||||
}
|
||||
process.exit(1)
|
||||
process.exitCode = 1
|
||||
} else {
|
||||
console.log(`✓ Built openclaude v${version} → dist/cli.mjs`)
|
||||
}
|
||||
|
||||
console.log(`✓ Built openclaude v${version} → dist/cli.mjs`)
|
||||
} finally {
|
||||
// Always restore source files, even if Bun.build() throws
|
||||
restoreModifiedFiles()
|
||||
console.log(` 🔄 feature-flags: pre-processed ${numModified} files (restored)`)
|
||||
}
|
||||
|
||||
47
scripts/feature-flags-source-guard.test.ts
Normal file
47
scripts/feature-flags-source-guard.test.ts
Normal file
@@ -0,0 +1,47 @@
|
||||
import { existsSync, readFileSync } from 'fs'
|
||||
import { join } from 'path'
|
||||
import { expect, test } from 'bun:test'
|
||||
|
||||
// Regression guard for #856. Several build feature flags require source files
|
||||
// that are not mirrored into the open build. When such a flag is set to `true`
|
||||
// without the source present, the bundler falls back to a missing-module stub
|
||||
// that only exports `default`, which causes runtime errors like
|
||||
// `fetchMcpSkillsForClient is not a function` when downstream code reaches
|
||||
// through the `require()` to a named export.
|
||||
//
|
||||
// This test fails fast at test-time if someone re-enables one of these flags
|
||||
// without first mirroring the corresponding source file.
|
||||
|
||||
const BUILD_SCRIPT = join(import.meta.dir, 'build.ts')
|
||||
const REPO_ROOT = join(import.meta.dir, '..')
|
||||
|
||||
type FlagGuard = {
|
||||
flag: string
|
||||
source: string // path relative to repo root
|
||||
}
|
||||
|
||||
const FLAG_REQUIRES_SOURCE: FlagGuard[] = [
|
||||
{ flag: 'MCP_SKILLS', source: 'src/skills/mcpSkills.ts' },
|
||||
]
|
||||
|
||||
test('build feature flags are not enabled without their source files', () => {
|
||||
const buildScript = readFileSync(BUILD_SCRIPT, 'utf-8')
|
||||
|
||||
for (const { flag, source } of FLAG_REQUIRES_SOURCE) {
|
||||
const enabledRe = new RegExp(`^\\s*${flag}\\s*:\\s*true\\b`, 'm')
|
||||
const isEnabled = enabledRe.test(buildScript)
|
||||
const sourceExists = existsSync(join(REPO_ROOT, source))
|
||||
|
||||
if (isEnabled && !sourceExists) {
|
||||
throw new Error(
|
||||
`Feature flag ${flag} is enabled in scripts/build.ts, but its required source file "${source}" does not exist. ` +
|
||||
`Enabling this flag without the source will cause runtime errors (missing named exports from the missing-module stub). ` +
|
||||
`Either mirror the source file or set ${flag}: false.`,
|
||||
)
|
||||
}
|
||||
|
||||
// When the source IS present, the flag can be either true or false; either
|
||||
// is fine. We only care about the "enabled but missing" combination.
|
||||
expect(true).toBe(true)
|
||||
}
|
||||
})
|
||||
121
scripts/grpc-cli.ts
Normal file
121
scripts/grpc-cli.ts
Normal file
@@ -0,0 +1,121 @@
|
||||
import * as grpc from '@grpc/grpc-js'
|
||||
import * as protoLoader from '@grpc/proto-loader'
|
||||
import path from 'path'
|
||||
import * as readline from 'readline'
|
||||
|
||||
const PROTO_PATH = path.resolve(import.meta.dirname, '../src/proto/openclaude.proto')
|
||||
|
||||
const packageDefinition = protoLoader.loadSync(PROTO_PATH, {
|
||||
keepCase: true,
|
||||
longs: String,
|
||||
enums: String,
|
||||
defaults: true,
|
||||
oneofs: true,
|
||||
})
|
||||
|
||||
const protoDescriptor = grpc.loadPackageDefinition(packageDefinition) as any
|
||||
const openclaudeProto = protoDescriptor.openclaude.v1
|
||||
|
||||
const rl = readline.createInterface({
|
||||
input: process.stdin,
|
||||
output: process.stdout
|
||||
})
|
||||
|
||||
function askQuestion(query: string): Promise<string> {
|
||||
return new Promise(resolve => {
|
||||
rl.question(query, resolve)
|
||||
})
|
||||
}
|
||||
|
||||
async function main() {
|
||||
const host = process.env.GRPC_HOST || 'localhost'
|
||||
const port = process.env.GRPC_PORT || '50051'
|
||||
const client = new openclaudeProto.AgentService(
|
||||
`${host}:${port}`,
|
||||
grpc.credentials.createInsecure()
|
||||
)
|
||||
|
||||
let call: grpc.ClientDuplexStream<any, any> | null = null
|
||||
|
||||
const startStream = () => {
|
||||
call = client.Chat()
|
||||
let textStreamed = false
|
||||
|
||||
call.on('data', async (serverMessage: any) => {
|
||||
if (serverMessage.text_chunk) {
|
||||
process.stdout.write(serverMessage.text_chunk.text)
|
||||
textStreamed = true
|
||||
} else if (serverMessage.tool_start) {
|
||||
console.log(`\n\x1b[36m[Tool Call]\x1b[0m \x1b[1m${serverMessage.tool_start.tool_name}\x1b[0m`)
|
||||
console.log(`\x1b[90m${serverMessage.tool_start.arguments_json}\x1b[0m\n`)
|
||||
} else if (serverMessage.tool_result) {
|
||||
console.log(`\n\x1b[32m[Tool Result]\x1b[0m \x1b[1m${serverMessage.tool_result.tool_name}\x1b[0m`)
|
||||
const out = serverMessage.tool_result.output
|
||||
if (out.length > 500) {
|
||||
console.log(`\x1b[90m${out.substring(0, 500)}...\n(Output truncated, total length: ${out.length})\x1b[0m`)
|
||||
} else {
|
||||
console.log(`\x1b[90m${out}\x1b[0m`)
|
||||
}
|
||||
} else if (serverMessage.action_required) {
|
||||
const action = serverMessage.action_required
|
||||
console.log(`\n\x1b[33m[Action Required]\x1b[0m`)
|
||||
const reply = await askQuestion(`\x1b[1m${action.question}\x1b[0m (y/n) > `)
|
||||
|
||||
call?.write({
|
||||
input: {
|
||||
prompt_id: action.prompt_id,
|
||||
reply: reply.trim()
|
||||
}
|
||||
})
|
||||
} else if (serverMessage.done) {
|
||||
if (!textStreamed && serverMessage.done.full_text) {
|
||||
process.stdout.write(serverMessage.done.full_text)
|
||||
}
|
||||
textStreamed = false
|
||||
console.log('\n\x1b[32m[Generation Complete]\x1b[0m')
|
||||
promptUser()
|
||||
} else if (serverMessage.error) {
|
||||
console.error(`\n\x1b[31m[Server Error]\x1b[0m ${serverMessage.error.message}`)
|
||||
promptUser()
|
||||
}
|
||||
})
|
||||
|
||||
call.on('end', () => {
|
||||
console.log('\n\x1b[90m[Stream closed by server]\x1b[0m')
|
||||
// Don't prompt user here, let 'done' or 'error' handlers do it
|
||||
})
|
||||
|
||||
call.on('error', (err: Error) => {
|
||||
console.error('\n\x1b[31m[Stream Error]\x1b[0m', err.message)
|
||||
promptUser()
|
||||
})
|
||||
}
|
||||
|
||||
const promptUser = async () => {
|
||||
const message = await askQuestion('\n\x1b[35m> \x1b[0m')
|
||||
|
||||
if (message.trim().toLowerCase() === '/exit' || message.trim().toLowerCase() === '/quit') {
|
||||
console.log('Bye!')
|
||||
rl.close()
|
||||
process.exit(0)
|
||||
}
|
||||
|
||||
if (!call || call.destroyed) {
|
||||
startStream()
|
||||
}
|
||||
|
||||
call!.write({
|
||||
request: {
|
||||
session_id: 'cli-session-1',
|
||||
message: message,
|
||||
working_directory: process.cwd()
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
console.log('\x1b[32mOpenClaude gRPC CLI\x1b[0m')
|
||||
console.log('\x1b[90mType /exit to quit.\x1b[0m')
|
||||
promptUser()
|
||||
}
|
||||
|
||||
main()
|
||||
163
scripts/no-telemetry-growthbook-stub.test.ts
Normal file
163
scripts/no-telemetry-growthbook-stub.test.ts
Normal file
@@ -0,0 +1,163 @@
|
||||
import { afterAll, beforeEach, describe, expect, test } from 'bun:test'
|
||||
import { mkdirSync, readFileSync, rmSync, unlinkSync, writeFileSync } from 'node:fs'
|
||||
import { join } from 'node:path'
|
||||
import { tmpdir } from 'node:os'
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Setup: extract the growthbook stub from no-telemetry-plugin.ts, write it to
|
||||
// a temp .mjs file, and dynamically import it so we can test the real code
|
||||
// that gets bundled.
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
const pluginSource = readFileSync(join(__dirname, 'no-telemetry-plugin.ts'), 'utf-8')
|
||||
const stubMatch = pluginSource.match(/'services\/analytics\/growthbook': `([\s\S]*?)`/)
|
||||
if (!stubMatch) throw new Error('Could not extract growthbook stub from no-telemetry-plugin.ts')
|
||||
|
||||
const testDir = join(tmpdir(), `growthbook-stub-test-${process.pid}`)
|
||||
const stubFile = join(testDir, 'growthbook-stub.mjs')
|
||||
const flagsFile = join(testDir, 'test-flags.json')
|
||||
|
||||
mkdirSync(testDir, { recursive: true })
|
||||
writeFileSync(stubFile, stubMatch[1])
|
||||
|
||||
// Point the stub at our test flags file (checked by _loadFlags on first access)
|
||||
process.env.CLAUDE_FEATURE_FLAGS_FILE = flagsFile
|
||||
|
||||
const stub = await import(stubFile)
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Tests
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
describe('growthbook stub — local feature flag overrides', () => {
|
||||
beforeEach(() => {
|
||||
stub.resetGrowthBook()
|
||||
try { unlinkSync(flagsFile) } catch { /* may not exist */ }
|
||||
})
|
||||
|
||||
afterAll(() => {
|
||||
rmSync(testDir, { recursive: true, force: true })
|
||||
delete process.env.CLAUDE_FEATURE_FLAGS_FILE
|
||||
})
|
||||
|
||||
// ── File absent ──────────────────────────────────────────────────
|
||||
|
||||
test('returns defaultValue when flags file is absent', () => {
|
||||
expect(stub.getFeatureValue_CACHED_MAY_BE_STALE('tengu_foo', 42)).toBe(42)
|
||||
})
|
||||
|
||||
test('getAllGrowthBookFeatures returns {} when file is absent', () => {
|
||||
expect(stub.getAllGrowthBookFeatures()).toEqual({})
|
||||
})
|
||||
|
||||
// ── Open-build defaults (_openBuildDefaults) ────────────────────
|
||||
|
||||
test('returns open-build default when flags file is absent', () => {
|
||||
// tengu_passport_quail is in _openBuildDefaults as true; without a
|
||||
// flags file the stub should return the open-build override, not
|
||||
// the call-site defaultValue.
|
||||
expect(stub.getFeatureValue_CACHED_MAY_BE_STALE('tengu_passport_quail', false)).toBe(true)
|
||||
expect(stub.getFeatureValue_CACHED_MAY_BE_STALE('tengu_coral_fern', false)).toBe(true)
|
||||
})
|
||||
|
||||
test('flags file overrides open-build defaults', () => {
|
||||
// User-provided feature-flags.json takes priority over _openBuildDefaults.
|
||||
writeFileSync(flagsFile, JSON.stringify({ tengu_passport_quail: false }))
|
||||
|
||||
expect(stub.getFeatureValue_CACHED_MAY_BE_STALE('tengu_passport_quail', true)).toBe(false)
|
||||
})
|
||||
|
||||
// ── Valid JSON object ────────────────────────────────────────────
|
||||
|
||||
test('loads and returns values from a valid JSON file', () => {
|
||||
writeFileSync(flagsFile, JSON.stringify({ tengu_foo: true, tengu_bar: 'hello' }))
|
||||
|
||||
expect(stub.getFeatureValue_CACHED_MAY_BE_STALE('tengu_foo', false)).toBe(true)
|
||||
expect(stub.getFeatureValue_CACHED_MAY_BE_STALE('tengu_bar', 'default')).toBe('hello')
|
||||
})
|
||||
|
||||
test('returns defaultValue for keys not present in the file', () => {
|
||||
writeFileSync(flagsFile, JSON.stringify({ tengu_foo: true }))
|
||||
|
||||
expect(stub.getFeatureValue_CACHED_MAY_BE_STALE('tengu_missing', 99)).toBe(99)
|
||||
})
|
||||
|
||||
test('getAllGrowthBookFeatures returns the full flags object', () => {
|
||||
const flags = { tengu_a: true, tengu_b: false, tengu_c: 42 }
|
||||
writeFileSync(flagsFile, JSON.stringify(flags))
|
||||
|
||||
expect(stub.getAllGrowthBookFeatures()).toEqual(flags)
|
||||
})
|
||||
|
||||
// ── Malformed / non-object JSON ──────────────────────────────────
|
||||
|
||||
test('falls back to defaults on malformed JSON', () => {
|
||||
writeFileSync(flagsFile, '{not valid json!!!')
|
||||
|
||||
expect(stub.getFeatureValue_CACHED_MAY_BE_STALE('tengu_foo', 'fallback')).toBe('fallback')
|
||||
})
|
||||
|
||||
test('falls back to defaults when JSON is a primitive (true)', () => {
|
||||
writeFileSync(flagsFile, 'true')
|
||||
|
||||
expect(stub.getFeatureValue_CACHED_MAY_BE_STALE('tengu_foo', 'fallback')).toBe('fallback')
|
||||
})
|
||||
|
||||
test('falls back to defaults when JSON is an array', () => {
|
||||
writeFileSync(flagsFile, '["a", "b"]')
|
||||
|
||||
expect(stub.getFeatureValue_CACHED_MAY_BE_STALE('tengu_foo', 'fallback')).toBe('fallback')
|
||||
})
|
||||
|
||||
// ── Cache invalidation ───────────────────────────────────────────
|
||||
|
||||
test('resetGrowthBook clears cache so the file is re-read', () => {
|
||||
writeFileSync(flagsFile, JSON.stringify({ tengu_foo: 'first' }))
|
||||
expect(stub.getFeatureValue_CACHED_MAY_BE_STALE('tengu_foo', 'x')).toBe('first')
|
||||
|
||||
// Update the file — cached value is still 'first'
|
||||
writeFileSync(flagsFile, JSON.stringify({ tengu_foo: 'second' }))
|
||||
expect(stub.getFeatureValue_CACHED_MAY_BE_STALE('tengu_foo', 'x')).toBe('first')
|
||||
|
||||
// After reset, the new value is picked up
|
||||
stub.resetGrowthBook()
|
||||
expect(stub.getFeatureValue_CACHED_MAY_BE_STALE('tengu_foo', 'x')).toBe('second')
|
||||
})
|
||||
|
||||
test('refreshGrowthBookFeatures clears cache', async () => {
|
||||
writeFileSync(flagsFile, JSON.stringify({ tengu_foo: 'v1' }))
|
||||
expect(stub.getFeatureValue_CACHED_MAY_BE_STALE('tengu_foo', 'x')).toBe('v1')
|
||||
|
||||
writeFileSync(flagsFile, JSON.stringify({ tengu_foo: 'v2' }))
|
||||
await stub.refreshGrowthBookFeatures()
|
||||
expect(stub.getFeatureValue_CACHED_MAY_BE_STALE('tengu_foo', 'x')).toBe('v2')
|
||||
})
|
||||
|
||||
// ── Multiple getter variants ─────────────────────────────────────
|
||||
|
||||
test('all getter functions read from local flags', async () => {
|
||||
writeFileSync(flagsFile, JSON.stringify({ tengu_gate: true, tengu_config: { a: 1 } }))
|
||||
|
||||
expect(await stub.getFeatureValue_DEPRECATED('tengu_gate', false)).toBe(true)
|
||||
stub.resetGrowthBook()
|
||||
expect(stub.getFeatureValue_CACHED_WITH_REFRESH('tengu_gate', false)).toBe(true)
|
||||
stub.resetGrowthBook()
|
||||
expect(stub.checkStatsigFeatureGate_CACHED_MAY_BE_STALE('tengu_gate')).toBe(true)
|
||||
stub.resetGrowthBook()
|
||||
expect(await stub.checkGate_CACHED_OR_BLOCKING('tengu_gate')).toBe(true)
|
||||
stub.resetGrowthBook()
|
||||
expect(await stub.getDynamicConfig_BLOCKS_ON_INIT('tengu_config', {})).toEqual({ a: 1 })
|
||||
stub.resetGrowthBook()
|
||||
expect(stub.getDynamicConfig_CACHED_MAY_BE_STALE('tengu_config', {})).toEqual({ a: 1 })
|
||||
})
|
||||
|
||||
// ── Security gate ────────────────────────────────────────────────
|
||||
|
||||
test('checkSecurityRestrictionGate always returns false regardless of flags', async () => {
|
||||
writeFileSync(flagsFile, JSON.stringify({
|
||||
tengu_disable_bypass_permissions_mode: true,
|
||||
}))
|
||||
|
||||
expect(await stub.checkSecurityRestrictionGate()).toBe(false)
|
||||
})
|
||||
})
|
||||
459
scripts/no-telemetry-plugin.ts
Normal file
459
scripts/no-telemetry-plugin.ts
Normal file
@@ -0,0 +1,459 @@
|
||||
/**
|
||||
* No-Telemetry Build Plugin for OpenClaude
|
||||
*
|
||||
* Replaces all analytics, telemetry, and phone-home modules with no-op stubs
|
||||
* at compile time. Zero runtime cost, zero network calls to Anthropic.
|
||||
*
|
||||
* This file is NOT tracked upstream — merge conflicts are impossible.
|
||||
* Only build.ts needs a one-line import + one-line array entry.
|
||||
*
|
||||
* Kills:
|
||||
* - GrowthBook remote feature flags (api.anthropic.com)
|
||||
* - Datadog event intake
|
||||
* - 1P event logging (api.anthropic.com/api/event_logging/batch)
|
||||
* - BigQuery metrics exporter (api.anthropic.com/api/claude_code/metrics)
|
||||
* - Perfetto / OpenTelemetry session tracing
|
||||
* - Auto-updater (storage.googleapis.com, npm registry)
|
||||
* - Plugin fetch telemetry
|
||||
* - Transcript / feedback sharing
|
||||
*/
|
||||
|
||||
import type { BunPlugin } from 'bun'
|
||||
|
||||
// Module path (relative to src/, without extension) → stub source
|
||||
const stubs: Record<string, string> = {
|
||||
|
||||
// ─── Analytics core ─────────────────────────────────────────────
|
||||
|
||||
'services/analytics/index': `
|
||||
export function stripProtoFields(metadata) { return metadata; }
|
||||
export function attachAnalyticsSink() {}
|
||||
export function logEvent() {}
|
||||
export async function logEventAsync() {}
|
||||
export function _resetForTesting() {}
|
||||
`,
|
||||
|
||||
'services/analytics/growthbook': `
|
||||
import _fs from 'node:fs';
|
||||
import _path from 'node:path';
|
||||
import _os from 'node:os';
|
||||
|
||||
let _flags = undefined;
|
||||
|
||||
// ── Open-build GrowthBook overrides ───────────────────────────────────
|
||||
// Override upstream defaultValue for runtime gates tied to build-time
|
||||
// features. Only keys that DIFFER from upstream belong here — the
|
||||
// catalog below is pure documentation and does NOT affect resolution.
|
||||
//
|
||||
// Priority: ~/.claude/feature-flags.json > _openBuildDefaults > defaultValue
|
||||
//
|
||||
// To override at runtime, create ~/.claude/feature-flags.json:
|
||||
// { "tengu_some_flag": true }
|
||||
const _openBuildDefaults = {
|
||||
'tengu_sedge_lantern': true, // AWAY_SUMMARY — "while you were away" recap (upstream: false)
|
||||
'tengu_hive_evidence': true, // VERIFICATION_AGENT — read-only test/verification agent (upstream: false)
|
||||
'tengu_passport_quail': true, // EXTRACT_MEMORIES — enable memory extraction (upstream: false)
|
||||
'tengu_coral_fern': true, // EXTRACT_MEMORIES — enable memory search in past context (upstream: false)
|
||||
};
|
||||
|
||||
/* ── Known runtime feature keys (reference only) ───────────────────────
|
||||
* This catalog does NOT participate in flag resolution. It documents
|
||||
* the known GrowthBook keys and their upstream default values, scraped
|
||||
* from src/ call sites. It is NOT exhaustive — new keys may be added
|
||||
* upstream between catalog updates.
|
||||
*
|
||||
* Some keys have different defaults at different call sites — this is
|
||||
* intentional upstream (the server unifies the value at runtime).
|
||||
*
|
||||
* To activate any of these, add them to ~/.claude/feature-flags.json
|
||||
* or to _openBuildDefaults above.
|
||||
*
|
||||
* ── Reasoning & thinking ──────────────────────────────────────────────
|
||||
* tengu_turtle_carbon = true ULTRATHINK deep thinking runtime gate
|
||||
* tengu_thinkback = gate /thinkback replay command
|
||||
*
|
||||
* ── Agents & orchestration ────────────────────────────────────────────
|
||||
* tengu_amber_flint = true Agent swarms coordination
|
||||
* tengu_amber_stoat = true Built-in agent availability (Explore, Plan, etc.)
|
||||
* tengu_agent_list_attach = true Attach file context to agent list
|
||||
* tengu_auto_background_agents = false Auto-spawn background agents
|
||||
* tengu_slim_subagent_claudemd = true Lighter ClaudeMD for subagents
|
||||
* tengu_hive_evidence = false Verification agent / evidence tracking (4 call sites)
|
||||
* tengu_ultraplan_model = model cfg ULTRAPLAN model selection (dynamic config)
|
||||
*
|
||||
* ── Memory & context ──────────────────────────────────────────────────
|
||||
* tengu_passport_quail = false EXTRACT_MEMORIES main gate (isExtractModeActive)
|
||||
* tengu_coral_fern = false EXTRACT_MEMORIES search in past context
|
||||
* tengu_slate_thimble = false Memory dir paths (non-interactive sessions)
|
||||
* tengu_herring_clock = true/false Team memory paths (varies by call site)
|
||||
* tengu_bramble_lintel = null Extract memories throttle (null → every turn)
|
||||
* tengu_sedge_lantern = false AWAY_SUMMARY "while you were away" recap
|
||||
* tengu_session_memory = false Session memory service
|
||||
* tengu_sm_config = {} Session memory config (dynamic)
|
||||
* tengu_sm_compact_config = {} Session memory compaction config (dynamic)
|
||||
* tengu_cobalt_raccoon = false Reactive compaction (suppress auto-compact)
|
||||
* tengu_pebble_leaf_prune = false Session storage pruning
|
||||
*
|
||||
* ── Kairos & cron ─────────────────────────────────────────────────────
|
||||
* tengu_kairos_brief = false Brief layout mode (KAIROS)
|
||||
* tengu_kairos_brief_config = {} Brief config (dynamic)
|
||||
* tengu_kairos_cron = true Cron scheduler enable
|
||||
* tengu_kairos_cron_durable = true Durable (disk-persistent) cron tasks
|
||||
* tengu_kairos_cron_config = {} Cron jitter config (dynamic)
|
||||
*
|
||||
* ── Bridge & remote (require Anthropic infra) ─────────────────────────
|
||||
* tengu_ccr_bridge = false CCR bridge connection
|
||||
* tengu_ccr_bridge_multi_session = gate Multi-session spawn mode
|
||||
* tengu_ccr_mirror = false CCR session mirroring
|
||||
* tengu_ccr_bundle_seed_enabled = gate Git bundle seeding for CCR
|
||||
* tengu_ccr_bundle_max_bytes = null Bundle size limit (null → default)
|
||||
* tengu_bridge_repl_v2 = false Environment-less REPL bridge v2
|
||||
* tengu_bridge_repl_v2_cse_shim_enabled = true CSE→Session tag retag shim
|
||||
* tengu_bridge_min_version = {min:'0'} Min CLI version for bridge (dynamic)
|
||||
* tengu_bridge_initial_history_cap = 200 Initial history cap for bridge
|
||||
* tengu_bridge_system_init = false Bridge system initialization
|
||||
* tengu_cobalt_harbor = false Auto-connect CCR at startup
|
||||
* tengu_cobalt_lantern = false Remote setup preconditions
|
||||
* tengu_remote_backend = false Remote TUI backend
|
||||
* tengu_surreal_dali = false Remote agent tasks / triggers
|
||||
*
|
||||
* ── Prompt & API ──────────────────────────────────────────────────────
|
||||
* tengu_attribution_header = true Attribution header in API requests
|
||||
* tengu_basalt_3kr = true MCP instructions delta
|
||||
* tengu_slate_prism = true/false Message formatting (varies by call site)
|
||||
* tengu_amber_prism = false Message content formatting
|
||||
* tengu_amber_json_tools = false JSON format for tool schemas
|
||||
* tengu_fgts = false API feature gates
|
||||
* tengu_otk_slot_v1 = false One-time key slots for API auth
|
||||
* tengu_cicada_nap_ms = 0 Background GrowthBook refresh throttle (ms)
|
||||
* tengu_miraculo_the_bard = false Service initialization gate
|
||||
* tengu_immediate_model_command = false Immediate /model command execution
|
||||
* tengu_chomp_inflection = false Prompt suggestions after responses
|
||||
* tengu_tool_pear = gate API betas for tool use
|
||||
* tengu-off-switch = {act:false} Service kill switch (dynamic; uses dash)
|
||||
*
|
||||
* ── Permissions & security ────────────────────────────────────────────
|
||||
* tengu_birch_trellis = true Bash auto-mode permissions config
|
||||
* tengu_auto_mode_config = {} Auto-mode configuration (dynamic, many call sites)
|
||||
* tengu_iron_gate_closed = true Permission iron gate (with refresh)
|
||||
* tengu_destructive_command_warning = false Warning for destructive bash commands
|
||||
* tengu_disable_bypass_permissions_mode = security Security killswitch (always false in open build)
|
||||
*
|
||||
* ── UI & UX ───────────────────────────────────────────────────────────
|
||||
* tengu_willow_mode = 'off' REPL rendering mode
|
||||
* tengu_terminal_panel = false Terminal panel keybinding
|
||||
* tengu_terminal_sidebar = false Terminal sidebar in REPL/config
|
||||
* tengu_marble_sandcastle = false Fast mode gate
|
||||
* tengu_jade_anvil_4 = false Rate limit options UI ordering
|
||||
* tengu_collage_kaleidoscope = true Native clipboard image paste (macOS)
|
||||
* tengu_lapis_finch = false Plugin/hint recommendation
|
||||
* tengu_lodestone_enabled = false Deep links claude-cli:// protocol
|
||||
* tengu_copper_panda = false Skill improvement suggestions
|
||||
* tengu_desktop_upsell = {} Desktop app upsell config (dynamic)
|
||||
* tengu-top-of-feed-tip = {} Emergency tip of feed (dynamic; uses dash)
|
||||
*
|
||||
* ── File operations ───────────────────────────────────────────────────
|
||||
* tengu_quartz_lantern = false File read/write dedup optimization
|
||||
* tengu_moth_copse = false Attachments handling (variant A)
|
||||
* tengu_marble_fox = false Attachments handling (variant B)
|
||||
* tengu_scratch = gate Scratchpad filesystem access / coordinator
|
||||
*
|
||||
* ── MCP & plugins ─────────────────────────────────────────────────────
|
||||
* tengu_harbor = false MCP channel allowlist verification
|
||||
* tengu_harbor_permissions = false MCP channel permissions enforcement
|
||||
* tengu_copper_bridge = false Chrome MCP bridge
|
||||
* tengu_chrome_auto_enable = false Auto-enable Chrome MCP on startup
|
||||
* tengu_glacier_2xr = false Enhanced tool search / ToolSearchTool
|
||||
* tengu_malort_pedway = {} Computer-use (Chicago) config (dynamic)
|
||||
*
|
||||
* ── VSCode / IDE ──────────────────────────────────────────────────────
|
||||
* tengu_quiet_fern = false VSCode browser support
|
||||
* tengu_vscode_cc_auth = false VSCode in-band OAuth via claude_authenticate
|
||||
* tengu_vscode_review_upsell = gate VSCode review upsell
|
||||
* tengu_vscode_onboarding = gate VSCode onboarding experience
|
||||
*
|
||||
* ── Voice ─────────────────────────────────────────────────────────────
|
||||
* tengu_amber_quartz_disabled = false VOICE_MODE kill-switch (false = voice allowed)
|
||||
*
|
||||
* ── Auto-updater (stubbed in open build) ──────────────────────────────
|
||||
* tengu_version_config = {min:'0'} Min version enforcement (dynamic)
|
||||
* tengu_max_version_config = {} Max version / deprecation config (dynamic)
|
||||
*
|
||||
* ── Telemetry & tracing ───────────────────────────────────────────────
|
||||
* tengu_trace_lantern = false Beta session tracing
|
||||
* tengu_chair_sermon = gate Analytics / message formatting gate
|
||||
* tengu_strap_foyer = false Settings sync to cloud
|
||||
*/
|
||||
|
||||
function _loadFlags() {
|
||||
if (_flags !== undefined) return;
|
||||
try {
|
||||
const flagsPath = process.env.CLAUDE_FEATURE_FLAGS_FILE
|
||||
|| _path.join(_os.homedir(), '.claude', 'feature-flags.json');
|
||||
const parsed = JSON.parse(_fs.readFileSync(flagsPath, 'utf-8'));
|
||||
_flags = (parsed && typeof parsed === 'object' && !Array.isArray(parsed)) ? parsed : null;
|
||||
} catch {
|
||||
_flags = null;
|
||||
}
|
||||
}
|
||||
|
||||
function _getFlagValue(key, defaultValue) {
|
||||
_loadFlags();
|
||||
if (_flags != null && Object.hasOwn(_flags, key)) return _flags[key];
|
||||
if (Object.hasOwn(_openBuildDefaults, key)) return _openBuildDefaults[key];
|
||||
return defaultValue;
|
||||
}
|
||||
|
||||
const noop = () => {};
|
||||
export function onGrowthBookRefresh() { return noop; }
|
||||
export function hasGrowthBookEnvOverride() { return false; }
|
||||
export function getAllGrowthBookFeatures() { _loadFlags(); return _flags || {}; }
|
||||
export function getGrowthBookConfigOverrides() { return {}; }
|
||||
export function setGrowthBookConfigOverride() {}
|
||||
export function clearGrowthBookConfigOverrides() {}
|
||||
export function getApiBaseUrlHost() { return undefined; }
|
||||
export const initializeGrowthBook = async () => null;
|
||||
export async function getFeatureValue_DEPRECATED(feature, defaultValue) { return _getFlagValue(feature, defaultValue); }
|
||||
export function getFeatureValue_CACHED_MAY_BE_STALE(feature, defaultValue) { return _getFlagValue(feature, defaultValue); }
|
||||
export function getFeatureValue_CACHED_WITH_REFRESH(feature, defaultValue) { return _getFlagValue(feature, defaultValue); }
|
||||
export function checkStatsigFeatureGate_CACHED_MAY_BE_STALE(gate) { return Boolean(_getFlagValue(gate, false)); }
|
||||
// Security killswitch — always false in the open build. Anthropic uses this
|
||||
// gate to remotely disable bypassPermissions mode; exposing it via local flags
|
||||
// would let users accidentally lock themselves out of --dangerously-skip-permissions.
|
||||
export async function checkSecurityRestrictionGate(gate) { return false; }
|
||||
export async function checkGate_CACHED_OR_BLOCKING(gate) { return Boolean(_getFlagValue(gate, false)); }
|
||||
export function refreshGrowthBookAfterAuthChange() {}
|
||||
export function resetGrowthBook() { _flags = undefined; }
|
||||
export async function refreshGrowthBookFeatures() { _flags = undefined; }
|
||||
export function setupPeriodicGrowthBookRefresh() {}
|
||||
export function stopPeriodicGrowthBookRefresh() {}
|
||||
export async function getDynamicConfig_BLOCKS_ON_INIT(configName, defaultValue) { return _getFlagValue(configName, defaultValue); }
|
||||
export function getDynamicConfig_CACHED_MAY_BE_STALE(configName, defaultValue) { return _getFlagValue(configName, defaultValue); }
|
||||
`,
|
||||
|
||||
'services/analytics/sink': `
|
||||
export function initializeAnalyticsGates() {}
|
||||
export function initializeAnalyticsSink() {}
|
||||
`,
|
||||
|
||||
'services/analytics/config': `
|
||||
export function isAnalyticsDisabled() { return true; }
|
||||
export function isFeedbackSurveyDisabled() { return true; }
|
||||
`,
|
||||
|
||||
'services/analytics/datadog': `
|
||||
export const initializeDatadog = async () => false;
|
||||
export async function shutdownDatadog() {}
|
||||
export async function trackDatadogEvent() {}
|
||||
`,
|
||||
|
||||
'services/analytics/firstPartyEventLogger': `
|
||||
export function getEventSamplingConfig() { return {}; }
|
||||
export function shouldSampleEvent() { return null; }
|
||||
export async function shutdown1PEventLogging() {}
|
||||
export function is1PEventLoggingEnabled() { return false; }
|
||||
export function logEventTo1P() {}
|
||||
export function logGrowthBookExperimentTo1P() {}
|
||||
export function initialize1PEventLogging() {}
|
||||
export async function reinitialize1PEventLoggingIfConfigChanged() {}
|
||||
`,
|
||||
|
||||
'services/analytics/firstPartyEventLoggingExporter': `
|
||||
export class FirstPartyEventLoggingExporter {
|
||||
constructor() {}
|
||||
async export(logs, resultCallback) { resultCallback({ code: 0 }); }
|
||||
async getQueuedEventCount() { return 0; }
|
||||
async shutdown() {}
|
||||
async forceFlush() {}
|
||||
}
|
||||
`,
|
||||
|
||||
'services/analytics/metadata': `
|
||||
export function sanitizeToolNameForAnalytics(toolName) { return toolName; }
|
||||
export function isToolDetailsLoggingEnabled() { return false; }
|
||||
export function isAnalyticsToolDetailsLoggingEnabled() { return false; }
|
||||
export function mcpToolDetailsForAnalytics() { return {}; }
|
||||
export function extractMcpToolDetails() { return undefined; }
|
||||
export function extractSkillName() { return undefined; }
|
||||
export function extractToolInputForTelemetry() { return undefined; }
|
||||
export function getFileExtensionForAnalytics() { return undefined; }
|
||||
export function getFileExtensionsFromBashCommand() { return undefined; }
|
||||
export async function getEventMetadata() { return {}; }
|
||||
export function to1PEventFormat() { return {}; }
|
||||
`,
|
||||
|
||||
// ─── Telemetry subsystems ───────────────────────────────────────
|
||||
|
||||
'utils/telemetry/bigqueryExporter': `
|
||||
export class BigQueryMetricsExporter {
|
||||
constructor() {}
|
||||
async export(metrics, resultCallback) { resultCallback({ code: 0 }); }
|
||||
async shutdown() {}
|
||||
async forceFlush() {}
|
||||
selectAggregationTemporality() { return 0; }
|
||||
}
|
||||
`,
|
||||
|
||||
'utils/telemetry/perfettoTracing': `
|
||||
export function initializePerfettoTracing() {}
|
||||
export function isPerfettoTracingEnabled() { return false; }
|
||||
export function registerAgent() {}
|
||||
export function unregisterAgent() {}
|
||||
export function startLLMRequestPerfettoSpan() { return ''; }
|
||||
export function endLLMRequestPerfettoSpan() {}
|
||||
export function startToolPerfettoSpan() { return ''; }
|
||||
export function endToolPerfettoSpan() {}
|
||||
export function startUserInputPerfettoSpan() { return ''; }
|
||||
export function endUserInputPerfettoSpan() {}
|
||||
export function emitPerfettoInstant() {}
|
||||
export function emitPerfettoCounter() {}
|
||||
export function startInteractionPerfettoSpan() { return ''; }
|
||||
export function endInteractionPerfettoSpan() {}
|
||||
export function getPerfettoEvents() { return []; }
|
||||
export function resetPerfettoTracer() {}
|
||||
export async function triggerPeriodicWriteForTesting() {}
|
||||
export function evictStaleSpansForTesting() {}
|
||||
export const MAX_EVENTS_FOR_TESTING = 0;
|
||||
export function evictOldestEventsForTesting() {}
|
||||
`,
|
||||
|
||||
'utils/telemetry/sessionTracing': `
|
||||
const noopSpan = {
|
||||
end() {}, setAttribute() {}, setStatus() {},
|
||||
recordException() {}, addEvent() {}, isRecording() { return false; },
|
||||
};
|
||||
export function isBetaTracingEnabled() { return false; }
|
||||
export function isEnhancedTelemetryEnabled() { return false; }
|
||||
export function startInteractionSpan() { return noopSpan; }
|
||||
export function endInteractionSpan() {}
|
||||
export function startLLMRequestSpan() { return noopSpan; }
|
||||
export function endLLMRequestSpan() {}
|
||||
export function startToolSpan() { return noopSpan; }
|
||||
export function startToolBlockedOnUserSpan() { return noopSpan; }
|
||||
export function endToolBlockedOnUserSpan() {}
|
||||
export function startToolExecutionSpan() { return noopSpan; }
|
||||
export function endToolExecutionSpan() {}
|
||||
export function endToolSpan() {}
|
||||
export function addToolContentEvent() {}
|
||||
export function getCurrentSpan() { return null; }
|
||||
export async function executeInSpan(spanName, fn) { return fn(noopSpan); }
|
||||
export function startHookSpan() { return noopSpan; }
|
||||
export function endHookSpan() {}
|
||||
`,
|
||||
|
||||
// ─── Auto-updater (phones home to GCS + npm) ──────────────────
|
||||
|
||||
'utils/autoUpdater': `
|
||||
export async function assertMinVersion() {}
|
||||
export async function getMaxVersion() { return undefined; }
|
||||
export async function getMaxVersionMessage() { return undefined; }
|
||||
export function shouldSkipVersion() { return true; }
|
||||
export function getLockFilePath() { return '/tmp/openclaude-update.lock'; }
|
||||
export async function checkGlobalInstallPermissions() { return { hasPermissions: false, npmPrefix: null }; }
|
||||
export async function getLatestVersion() { return null; }
|
||||
export async function getNpmDistTags() { return { latest: null, stable: null }; }
|
||||
export async function getLatestVersionFromGcs() { return null; }
|
||||
export async function getGcsDistTags() { return { latest: null, stable: null }; }
|
||||
export async function getVersionHistory() { return []; }
|
||||
export async function installGlobalPackage() { return 'success'; }
|
||||
`,
|
||||
|
||||
// ─── Plugin fetch telemetry (not the marketplace itself) ───────
|
||||
|
||||
'utils/plugins/fetchTelemetry': `
|
||||
export function logPluginFetch() {}
|
||||
export function classifyFetchError() { return 'disabled'; }
|
||||
`,
|
||||
|
||||
// ─── Transcript / feedback sharing ─────────────────────────────
|
||||
|
||||
'components/FeedbackSurvey/submitTranscriptShare': `
|
||||
export async function submitTranscriptShare() { return { success: false }; }
|
||||
`,
|
||||
|
||||
// ─── Internal employee logging (not needed in the external build) ─────
|
||||
|
||||
'services/internalLogging': `
|
||||
export async function logPermissionContextForAnts() {}
|
||||
export const getContainerId = async () => null;
|
||||
`,
|
||||
|
||||
// ─── Deleted Anthropic-internal modules ───────────────────────────────
|
||||
|
||||
'services/api/dumpPrompts': `
|
||||
export function createDumpPromptsFetch() { return undefined; }
|
||||
export function getDumpPromptsPath() { return ''; }
|
||||
export function getLastApiRequests() { return []; }
|
||||
export function clearApiRequestCache() {}
|
||||
export function clearDumpState() {}
|
||||
export function clearAllDumpState() {}
|
||||
export function addApiRequestToCache() {}
|
||||
`,
|
||||
|
||||
'utils/undercover': `
|
||||
export function isUndercover() { return false; }
|
||||
export function getUndercoverInstructions() { return ''; }
|
||||
export function shouldShowUndercoverAutoNotice() { return false; }
|
||||
`,
|
||||
|
||||
'types/generated/events_mono/claude_code/v1/claude_code_internal_event': `
|
||||
export const ClaudeCodeInternalEvent = {
|
||||
fromJSON: value => value,
|
||||
toJSON: value => value,
|
||||
create: value => value ?? {},
|
||||
fromPartial: value => value ?? {},
|
||||
};
|
||||
`,
|
||||
|
||||
'types/generated/events_mono/growthbook/v1/growthbook_experiment_event': `
|
||||
export const GrowthbookExperimentEvent = {
|
||||
fromJSON: value => value,
|
||||
toJSON: value => value,
|
||||
create: value => value ?? {},
|
||||
fromPartial: value => value ?? {},
|
||||
};
|
||||
`,
|
||||
|
||||
'types/generated/events_mono/common/v1/auth': `
|
||||
export const PublicApiAuth = {
|
||||
fromJSON: value => value,
|
||||
toJSON: value => value,
|
||||
create: value => value ?? {},
|
||||
fromPartial: value => value ?? {},
|
||||
};
|
||||
`,
|
||||
|
||||
'types/generated/google/protobuf/timestamp': `
|
||||
export const Timestamp = {
|
||||
fromJSON: value => value,
|
||||
toJSON: value => value,
|
||||
create: value => value ?? {},
|
||||
fromPartial: value => value ?? {},
|
||||
};
|
||||
`,
|
||||
}
|
||||
|
||||
function escapeForResolvedPathRegex(modulePath: string): string {
|
||||
return modulePath
|
||||
.replace(/[|\\{}()[\]^$+*?.]/g, '\\$&')
|
||||
.replace(/\//g, '[/\\\\]')
|
||||
}
|
||||
|
||||
export const noTelemetryPlugin: BunPlugin = {
|
||||
name: 'no-telemetry',
|
||||
setup(build) {
|
||||
for (const [modulePath, contents] of Object.entries(stubs)) {
|
||||
// Build regex that matches the resolved file path on any OS
|
||||
// e.g. "services/analytics/growthbook" → /services[/\\]analytics[/\\]growthbook\.(ts|js)$/
|
||||
const escaped = escapeForResolvedPathRegex(modulePath)
|
||||
const filter = new RegExp(`${escaped}\\.(ts|js)$`)
|
||||
|
||||
build.onLoad({ filter }, () => ({
|
||||
contents,
|
||||
loader: 'js',
|
||||
}))
|
||||
}
|
||||
|
||||
console.log(` 🔇 no-telemetry: stubbed ${Object.keys(stubs).length} modules`)
|
||||
},
|
||||
}
|
||||
136
scripts/pr-intent-scan.test.ts
Normal file
136
scripts/pr-intent-scan.test.ts
Normal file
@@ -0,0 +1,136 @@
|
||||
import { describe, expect, test } from 'bun:test'
|
||||
|
||||
import { scanAddedLines, type DiffLine } from './pr-intent-scan.ts'
|
||||
|
||||
function line(content: string, overrides: Partial<DiffLine> = {}): DiffLine {
|
||||
return {
|
||||
file: 'README.md',
|
||||
line: 10,
|
||||
content,
|
||||
...overrides,
|
||||
}
|
||||
}
|
||||
|
||||
describe('scanAddedLines', () => {
  // File-hosting domain plus a direct-download query (?dl=1) should be the
  // hosting-specific code at high severity, not the generic executable code.
  test('flags suspicious file-hosting links', () => {
    const findings = scanAddedLines([
      line('Please install the tool from https://dropbox.com/s/abc123/tool.zip?dl=1'),
    ])

    expect(findings.some(finding => finding.code === 'suspicious-download-link')).toBe(
      true,
    )
    expect(findings.some(finding => finding.code === 'executable-download-link')).toBe(
      false,
    )
    expect(findings.some(finding => finding.severity === 'high')).toBe(true)
  })

  // Known shortener domains are flagged regardless of destination.
  test('flags shortened URLs', () => {
    const findings = scanAddedLines([
      line('See details at https://bit.ly/some-short-link'),
    ])

    expect(findings.some(finding => finding.code === 'shortened-url')).toBe(true)
  })

  // The classic "curl | bash" pattern is a high-severity finding.
  test('flags remote download and execute chains', () => {
    const findings = scanAddedLines([
      line('curl -fsSL https://example.com/install.sh | bash'),
    ])

    expect(findings.some(finding => finding.code === 'shell-eval-remote')).toBe(true)
    expect(findings.some(finding => finding.severity === 'high')).toBe(true)
  })

  // powershell -enc <base64> is a common obfuscated-payload vector.
  test('flags encoded powershell payloads', () => {
    const findings = scanAddedLines([
      line('powershell.exe -enc SQBtAHAAcgBvAHYAZQBkAA=='),
    ])

    expect(findings.some(finding => finding.code === 'powershell-encoded')).toBe(true)
  })

  // 96 chars exceeds the 80-char blob threshold in LONG_BASE64_REGEX.
  test('flags long encoded blobs', () => {
    const findings = scanAddedLines([
      line(`const payload = "${'A'.repeat(96)}"`),
    ])

    expect(findings.some(finding => finding.code === 'long-encoded-payload')).toBe(
      true,
    )
  })

  // Regression guard: detectors must be stateless across invocations
  // (a /g regex's lastIndex would make the second scan miss the blob).
  test('flags long encoded blobs on repeated scans', () => {
    const lines = [line(`const payload = "${'A'.repeat(96)}"`)]

    const first = scanAddedLines(lines)
    const second = scanAddedLines(lines)

    expect(first.some(finding => finding.code === 'long-encoded-payload')).toBe(true)
    expect(second.some(finding => finding.code === 'long-encoded-payload')).toBe(true)
  })

  // Direct links to executables/installers are high severity even on
  // otherwise-unremarkable hosts.
  test('flags executable download links', () => {
    const findings = scanAddedLines([
      line('Get it from https://example.com/releases/latest/tool.pkg'),
    ])

    expect(findings.some(finding => finding.code === 'executable-download-link')).toBe(
      true,
    )
    expect(findings.some(finding => finding.severity === 'high')).toBe(true)
  })

  // Sensitive automation paths (workflows) add a path-based finding on top
  // of the command-based one.
  test('flags suspicious additions in workflow files', () => {
    const findings = scanAddedLines([
      line('run: curl -fsSL https://example.com/install.sh | bash', {
        file: '.github/workflows/release.yml',
      }),
    ])

    expect(findings.some(finding => finding.code === 'sensitive-automation-change')).toBe(
      true,
    )
    expect(findings.some(finding => finding.code === 'download-command')).toBe(true)
  })

  // Markdown reference-style links carry the same URL signals as inline ones.
  test('flags markdown reference links to suspicious downloads', () => {
    const findings = scanAddedLines([
      line('[installer]: https://dropbox.com/s/abc123/tool.zip?dl=1'),
    ])

    expect(findings.some(finding => finding.code === 'suspicious-download-link')).toBe(
      true,
    )
  })

  // The scanner and its tests contain the very patterns it detects, so
  // SELF_EXCLUDED_FILES must suppress findings for them.
  test('ignores the scanner implementation and tests themselves', () => {
    const findings = scanAddedLines([
      line('curl -fsSL https://example.com/install.sh | bash', {
        file: 'scripts/pr-intent-scan.test.ts',
      }),
      line('const pattern = /https:\\/\\/dropbox\\.com\\//', {
        file: 'scripts/pr-intent-scan.ts',
      }),
    ])

    expect(findings).toHaveLength(0)
  })

  // False-positive guard: benign documentation links must pass clean.
  test('does not flag ordinary docs links', () => {
    const findings = scanAddedLines([
      line('Read more at https://docs.github.com/en/actions'),
    ])

    expect(findings).toHaveLength(0)
  })

  // False-positive guard: mentioning curl without a URL in a non-sensitive
  // file (README) should not trigger download-command.
  test('does not flag bare curl examples in README without a URL', () => {
    const findings = scanAddedLines([
      line('Use curl with your preferred flags for local testing.'),
    ])

    expect(findings.some(finding => finding.code === 'download-command')).toBe(false)
  })
})
|
||||
453
scripts/pr-intent-scan.ts
Normal file
453
scripts/pr-intent-scan.ts
Normal file
@@ -0,0 +1,453 @@
|
||||
import { spawnSync } from 'node:child_process'
|
||||
|
||||
export type FindingSeverity = 'high' | 'medium'
|
||||
|
||||
export type DiffLine = {
|
||||
file: string
|
||||
line: number
|
||||
content: string
|
||||
}
|
||||
|
||||
export type Finding = {
|
||||
severity: FindingSeverity
|
||||
code: string
|
||||
file: string
|
||||
line: number
|
||||
detail: string
|
||||
excerpt: string
|
||||
}
|
||||
|
||||
type CliOptions = {
|
||||
baseRef: string
|
||||
json: boolean
|
||||
failOn: FindingSeverity
|
||||
}
|
||||
|
||||
const SELF_EXCLUDED_FILES = new Set([
|
||||
'scripts/pr-intent-scan.ts',
|
||||
'scripts/pr-intent-scan.test.ts',
|
||||
])
|
||||
|
||||
const SHORTENER_DOMAINS = [
|
||||
'bit.ly',
|
||||
'tinyurl.com',
|
||||
'goo.gl',
|
||||
't.co',
|
||||
'is.gd',
|
||||
'rb.gy',
|
||||
'cutt.ly',
|
||||
]
|
||||
|
||||
const SUSPICIOUS_DOWNLOAD_DOMAINS = [
|
||||
'dropbox.com',
|
||||
'dl.dropboxusercontent.com',
|
||||
'drive.google.com',
|
||||
'docs.google.com',
|
||||
'mega.nz',
|
||||
'mediafire.com',
|
||||
'transfer.sh',
|
||||
'anonfiles.com',
|
||||
'catbox.moe',
|
||||
]
|
||||
|
||||
const URL_REGEX = /\bhttps?:\/\/[^\s)>"']+/gi
|
||||
const LONG_BASE64_REGEX = /\b(?:[A-Za-z0-9+/]{80,}={0,2}|[A-Za-z0-9_-]{80,})\b/
|
||||
const EXECUTABLE_PATH_REGEX =
|
||||
/\.(?:sh|bash|zsh|ps1|exe|msi|pkg|deb|rpm|zip|tar|tgz|gz|xz|dmg|appimage)(?:$|[?#])/i
|
||||
const SENSITIVE_PATH_REGEX =
|
||||
/^(?:\.github\/workflows\/|scripts\/|bin\/|install(?:\/|\.|$)|.*(?:Dockerfile|docker-compose|compose\.ya?ml)$)/i
|
||||
|
||||
function parseOptions(argv: string[]): CliOptions {
|
||||
const options: CliOptions = {
|
||||
baseRef: 'origin/main',
|
||||
json: false,
|
||||
failOn: 'high',
|
||||
}
|
||||
|
||||
for (let index = 0; index < argv.length; index++) {
|
||||
const arg = argv[index]
|
||||
if (arg === '--json') {
|
||||
options.json = true
|
||||
continue
|
||||
}
|
||||
if (arg === '--base') {
|
||||
const next = argv[index + 1]
|
||||
if (next && !next.startsWith('--')) {
|
||||
options.baseRef = next
|
||||
index++
|
||||
}
|
||||
continue
|
||||
}
|
||||
if (arg === '--fail-on') {
|
||||
const next = argv[index + 1]
|
||||
if (next === 'high' || next === 'medium') {
|
||||
options.failOn = next
|
||||
index++
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return options
|
||||
}
|
||||
|
||||
function trimExcerpt(content: string): string {
|
||||
const compact = content.trim().replace(/\s+/g, ' ')
|
||||
return compact.length > 140 ? `${compact.slice(0, 137)}...` : compact
|
||||
}
|
||||
|
||||
function uniqueFindings(findings: Finding[]): Finding[] {
|
||||
const seen = new Set<string>()
|
||||
return findings.filter(finding => {
|
||||
const key = `${finding.code}:${finding.file}:${finding.line}:${finding.detail}`
|
||||
if (seen.has(key)) {
|
||||
return false
|
||||
}
|
||||
seen.add(key)
|
||||
return true
|
||||
})
|
||||
}
|
||||
|
||||
function parseAddedLines(diffText: string): DiffLine[] {
|
||||
const lines = diffText.split('\n')
|
||||
const added: DiffLine[] = []
|
||||
let currentFile: string | null = null
|
||||
let currentLine = 0
|
||||
|
||||
for (const rawLine of lines) {
|
||||
if (rawLine.startsWith('+++ b/')) {
|
||||
currentFile = rawLine.slice('+++ b/'.length)
|
||||
continue
|
||||
}
|
||||
|
||||
if (rawLine.startsWith('@@')) {
|
||||
const match = /\+(\d+)(?:,(\d+))?/.exec(rawLine)
|
||||
if (match) {
|
||||
currentLine = Number(match[1])
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
if (!currentFile) {
|
||||
continue
|
||||
}
|
||||
|
||||
if (rawLine.startsWith('+') && !rawLine.startsWith('+++')) {
|
||||
added.push({
|
||||
file: currentFile,
|
||||
line: currentLine,
|
||||
content: rawLine.slice(1),
|
||||
})
|
||||
currentLine += 1
|
||||
continue
|
||||
}
|
||||
|
||||
if (rawLine.startsWith('-') && !rawLine.startsWith('---')) {
|
||||
continue
|
||||
}
|
||||
|
||||
if (!rawLine.startsWith('\\')) {
|
||||
currentLine += 1
|
||||
}
|
||||
}
|
||||
|
||||
return added
|
||||
}
|
||||
|
||||
function tryParseUrl(value: string): URL | null {
|
||||
try {
|
||||
return new URL(value)
|
||||
} catch {
|
||||
return null
|
||||
}
|
||||
}
|
||||
|
||||
function hostMatches(hostname: string, domain: string): boolean {
|
||||
return hostname === domain || hostname.endsWith(`.${domain}`)
|
||||
}
|
||||
|
||||
function hasSuspiciousDownloadIndicators(url: URL): boolean {
|
||||
const combined = `${url.pathname}${url.search}`.toLowerCase()
|
||||
return (
|
||||
combined.includes('dl=1') ||
|
||||
combined.includes('raw=1') ||
|
||||
combined.includes('export=download') ||
|
||||
combined.includes('/download') ||
|
||||
combined.includes('/uc?export=download')
|
||||
)
|
||||
}
|
||||
|
||||
function findUrlFindings(line: DiffLine): Finding[] {
|
||||
const findings: Finding[] = []
|
||||
const matches = line.content.match(URL_REGEX) ?? []
|
||||
|
||||
for (const match of matches) {
|
||||
const parsed = tryParseUrl(match)
|
||||
if (!parsed) continue
|
||||
|
||||
const hostname = parsed.hostname.toLowerCase()
|
||||
|
||||
for (const domain of SHORTENER_DOMAINS) {
|
||||
if (hostMatches(hostname, domain)) {
|
||||
findings.push({
|
||||
severity: 'medium',
|
||||
code: 'shortened-url',
|
||||
file: line.file,
|
||||
line: line.line,
|
||||
detail: `Added shortened URL: ${hostname}`,
|
||||
excerpt: trimExcerpt(line.content),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
const isSuspiciousHost = SUSPICIOUS_DOWNLOAD_DOMAINS.some(domain =>
|
||||
hostMatches(hostname, domain),
|
||||
)
|
||||
const isExecutableDownload = EXECUTABLE_PATH_REGEX.test(
|
||||
`${parsed.pathname}${parsed.search}`,
|
||||
)
|
||||
|
||||
if (isSuspiciousHost) {
|
||||
findings.push({
|
||||
severity:
|
||||
hasSuspiciousDownloadIndicators(parsed) || isExecutableDownload
|
||||
? 'high'
|
||||
: 'medium',
|
||||
code: 'suspicious-download-link',
|
||||
file: line.file,
|
||||
line: line.line,
|
||||
detail: `Added external file-hosting link: ${hostname}`,
|
||||
excerpt: trimExcerpt(line.content),
|
||||
})
|
||||
} else if (isExecutableDownload) {
|
||||
findings.push({
|
||||
severity: 'high',
|
||||
code: 'executable-download-link',
|
||||
file: line.file,
|
||||
line: line.line,
|
||||
detail: `Added direct link to executable or archive payload: ${hostname}`,
|
||||
excerpt: trimExcerpt(line.content),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
return findings
|
||||
}
|
||||
|
||||
function findSensitivePathFindings(line: DiffLine): Finding[] {
|
||||
if (!SENSITIVE_PATH_REGEX.test(line.file)) {
|
||||
return []
|
||||
}
|
||||
|
||||
const lower = line.content.toLowerCase()
|
||||
|
||||
if (
|
||||
/\b(curl|wget|invoke-webrequest|iwr|powershell|bash|sh|chmod\s+\+x)\b/i.test(
|
||||
line.content,
|
||||
) ||
|
||||
URL_REGEX.test(line.content) ||
|
||||
lower.includes('download')
|
||||
) {
|
||||
return [
|
||||
{
|
||||
severity: 'medium',
|
||||
code: 'sensitive-automation-change',
|
||||
file: line.file,
|
||||
line: line.line,
|
||||
detail:
|
||||
'Added network, execution, or download-related content in a sensitive automation file',
|
||||
excerpt: trimExcerpt(line.content),
|
||||
},
|
||||
]
|
||||
}
|
||||
|
||||
return []
|
||||
}
|
||||
|
||||
/**
 * Scans one added line for command-level risk signals via two pattern
 * tables (high and medium severity) plus a long-encoded-blob heuristic.
 * Each table entry is a [code, pattern, detail] triple; every matching
 * pattern contributes one finding for the line.
 */
function findCommandFindings(line: DiffLine): Finding[] {
  const findings: Finding[] = []
  const lower = line.content.toLowerCase()

  // Always high severity when matched.
  const highPatterns: Array<[string, RegExp, string]> = [
    [
      'download-exec-chain',
      /\b(curl|wget|invoke-webrequest|iwr)\b.*(\|\s*(sh|bash|zsh)|;\s*chmod\s+\+x|&&\s*\.\.?\/|>\s*\/tmp\/)/i,
      'Added remote download followed by execution or staging',
    ],
    [
      'powershell-encoded',
      /\bpowershell(?:\.exe)?\b.*(?:-enc|-encodedcommand)\b/i,
      'Added encoded PowerShell invocation',
    ],
    [
      'shell-eval-remote',
      /\b(curl|wget)\b.*\|\s*(sh|bash|zsh)\b/i,
      'Added shell pipe from remote content into interpreter',
    ],
    [
      'binary-lolbin',
      /\b(mshta|rundll32|regsvr32|certutil)\b/i,
      'Added living-off-the-land binary often used for payload staging',
    ],
    [
      'invoke-expression',
      /\b(iex|invoke-expression)\b/i,
      'Added PowerShell expression execution',
    ],
  ]

  // Medium severity; 'download-command' is further restricted below to
  // files matched by SENSITIVE_PATH_REGEX (workflows, scripts, Docker…).
  const mediumPatterns: Array<[string, RegExp, string]> = [
    [
      'download-command',
      /\b(curl|wget|invoke-webrequest|iwr)\b.*https?:\/\//i,
      'Added command that downloads remote content',
    ],
    [
      'archive-extract-exec',
      /\b(unzip|tar|7z)\b.*(&&|;).*\b(chmod|node|python|bash|sh)\b/i,
      'Added archive extraction followed by execution',
    ],
    [
      'base64-decode',
      /\b(base64\s+-d|openssl\s+base64\s+-d|python .*b64decode)\b/i,
      'Added explicit payload decode step',
    ],
  ]

  for (const [code, pattern, detail] of highPatterns) {
    if (pattern.test(line.content)) {
      findings.push({
        severity: 'high',
        code,
        file: line.file,
        line: line.line,
        detail,
        excerpt: trimExcerpt(line.content),
      })
    }
  }

  for (const [code, pattern, detail] of mediumPatterns) {
    // Plain download commands are only suspicious in sensitive automation files.
    if (code === 'download-command' && !SENSITIVE_PATH_REGEX.test(line.file)) {
      continue
    }
    if (pattern.test(line.content)) {
      findings.push({
        severity: 'medium',
        code,
        file: line.file,
        line: line.line,
        detail,
        excerpt: trimExcerpt(line.content),
      })
    }
  }

  // Long base64/url-safe blobs look like embedded payloads; checksum lines
  // (sha256/sha512) are a common benign source of long tokens, so skip them.
  if (LONG_BASE64_REGEX.test(line.content) && !lower.includes('sha256') && !lower.includes('sha512')) {
    findings.push({
      severity: 'medium',
      code: 'long-encoded-payload',
      file: line.file,
      line: line.line,
      detail: 'Added long encoded blob or token-like payload',
      excerpt: trimExcerpt(line.content),
    })
  }

  return findings
}
|
||||
|
||||
export function scanAddedLines(lines: DiffLine[]): Finding[] {
|
||||
const findings = lines
|
||||
.filter(line => !SELF_EXCLUDED_FILES.has(line.file))
|
||||
.flatMap(line => [
|
||||
...findUrlFindings(line),
|
||||
...findCommandFindings(line),
|
||||
...findSensitivePathFindings(line),
|
||||
])
|
||||
return uniqueFindings(findings)
|
||||
}
|
||||
|
||||
export function getGitDiff(baseRef: string): string {
|
||||
const mergeBase = spawnSync('git', ['merge-base', baseRef, 'HEAD'], {
|
||||
encoding: 'utf8',
|
||||
})
|
||||
|
||||
if (mergeBase.status !== 0) {
|
||||
throw new Error(
|
||||
`Could not determine merge-base with ${baseRef}: ${mergeBase.stderr.trim() || mergeBase.stdout.trim()}`,
|
||||
)
|
||||
}
|
||||
|
||||
const base = mergeBase.stdout.trim()
|
||||
const diff = spawnSync(
|
||||
'git',
|
||||
['diff', '--unified=0', '--no-ext-diff', `${base}...HEAD`],
|
||||
{ encoding: 'utf8' },
|
||||
)
|
||||
|
||||
if (diff.status !== 0) {
|
||||
throw new Error(`git diff failed: ${diff.stderr.trim() || diff.stdout.trim()}`)
|
||||
}
|
||||
|
||||
return diff.stdout
|
||||
}
|
||||
|
||||
function shouldFail(findings: Finding[], failOn: FindingSeverity): boolean {
|
||||
if (failOn === 'medium') {
|
||||
return findings.length > 0
|
||||
}
|
||||
return findings.some(finding => finding.severity === 'high')
|
||||
}
|
||||
|
||||
function renderText(findings: Finding[]): string {
|
||||
if (findings.length === 0) {
|
||||
return 'PR intent scan: no suspicious additions found.'
|
||||
}
|
||||
|
||||
const high = findings.filter(f => f.severity === 'high')
|
||||
const medium = findings.filter(f => f.severity === 'medium')
|
||||
const lines = [
|
||||
`PR intent scan: ${findings.length} finding(s)`,
|
||||
`- high: ${high.length}`,
|
||||
`- medium: ${medium.length}`,
|
||||
'',
|
||||
]
|
||||
|
||||
for (const finding of findings) {
|
||||
lines.push(
|
||||
`[${finding.severity.toUpperCase()}] ${finding.file}:${finding.line} ${finding.detail}`,
|
||||
)
|
||||
lines.push(` ${finding.excerpt}`)
|
||||
}
|
||||
|
||||
return lines.join('\n')
|
||||
}
|
||||
|
||||
export function run(options: CliOptions): number {
|
||||
const diff = getGitDiff(options.baseRef)
|
||||
const addedLines = parseAddedLines(diff)
|
||||
const findings = scanAddedLines(addedLines)
|
||||
|
||||
if (options.json) {
|
||||
process.stdout.write(
|
||||
`${JSON.stringify(
|
||||
{
|
||||
baseRef: options.baseRef,
|
||||
addedLines: addedLines.length,
|
||||
findings,
|
||||
},
|
||||
null,
|
||||
2,
|
||||
)}\n`,
|
||||
)
|
||||
} else {
|
||||
process.stdout.write(`${renderText(findings)}\n`)
|
||||
}
|
||||
|
||||
return shouldFail(findings, options.failOn) ? 1 : 0
|
||||
}
|
||||
|
||||
// CLI bootstrap: run only when executed directly (Bun/Node set
// import.meta.main), so importing this module from tests has no side effects.
if (import.meta.main) {
  const options = parseOptions(process.argv.slice(2))
  process.exitCode = run(options)
}
|
||||
@@ -1,6 +1,4 @@
|
||||
// @ts-nocheck
|
||||
import { writeFileSync } from 'node:fs'
|
||||
import { resolve } from 'node:path'
|
||||
import {
|
||||
resolveCodexApiCredentials,
|
||||
} from '../src/services/api/providerConfig.js'
|
||||
@@ -10,18 +8,24 @@ import {
|
||||
recommendOllamaModel,
|
||||
} from '../src/utils/providerRecommendation.ts'
|
||||
import {
|
||||
buildAtomicChatProfileEnv,
|
||||
buildCodexProfileEnv,
|
||||
buildGeminiProfileEnv,
|
||||
buildMistralProfileEnv,
|
||||
buildOllamaProfileEnv,
|
||||
buildOpenAIProfileEnv,
|
||||
createProfileFile,
|
||||
saveProfileFile,
|
||||
selectAutoProfile,
|
||||
type ProfileFile,
|
||||
type ProviderProfile,
|
||||
} from '../src/utils/providerProfile.ts'
|
||||
import {
|
||||
getAtomicChatChatBaseUrl,
|
||||
getOllamaChatBaseUrl,
|
||||
hasLocalAtomicChat,
|
||||
hasLocalOllama,
|
||||
listAtomicChatModels,
|
||||
listOllamaModels,
|
||||
} from './provider-discovery.ts'
|
||||
|
||||
@@ -34,7 +38,7 @@ function parseArg(name: string): string | null {
|
||||
|
||||
function parseProviderArg(): ProviderProfile | 'auto' {
|
||||
const p = parseArg('--provider')?.toLowerCase()
|
||||
if (p === 'openai' || p === 'ollama' || p === 'codex' || p === 'gemini') return p
|
||||
if (p === 'openai' || p === 'ollama' || p === 'codex' || p === 'gemini' || p === 'mistral' || p === 'atomic-chat') return p
|
||||
return 'auto'
|
||||
}
|
||||
|
||||
@@ -87,6 +91,21 @@ async function main(): Promise<void> {
|
||||
process.exit(1)
|
||||
}
|
||||
|
||||
env = builtEnv
|
||||
} else if (selected === 'mistral') {
|
||||
const builtEnv = buildMistralProfileEnv({
|
||||
model: argModel || null,
|
||||
baseUrl: argBaseUrl || null,
|
||||
apiKey: argApiKey || null,
|
||||
processEnv: process.env,
|
||||
})
|
||||
|
||||
if (!builtEnv) {
|
||||
console.error('Mistral profile requires an API key. Use --api-key or set MISTRAL_API_KEY.')
|
||||
console.error('Get a free key at: https://admin.mistral.ai/organization/api-keys')
|
||||
process.exit(1)
|
||||
}
|
||||
|
||||
env = builtEnv
|
||||
} else if (selected === 'ollama') {
|
||||
resolvedOllamaModel ??= await resolveOllamaModel(argModel, argBaseUrl, goal)
|
||||
@@ -102,6 +121,21 @@ async function main(): Promise<void> {
|
||||
getOllamaChatBaseUrl,
|
||||
},
|
||||
)
|
||||
} else if (selected === 'atomic-chat') {
|
||||
const model = argModel || (await listAtomicChatModels(argBaseUrl || undefined))[0]
|
||||
if (!model) {
|
||||
if (!(await hasLocalAtomicChat(argBaseUrl || undefined))) {
|
||||
console.error('Atomic Chat is not running (could not connect to 127.0.0.1:1337).\n Download from https://atomic.chat/ and launch the application.')
|
||||
} else {
|
||||
console.error('Atomic Chat is running but no model is loaded. Open Atomic Chat and download or start a model first.')
|
||||
}
|
||||
process.exit(1)
|
||||
}
|
||||
|
||||
env = buildAtomicChatProfileEnv(model, {
|
||||
baseUrl: argBaseUrl,
|
||||
getAtomicChatChatBaseUrl,
|
||||
})
|
||||
} else if (selected === 'codex') {
|
||||
const builtEnv = buildCodexProfileEnv({
|
||||
model: argModel,
|
||||
@@ -147,12 +181,11 @@ async function main(): Promise<void> {
|
||||
|
||||
const profile = createProfileFile(selected, env)
|
||||
|
||||
const outputPath = resolve(process.cwd(), '.openclaude-profile.json')
|
||||
writeFileSync(outputPath, JSON.stringify(profile, null, 2), { encoding: 'utf8', mode: 0o600 })
|
||||
const outputPath = saveProfileFile(profile)
|
||||
|
||||
console.log(`Saved profile: ${selected}`)
|
||||
console.log(`Goal: ${goal}`)
|
||||
console.log(`Model: ${profile.env.GEMINI_MODEL || profile.env.OPENAI_MODEL || getGoalDefaultOpenAIModel(goal)}`)
|
||||
console.log(`Model: ${profile.env.GEMINI_MODEL || profile.env.MISTRAL_MODEL || profile.env.OPENAI_MODEL || getGoalDefaultOpenAIModel(goal)}`)
|
||||
console.log(`Path: ${outputPath}`)
|
||||
console.log('Next: bun run dev:profile')
|
||||
}
|
||||
|
||||
@@ -1,129 +1,13 @@
|
||||
import type { OllamaModelDescriptor } from '../src/utils/providerRecommendation.ts'
|
||||
|
||||
export const DEFAULT_OLLAMA_BASE_URL = 'http://localhost:11434'
|
||||
|
||||
function withTimeoutSignal(timeoutMs: number): {
|
||||
signal: AbortSignal
|
||||
clear: () => void
|
||||
} {
|
||||
const controller = new AbortController()
|
||||
const timeout = setTimeout(() => controller.abort(), timeoutMs)
|
||||
return {
|
||||
signal: controller.signal,
|
||||
clear: () => clearTimeout(timeout),
|
||||
}
|
||||
}
|
||||
|
||||
function trimTrailingSlash(value: string): string {
|
||||
return value.replace(/\/+$/, '')
|
||||
}
|
||||
|
||||
export function getOllamaApiBaseUrl(baseUrl?: string): string {
|
||||
const parsed = new URL(
|
||||
baseUrl || process.env.OLLAMA_BASE_URL || DEFAULT_OLLAMA_BASE_URL,
|
||||
)
|
||||
const pathname = trimTrailingSlash(parsed.pathname)
|
||||
parsed.pathname = pathname.endsWith('/v1')
|
||||
? pathname.slice(0, -3) || '/'
|
||||
: pathname || '/'
|
||||
parsed.search = ''
|
||||
parsed.hash = ''
|
||||
return trimTrailingSlash(parsed.toString())
|
||||
}
|
||||
|
||||
export function getOllamaChatBaseUrl(baseUrl?: string): string {
|
||||
return `${getOllamaApiBaseUrl(baseUrl)}/v1`
|
||||
}
|
||||
|
||||
export async function hasLocalOllama(baseUrl?: string): Promise<boolean> {
|
||||
const { signal, clear } = withTimeoutSignal(1200)
|
||||
try {
|
||||
const response = await fetch(`${getOllamaApiBaseUrl(baseUrl)}/api/tags`, {
|
||||
method: 'GET',
|
||||
signal,
|
||||
})
|
||||
return response.ok
|
||||
} catch {
|
||||
return false
|
||||
} finally {
|
||||
clear()
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * Lists the models available on the local Ollama server via `/api/tags`,
 * mapped into OllamaModelDescriptor shape. Returns [] on any network
 * error, non-OK response, or once the 5 s timeout aborts the request;
 * this never throws.
 */
export async function listOllamaModels(
  baseUrl?: string,
): Promise<OllamaModelDescriptor[]> {
  const { signal, clear } = withTimeoutSignal(5000)
  try {
    const response = await fetch(`${getOllamaApiBaseUrl(baseUrl)}/api/tags`, {
      method: 'GET',
      signal,
    })
    if (!response.ok) {
      return []
    }

    // Subset of the /api/tags payload this code relies on; every field is
    // treated as optional so a partial response cannot crash the mapping.
    const data = await response.json() as {
      models?: Array<{
        name?: string
        size?: number
        details?: {
          family?: string
          families?: string[]
          parameter_size?: string
          quantization_level?: string
        }
      }>
    }

    // Keep only named models; normalize missing detail fields to null / [].
    return (data.models ?? [])
      .filter(model => Boolean(model.name))
      .map(model => ({
        name: model.name!,
        sizeBytes: typeof model.size === 'number' ? model.size : null,
        family: model.details?.family ?? null,
        families: model.details?.families ?? [],
        parameterSize: model.details?.parameter_size ?? null,
        quantizationLevel: model.details?.quantization_level ?? null,
      }))
  } catch {
    return []
  } finally {
    clear()
  }
}
|
||||
|
||||
/**
 * Measures the round-trip latency (milliseconds) of one tiny non-streaming
 * chat completion against the given model. Returns null on any network
 * error, non-OK response, or once the 20 s timeout aborts the request;
 * this never throws.
 */
export async function benchmarkOllamaModel(
  modelName: string,
  baseUrl?: string,
): Promise<number | null> {
  const start = Date.now()
  const { signal, clear } = withTimeoutSignal(20000)
  try {
    const response = await fetch(`${getOllamaApiBaseUrl(baseUrl)}/api/chat`, {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
      },
      signal,
      body: JSON.stringify({
        model: modelName,
        stream: false,
        messages: [{ role: 'user', content: 'Reply with OK.' }],
        // temperature 0 and a tiny num_predict keep the probe cheap and stable.
        options: {
          temperature: 0,
          num_predict: 8,
        },
      }),
    })
    if (!response.ok) {
      return null
    }
    // Drain the body so the measured time covers the complete response.
    await response.json()
    return Date.now() - start
  } catch {
    return null
  } finally {
    clear()
  }
}
|
||||
export {
|
||||
benchmarkOllamaModel,
|
||||
DEFAULT_ATOMIC_CHAT_BASE_URL,
|
||||
DEFAULT_OLLAMA_BASE_URL,
|
||||
getAtomicChatApiBaseUrl,
|
||||
getAtomicChatChatBaseUrl,
|
||||
getOllamaApiBaseUrl,
|
||||
getOllamaChatBaseUrl,
|
||||
hasLocalAtomicChat,
|
||||
hasLocalOllama,
|
||||
listAtomicChatModels,
|
||||
listOllamaModels,
|
||||
} from '../src/utils/providerDiscovery.ts'
|
||||
|
||||
@@ -1,7 +1,5 @@
|
||||
// @ts-nocheck
|
||||
import { spawn } from 'node:child_process'
|
||||
import { existsSync, readFileSync } from 'node:fs'
|
||||
import { resolve } from 'node:path'
|
||||
import {
|
||||
resolveCodexApiCredentials,
|
||||
} from '../src/services/api/providerConfig.js'
|
||||
@@ -11,13 +9,17 @@ import {
|
||||
} from '../src/utils/providerRecommendation.ts'
|
||||
import {
|
||||
buildLaunchEnv,
|
||||
loadProfileFile,
|
||||
selectAutoProfile,
|
||||
type ProfileFile,
|
||||
type ProviderProfile,
|
||||
} from '../src/utils/providerProfile.ts'
|
||||
import {
|
||||
getAtomicChatChatBaseUrl,
|
||||
getOllamaChatBaseUrl,
|
||||
hasLocalAtomicChat,
|
||||
hasLocalOllama,
|
||||
listAtomicChatModels,
|
||||
listOllamaModels,
|
||||
} from './provider-discovery.ts'
|
||||
|
||||
@@ -48,7 +50,7 @@ function parseLaunchOptions(argv: string[]): LaunchOptions {
|
||||
continue
|
||||
}
|
||||
|
||||
if ((lower === 'auto' || lower === 'openai' || lower === 'ollama' || lower === 'codex' || lower === 'gemini') && requestedProfile === 'auto') {
|
||||
if ((lower === 'auto' || lower === 'openai' || lower === 'ollama' || lower === 'codex' || lower === 'gemini' || lower ==='mistral' || lower === 'atomic-chat') && requestedProfile === 'auto') {
|
||||
requestedProfile = lower as ProviderProfile | 'auto'
|
||||
continue
|
||||
}
|
||||
@@ -75,17 +77,7 @@ function parseLaunchOptions(argv: string[]): LaunchOptions {
|
||||
}
|
||||
|
||||
function loadPersistedProfile(): ProfileFile | null {
|
||||
const path = resolve(process.cwd(), '.openclaude-profile.json')
|
||||
if (!existsSync(path)) return null
|
||||
try {
|
||||
const parsed = JSON.parse(readFileSync(path, 'utf8')) as ProfileFile
|
||||
if (parsed.profile === 'openai' || parsed.profile === 'ollama' || parsed.profile === 'codex' || parsed.profile === 'gemini') {
|
||||
return parsed
|
||||
}
|
||||
return null
|
||||
} catch {
|
||||
return null
|
||||
}
|
||||
return loadProfileFile()
|
||||
}
|
||||
|
||||
async function resolveOllamaDefaultModel(
|
||||
@@ -96,6 +88,11 @@ async function resolveOllamaDefaultModel(
|
||||
return recommended?.name ?? null
|
||||
}
|
||||
|
||||
async function resolveAtomicChatDefaultModel(): Promise<string | null> {
|
||||
const models = await listAtomicChatModels()
|
||||
return models[0] ?? null
|
||||
}
|
||||
|
||||
// Runs a bare command (no extra argv) with the given environment and resolves
// with its exit code. Thin convenience wrapper over runProcess.
function runCommand(command: string, env: NodeJS.ProcessEnv): Promise<number> {
  return runProcess(command, [], env)
}
|
||||
@@ -123,19 +120,20 @@ function applyFastFlags(env: NodeJS.ProcessEnv): NodeJS.ProcessEnv {
|
||||
return env
|
||||
}
|
||||
|
||||
function printSummary(profile: ProviderProfile, env: NodeJS.ProcessEnv): void {
|
||||
function printSummary(profile: ProviderProfile): void {
|
||||
console.log(`Launching profile: ${profile}`)
|
||||
if (profile === 'gemini') {
|
||||
console.log(`GEMINI_MODEL=${env.GEMINI_MODEL}`)
|
||||
console.log(`GEMINI_API_KEY_SET=${Boolean(env.GEMINI_API_KEY)}`)
|
||||
console.log('Using configured Gemini provider settings.')
|
||||
} else if (profile === 'mistral') {
|
||||
console.log('Using configured Mistral provider settings.')
|
||||
} else if (profile === 'codex') {
|
||||
console.log(`OPENAI_BASE_URL=${env.OPENAI_BASE_URL}`)
|
||||
console.log(`OPENAI_MODEL=${env.OPENAI_MODEL}`)
|
||||
console.log(`CODEX_API_KEY_SET=${Boolean(resolveCodexApiCredentials(env).apiKey)}`)
|
||||
console.log('Using configured Codex/OpenAI-compatible provider settings.')
|
||||
} else if (profile === 'atomic-chat') {
|
||||
console.log('Using configured Atomic Chat provider settings.')
|
||||
} else if (profile === 'ollama') {
|
||||
console.log('Using configured Ollama provider settings.')
|
||||
} else {
|
||||
console.log(`OPENAI_BASE_URL=${env.OPENAI_BASE_URL}`)
|
||||
console.log(`OPENAI_MODEL=${env.OPENAI_MODEL}`)
|
||||
console.log(`OPENAI_API_KEY_SET=${Boolean(env.OPENAI_API_KEY)}`)
|
||||
console.log('Using configured OpenAI-compatible provider settings.')
|
||||
}
|
||||
}
|
||||
|
||||
@@ -143,7 +141,7 @@ async function main(): Promise<void> {
|
||||
const options = parseLaunchOptions(process.argv.slice(2))
|
||||
const requestedProfile = options.requestedProfile
|
||||
if (!requestedProfile) {
|
||||
console.error('Usage: bun run scripts/provider-launch.ts [openai|ollama|codex|gemini|auto] [--fast] [--goal <latency|balanced|coding>] [-- <cli args>]')
|
||||
console.error('Usage: bun run scripts/provider-launch.ts [openai|ollama|codex|gemini|mistral|atomic-chat|mistral|auto] [--fast] [--goal <latency|balanced|coding>] [-- <cli args>]')
|
||||
process.exit(1)
|
||||
}
|
||||
|
||||
@@ -175,12 +173,30 @@ async function main(): Promise<void> {
|
||||
}
|
||||
}
|
||||
|
||||
let resolvedAtomicChatModel: string | null = null
|
||||
if (
|
||||
profile === 'atomic-chat' &&
|
||||
(persisted?.profile !== 'atomic-chat' || !persisted?.env?.OPENAI_MODEL)
|
||||
) {
|
||||
if (!(await hasLocalAtomicChat())) {
|
||||
console.error('Atomic Chat is not running (could not connect to 127.0.0.1:1337).\n Download from https://atomic.chat/ and launch the application.')
|
||||
process.exit(1)
|
||||
}
|
||||
resolvedAtomicChatModel = await resolveAtomicChatDefaultModel()
|
||||
if (!resolvedAtomicChatModel) {
|
||||
console.error('Atomic Chat is running but no model is loaded. Open Atomic Chat and download or start a model first.')
|
||||
process.exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
const env = await buildLaunchEnv({
|
||||
profile,
|
||||
persisted,
|
||||
goal: options.goal,
|
||||
getOllamaChatBaseUrl,
|
||||
resolveOllamaDefaultModel: async () => resolvedOllamaModel || 'llama3.1:8b',
|
||||
getAtomicChatChatBaseUrl,
|
||||
resolveAtomicChatDefaultModel: async () => resolvedAtomicChatModel,
|
||||
})
|
||||
if (options.fast) {
|
||||
applyFastFlags(env)
|
||||
@@ -191,6 +207,11 @@ async function main(): Promise<void> {
|
||||
process.exit(1)
|
||||
}
|
||||
|
||||
if (profile === 'mistral' && !env.MISTRAL_API_KEY) {
|
||||
console.error('MISTRAL_API_KEY is required for mistral profile. Run: bun run profile:init -- --provider mistral --api-key <key>')
|
||||
process.exit(1)
|
||||
}
|
||||
|
||||
if (profile === 'openai' && (!env.OPENAI_API_KEY || env.OPENAI_API_KEY === 'SUA_CHAVE')) {
|
||||
console.error('OPENAI_API_KEY is required for openai profile and cannot be SUA_CHAVE. Run: bun run profile:init -- --provider openai --api-key <key>')
|
||||
process.exit(1)
|
||||
@@ -212,7 +233,7 @@ async function main(): Promise<void> {
|
||||
}
|
||||
}
|
||||
|
||||
printSummary(profile, env)
|
||||
printSummary(profile)
|
||||
|
||||
const doctorCode = await runProcess('bun', ['run', 'scripts/system-check.ts'], env)
|
||||
if (doctorCode !== 0) {
|
||||
|
||||
@@ -1,6 +1,4 @@
|
||||
// @ts-nocheck
|
||||
import { writeFileSync } from 'node:fs'
|
||||
import { resolve } from 'node:path'
|
||||
|
||||
import {
|
||||
applyBenchmarkLatency,
|
||||
@@ -16,6 +14,7 @@ import {
|
||||
buildOllamaProfileEnv,
|
||||
buildOpenAIProfileEnv,
|
||||
createProfileFile,
|
||||
saveProfileFile,
|
||||
sanitizeApiKey,
|
||||
type ProfileFile,
|
||||
type ProviderProfile,
|
||||
@@ -153,11 +152,7 @@ async function maybeApplyProfile(
|
||||
|
||||
const profileFile = createProfileFile(profile, env)
|
||||
|
||||
writeFileSync(
|
||||
resolve(process.cwd(), '.openclaude-profile.json'),
|
||||
JSON.stringify(profileFile, null, 2),
|
||||
'utf8',
|
||||
)
|
||||
saveProfileFile(profileFile)
|
||||
return true
|
||||
}
|
||||
|
||||
|
||||
393
scripts/render-coverage-heatmap.ts
Normal file
393
scripts/render-coverage-heatmap.ts
Normal file
@@ -0,0 +1,393 @@
|
||||
import { mkdir, readFile, writeFile } from 'fs/promises'
|
||||
import { dirname, resolve } from 'path'
|
||||
|
||||
type FileCoverage = {
|
||||
path: string
|
||||
found: number
|
||||
hit: number
|
||||
chunks: number[]
|
||||
}
|
||||
|
||||
type DirectoryCoverage = {
|
||||
path: string
|
||||
found: number
|
||||
hit: number
|
||||
}
|
||||
|
||||
const LCOV_PATH = resolve(process.cwd(), 'coverage/lcov.info')
|
||||
const HTML_PATH = resolve(process.cwd(), 'coverage/index.html')
|
||||
const CHUNK_COUNT = 20
|
||||
|
||||
function escapeHtml(value: string): string {
|
||||
return value
|
||||
.replaceAll('&', '&')
|
||||
.replaceAll('<', '<')
|
||||
.replaceAll('>', '>')
|
||||
.replaceAll('"', '"')
|
||||
}
|
||||
|
||||
function bucketColor(ratio: number): string {
|
||||
if (ratio >= 0.9) return '#166534'
|
||||
if (ratio >= 0.75) return '#15803d'
|
||||
if (ratio >= 0.5) return '#65a30d'
|
||||
if (ratio > 0) return '#a3a3a3'
|
||||
return '#262626'
|
||||
}
|
||||
|
||||
function coverageLabel(ratio: number): string {
|
||||
return `${Math.round(ratio * 100)}%`
|
||||
}
|
||||
|
||||
function coverageRatio(found: number, hit: number): number {
|
||||
return found === 0 ? 0 : hit / found
|
||||
}
|
||||
|
||||
function bucketGlyph(ratio: number): string {
|
||||
if (ratio >= 0.9) return '█'
|
||||
if (ratio >= 0.75) return '▓'
|
||||
if (ratio >= 0.5) return '▒'
|
||||
if (ratio > 0) return '░'
|
||||
return '·'
|
||||
}
|
||||
|
||||
function terminalBar(chunks: number[]): string {
|
||||
return chunks.map(bucketGlyph).join('')
|
||||
}
|
||||
|
||||
function summarizeDirectories(files: FileCoverage[]): DirectoryCoverage[] {
|
||||
const dirs = new Map<string, DirectoryCoverage>()
|
||||
|
||||
for (const file of files) {
|
||||
const dir =
|
||||
file.path.includes('/') ? file.path.slice(0, file.path.lastIndexOf('/')) : '.'
|
||||
const current = dirs.get(dir) ?? { path: dir, found: 0, hit: 0 }
|
||||
current.found += file.found
|
||||
current.hit += file.hit
|
||||
dirs.set(dir, current)
|
||||
}
|
||||
|
||||
return [...dirs.values()].sort((a, b) => {
|
||||
const left = coverageRatio(a.found, a.hit)
|
||||
const right = coverageRatio(b.found, b.hit)
|
||||
if (right !== left) return right - left
|
||||
return b.found - a.found
|
||||
})
|
||||
}
|
||||
|
||||
// Builds the plain-text coverage summary printed to the terminal: an overall
// glyph bar, the top directories by coverage, and the lowest-covered large
// files, followed by a pointer to the HTML report.
// NOTE(review): the '' characters in the format strings below appear to be
// mojibake for a separator glyph (presumably '·') — confirm against the
// original UTF-8 source.
function buildTerminalReport(files: FileCoverage[]): string {
  const totalFound = files.reduce((sum, file) => sum + file.found, 0)
  const totalHit = files.reduce((sum, file) => sum + file.hit, 0)
  const totalRatio = coverageRatio(totalFound, totalHit)
  // The overall bar is uniform: every chunk shows the aggregate ratio.
  const overallChunks = new Array(CHUNK_COUNT).fill(totalRatio)
  const topDirectories = summarizeDirectories(files)
    .filter(dir => dir.found > 0)
    .slice(0, 8)
  // Only files with at least 20 instrumented lines qualify for the
  // "lowest coverage" list, so tiny files do not dominate it.
  const lowestFiles = [...files]
    .filter(file => file.found >= 20)
    .sort((a, b) => {
      const left = coverageRatio(a.found, a.hit)
      const right = coverageRatio(b.found, b.hit)
      if (left !== right) return left - right
      return b.found - a.found
    })
    .slice(0, 10)

  const lines = [
    '',
    'Coverage Activity',
    `${terminalBar(overallChunks)} ${coverageLabel(totalRatio)} ${totalHit}/${totalFound} lines ${files.length} files`,
    '',
    'Top Directories',
  ]

  for (const dir of topDirectories) {
    const ratio = coverageRatio(dir.found, dir.hit)
    lines.push(
      `${terminalBar(new Array(12).fill(ratio))} ${coverageLabel(ratio).padStart(4)} ${String(dir.hit).padStart(5)}/${String(dir.found).padEnd(5)} ${dir.path}`,
    )
  }

  lines.push('', 'Lowest Coverage Files')

  for (const file of lowestFiles) {
    const ratio = coverageRatio(file.found, file.hit)
    lines.push(
      `${terminalBar(file.chunks).padEnd(CHUNK_COUNT)} ${coverageLabel(ratio).padStart(4)} ${String(file.hit).padStart(5)}/${String(file.found).padEnd(5)} ${file.path}`,
    )
  }

  lines.push('', `HTML report: ${HTML_PATH}`)
  return lines.join('\n')
}
|
||||
|
||||
function parseLcov(content: string): FileCoverage[] {
|
||||
const files: FileCoverage[] = []
|
||||
const sections = content.split('end_of_record')
|
||||
|
||||
for (const rawSection of sections) {
|
||||
const section = rawSection.trim()
|
||||
if (!section) continue
|
||||
|
||||
const lines = section.split('\n')
|
||||
let filePath = ''
|
||||
const lineHits = new Map<number, number>()
|
||||
|
||||
for (const line of lines) {
|
||||
if (line.startsWith('SF:')) {
|
||||
filePath = line.slice(3).trim()
|
||||
} else if (line.startsWith('DA:')) {
|
||||
const [lineNumberText, hitText] = line.slice(3).split(',')
|
||||
const lineNumber = Number(lineNumberText)
|
||||
const hits = Number(hitText)
|
||||
if (Number.isFinite(lineNumber) && Number.isFinite(hits)) {
|
||||
lineHits.set(lineNumber, hits)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (!filePath || lineHits.size === 0) continue
|
||||
|
||||
const ordered = [...lineHits.entries()].sort((a, b) => a[0] - b[0])
|
||||
const found = ordered.length
|
||||
const hit = ordered.filter(([, hits]) => hits > 0).length
|
||||
const chunkSize = Math.max(1, Math.ceil(found / CHUNK_COUNT))
|
||||
const chunks: number[] = []
|
||||
|
||||
for (let index = 0; index < found; index += chunkSize) {
|
||||
const slice = ordered.slice(index, index + chunkSize)
|
||||
const covered = slice.filter(([, hits]) => hits > 0).length
|
||||
chunks.push(slice.length === 0 ? 0 : covered / slice.length)
|
||||
}
|
||||
|
||||
while (chunks.length < CHUNK_COUNT) {
|
||||
chunks.push(0)
|
||||
}
|
||||
|
||||
files.push({
|
||||
path: filePath,
|
||||
found,
|
||||
hit,
|
||||
chunks: chunks.slice(0, CHUNK_COUNT),
|
||||
})
|
||||
}
|
||||
|
||||
return files.sort((a, b) => {
|
||||
const left = a.found === 0 ? 0 : a.hit / a.found
|
||||
const right = b.found === 0 ? 0 : b.hit / b.found
|
||||
if (right !== left) return right - left
|
||||
return a.path.localeCompare(b.path)
|
||||
})
|
||||
}
|
||||
|
||||
// Renders the standalone HTML coverage report: summary cards, a per-file
// table whose "Activity" column is a GitHub-style chunk heatmap, and a
// less→more color legend. Returns the complete document as a string.
// NOTE(review): indentation inside the template literals below affects the
// emitted HTML text; the original layout was lost to whitespace mangling and
// has been reconstructed — confirm against the original file if exact output
// bytes matter.
function buildHtml(files: FileCoverage[]): string {
  const totalFound = files.reduce((sum, file) => sum + file.found, 0)
  const totalHit = files.reduce((sum, file) => sum + file.hit, 0)
  const totalRatio = totalFound === 0 ? 0 : totalHit / totalFound

  // [label, value] pairs rendered as the three summary cards.
  const cards = [
    ['Files', String(files.length)],
    ['Covered Lines', `${totalHit}/${totalFound}`],
    ['Line Coverage', coverageLabel(totalRatio)],
  ]

  const rows = files
    .map(file => {
      const ratio = file.found === 0 ? 0 : file.hit / file.found
      // One colored square per chunk, with a tooltip giving its coverage.
      const squares = file.chunks
        .map(
          (chunk, index) =>
            `<span class="cell" title="Chunk ${index + 1}: ${coverageLabel(chunk)}" style="background:${bucketColor(chunk)}"></span>`,
        )
        .join('')

      return `
        <tr>
          <td class="file">${escapeHtml(file.path)}</td>
          <td class="percent">${coverageLabel(ratio)}</td>
          <td class="lines">${file.hit}/${file.found}</td>
          <td class="heatmap">${squares}</td>
        </tr>
      `
    })
    .join('')

  const summary = cards
    .map(
      ([label, value]) => `
        <div class="card">
          <div class="card-label">${escapeHtml(label)}</div>
          <div class="card-value">${escapeHtml(value)}</div>
        </div>
      `,
    )
    .join('')

  return `<!doctype html>
<html lang="en">
  <head>
    <meta charset="utf-8" />
    <meta name="viewport" content="width=device-width, initial-scale=1" />
    <title>OpenClaude Coverage</title>
    <style>
      :root {
        color-scheme: dark;
        --bg: #09090b;
        --panel: #111113;
        --panel-2: #18181b;
        --border: #27272a;
        --text: #fafafa;
        --muted: #a1a1aa;
      }
      * { box-sizing: border-box; }
      body {
        margin: 0;
        background: linear-gradient(180deg, #09090b 0%, #0f0f12 100%);
        color: var(--text);
        font: 14px/1.4 ui-monospace, SFMono-Regular, Menlo, monospace;
      }
      main {
        max-width: 1440px;
        margin: 0 auto;
        padding: 32px 24px 48px;
      }
      h1 {
        margin: 0 0 8px;
        font-size: 32px;
        letter-spacing: -0.04em;
      }
      p {
        margin: 0;
        color: var(--muted);
      }
      .summary {
        display: grid;
        grid-template-columns: repeat(3, minmax(0, 1fr));
        gap: 12px;
        margin: 24px 0;
      }
      .card {
        background: rgba(24, 24, 27, 0.92);
        border: 1px solid var(--border);
        border-radius: 16px;
        padding: 16px 18px;
      }
      .card-label {
        color: var(--muted);
        margin-bottom: 8px;
      }
      .card-value {
        font-size: 28px;
        font-weight: 700;
      }
      .table-wrap {
        background: rgba(17, 17, 19, 0.94);
        border: 1px solid var(--border);
        border-radius: 18px;
        overflow: hidden;
      }
      table {
        width: 100%;
        border-collapse: collapse;
      }
      thead th {
        text-align: left;
        color: var(--muted);
        font-weight: 500;
        background: rgba(24, 24, 27, 0.95);
        border-bottom: 1px solid var(--border);
      }
      th, td {
        padding: 12px 16px;
        vertical-align: middle;
      }
      tbody tr + tr td {
        border-top: 1px solid rgba(39, 39, 42, 0.65);
      }
      .file {
        width: 48%;
        word-break: break-all;
      }
      .percent, .lines {
        white-space: nowrap;
      }
      .heatmap {
        width: 32%;
        min-width: 280px;
      }
      .cell {
        display: inline-block;
        width: 12px;
        height: 12px;
        margin-right: 4px;
        border-radius: 3px;
        border: 1px solid rgba(255,255,255,0.05);
      }
      .legend {
        display: flex;
        align-items: center;
        gap: 10px;
        margin-top: 16px;
        color: var(--muted);
      }
      .legend-scale {
        display: flex;
        gap: 4px;
      }
      @media (max-width: 900px) {
        .summary {
          grid-template-columns: 1fr;
        }
        .heatmap {
          min-width: 220px;
        }
        th, td {
          padding: 10px 12px;
        }
      }
    </style>
  </head>
  <body>
    <main>
      <h1>Coverage Activity</h1>
      <p>Git-style heatmap generated from coverage/lcov.info</p>
      <section class="summary">${summary}</section>
      <section class="table-wrap">
        <table>
          <thead>
            <tr>
              <th>File</th>
              <th>Coverage</th>
              <th>Lines</th>
              <th>Activity</th>
            </tr>
          </thead>
          <tbody>${rows}</tbody>
        </table>
      </section>
      <div class="legend">
        <span>Less</span>
        <div class="legend-scale">
          <span class="cell" style="background:#262626"></span>
          <span class="cell" style="background:#a3a3a3"></span>
          <span class="cell" style="background:#65a30d"></span>
          <span class="cell" style="background:#15803d"></span>
          <span class="cell" style="background:#166534"></span>
        </div>
        <span>More</span>
      </div>
    </main>
  </body>
</html>`
}
|
||||
|
||||
async function main() {
|
||||
const content = await readFile(LCOV_PATH, 'utf8')
|
||||
const files = parseLcov(content)
|
||||
const html = buildHtml(files)
|
||||
await mkdir(dirname(HTML_PATH), { recursive: true })
|
||||
await writeFile(HTML_PATH, html, 'utf8')
|
||||
console.log(buildTerminalReport(files))
|
||||
console.log(`coverage heatmap written to ${HTML_PATH}`)
|
||||
}
|
||||
|
||||
await main()
|
||||
50
scripts/start-grpc.ts
Normal file
50
scripts/start-grpc.ts
Normal file
@@ -0,0 +1,50 @@
|
||||
import { GrpcServer } from '../src/grpc/server.ts'
|
||||
import { init } from '../src/entrypoints/init.ts'
|
||||
|
||||
// Polyfill MACRO which is normally injected by the bundler
// NOTE(review): the version strings are hard-coded here — presumably they
// should track package.json; confirm there is a single source of truth
// before release bumps.
Object.assign(globalThis, {
  MACRO: {
    VERSION: '0.1.7',
    DISPLAY_VERSION: '0.1.7',
    PACKAGE_URL: '@gitlawb/openclaude',
  }
})
|
||||
|
||||
/**
 * Boots the standalone gRPC server.
 *
 * Bootstrap order mirrors the CLI entrypoint: core init, config enablement,
 * managed env vars, secure-token hydration, then provider-profile resolution
 * and validation, and finally the server itself. The provider modules are
 * loaded via dynamic import so they execute only after init() completes.
 */
async function main() {
  console.log('Starting OpenClaude gRPC Server...')
  await init()

  // Mirror CLI bootstrap: hydrate secure tokens and resolve provider profile
  const { enableConfigs } = await import('../src/utils/config.js')
  enableConfigs()
  const { applySafeConfigEnvironmentVariables } = await import('../src/utils/managedEnv.js')
  applySafeConfigEnvironmentVariables()
  const { hydrateGeminiAccessTokenFromSecureStorage } = await import('../src/utils/geminiCredentials.js')
  hydrateGeminiAccessTokenFromSecureStorage()
  const { hydrateGithubModelsTokenFromSecureStorage } = await import('../src/utils/githubModelsCredentials.js')
  hydrateGithubModelsTokenFromSecureStorage()

  const { buildStartupEnvFromProfile, applyProfileEnvToProcessEnv } = await import('../src/utils/providerProfile.js')
  const { getProviderValidationError, validateProviderEnvOrExit } = await import('../src/utils/providerValidation.js')
  const startupEnv = await buildStartupEnvFromProfile({ processEnv: process.env })
  // A distinct object means a saved profile produced env overrides; apply
  // them only when they validate, otherwise warn and keep process.env as-is.
  if (startupEnv !== process.env) {
    const startupProfileError = await getProviderValidationError(startupEnv)
    if (startupProfileError) {
      console.warn(`Warning: ignoring saved provider profile. ${startupProfileError}`)
    } else {
      applyProfileEnvToProcessEnv(process.env, startupEnv)
    }
  }
  await validateProviderEnvOrExit()

  // GRPC_PORT / GRPC_HOST override the defaults (50051 on localhost).
  const port = process.env.GRPC_PORT ? parseInt(process.env.GRPC_PORT, 10) : 50051
  const host = process.env.GRPC_HOST || 'localhost'
  const server = new GrpcServer()

  server.start(port, host)
}
|
||||
|
||||
// Surface any bootstrap failure and exit non-zero so supervisors notice.
main().catch((err) => {
  console.error('Fatal error starting gRPC server:', err)
  process.exit(1)
})
|
||||
59
scripts/system-check.test.ts
Normal file
59
scripts/system-check.test.ts
Normal file
@@ -0,0 +1,59 @@
|
||||
import { describe, expect, test } from 'bun:test'
|
||||
|
||||
import { formatReachabilityFailureDetail } from './system-check.ts'
|
||||
|
||||
// Unit tests for the reachability-failure formatter exported by
// scripts/system-check.ts.
describe('formatReachabilityFailureDetail', () => {
  // Non-codex transports get the plain "status + body" message with no hint.
  test('returns generic failure detail for non-codex transport', () => {
    const detail = formatReachabilityFailureDetail(
      'https://api.openai.com/v1/models',
      429,
      '{"error":"rate_limit"}',
      {
        transport: 'chat_completions',
        requestedModel: 'gpt-4o',
        resolvedModel: 'gpt-4o',
      },
    )

    expect(detail).toBe(
      'Unexpected status 429 from https://api.openai.com/v1/models. Body: {"error":"rate_limit"}',
    )
  })

  // URL userinfo and sensitive query params must not leak into the message.
  test('redacts credentials and sensitive query parameters in endpoint details', () => {
    const detail = formatReachabilityFailureDetail(
      'http://user:pass@localhost:11434/v1/models?token=abc123&mode=test',
      502,
      'bad gateway',
      {
        transport: 'chat_completions',
        requestedModel: 'llama3.1:8b',
        resolvedModel: 'llama3.1:8b',
      },
    )

    expect(detail).toBe(
      'Unexpected status 502 from http://redacted:redacted@localhost:11434/v1/models?token=redacted&mode=test. Body: bad gateway',
    )
  })

  // A 400 on the codex transport whose body matches the "not supported ...
  // chatgpt account" pattern should append the alias/entitlement hint.
  test('adds alias/entitlement hint for codex model support 400s', () => {
    const detail = formatReachabilityFailureDetail(
      'https://chatgpt.com/backend-api/codex/responses',
      400,
      '{"detail":"The \\"gpt-5.3-codex-spark\\" model is not supported when using Codex with a ChatGPT account."}',
      {
        transport: 'codex_responses',
        requestedModel: 'codexspark',
        resolvedModel: 'gpt-5.3-codex-spark',
      },
    )

    expect(detail).toContain(
      'model alias "codexspark" resolved to "gpt-5.3-codex-spark"',
    )
    expect(detail).toContain(
      'Try "codexplan" or another entitled Codex model.',
    )
  })
})
|
||||
@@ -7,6 +7,11 @@ import {
|
||||
resolveProviderRequest,
|
||||
isLocalProviderUrl as isProviderLocalUrl,
|
||||
} from '../src/services/api/providerConfig.js'
|
||||
import {
|
||||
getLocalOpenAICompatibleProviderLabel,
|
||||
probeOllamaGenerationReadiness,
|
||||
} from '../src/utils/providerDiscovery.js'
|
||||
import { redactUrlForDisplay } from '../src/utils/urlRedaction.js'
|
||||
|
||||
type CheckResult = {
|
||||
ok: boolean
|
||||
@@ -58,6 +63,31 @@ function parseOptions(argv: string[]): CliOptions {
|
||||
return options
|
||||
}
|
||||
|
||||
export function formatReachabilityFailureDetail(
|
||||
endpoint: string,
|
||||
status: number,
|
||||
responseBody: string,
|
||||
request: {
|
||||
transport: string
|
||||
requestedModel: string
|
||||
resolvedModel: string
|
||||
},
|
||||
): string {
|
||||
const compactBody = responseBody.trim().replace(/\s+/g, ' ').slice(0, 240)
|
||||
const base = `Unexpected status ${status} from ${redactUrlForDisplay(endpoint)}.`
|
||||
const bodySuffix = compactBody ? ` Body: ${compactBody}` : ''
|
||||
|
||||
if (request.transport !== 'codex_responses' || status !== 400) {
|
||||
return `${base}${bodySuffix}`
|
||||
}
|
||||
|
||||
if (!/not supported.*chatgpt account/i.test(responseBody)) {
|
||||
return `${base}${bodySuffix}`
|
||||
}
|
||||
|
||||
return `${base}${bodySuffix} Hint: model alias "${request.requestedModel}" resolved to "${request.resolvedModel}", which this ChatGPT account does not currently allow. Try "codexplan" or another entitled Codex model.`
|
||||
}
|
||||
|
||||
function checkNodeVersion(): CheckResult {
|
||||
const raw = process.versions.node
|
||||
const major = Number(raw.split('.')[0] ?? '0')
|
||||
@@ -93,11 +123,19 @@ function isLocalBaseUrl(baseUrl: string): boolean {
|
||||
}
|
||||
|
||||
const GEMINI_DEFAULT_BASE_URL = 'https://generativelanguage.googleapis.com/v1beta/openai'
|
||||
const MISTRAL_DEFAULT_BASE_URL = 'https://api.mistral.ai/v1'
|
||||
const GITHUB_COPILOT_BASE = 'https://api.githubcopilot.com'
|
||||
|
||||
function currentBaseUrl(): string {
|
||||
if (isTruthy(process.env.CLAUDE_CODE_USE_GEMINI)) {
|
||||
return process.env.GEMINI_BASE_URL ?? GEMINI_DEFAULT_BASE_URL
|
||||
}
|
||||
if (isTruthy(process.env.CLAUDE_CODE_USE_MISTRAL)) {
|
||||
return process.env.MISTRAL_BASE_URL ?? MISTRAL_DEFAULT_BASE_URL
|
||||
}
|
||||
if (isTruthy(process.env.CLAUDE_CODE_USE_GITHUB)) {
|
||||
return process.env.OPENAI_BASE_URL ?? GITHUB_COPILOT_BASE
|
||||
}
|
||||
return process.env.OPENAI_BASE_URL ?? 'https://api.openai.com/v1'
|
||||
}
|
||||
|
||||
@@ -126,15 +164,77 @@ function checkGeminiEnv(): CheckResult[] {
|
||||
return results
|
||||
}
|
||||
|
||||
function checkMistralEnv(): CheckResult[] {
|
||||
const results: CheckResult[] = []
|
||||
const model = process.env.MISTRAL_MODEL
|
||||
const key = process.env.MISTRAL_API_KEY
|
||||
const baseUrl = process.env.MISTRAL_BASE_URL ?? MISTRAL_DEFAULT_BASE_URL
|
||||
|
||||
results.push(pass('Provider mode', 'Mistral provider enabled.'))
|
||||
|
||||
if (!model) {
|
||||
results.push(pass('MISTRAL_MODEL', 'Not set. Default will be used at runtime.'))
|
||||
} else {
|
||||
results.push(pass('MISTRAL_MODEL', model))
|
||||
}
|
||||
|
||||
results.push(pass('MISTRAL_BASE_URL', baseUrl))
|
||||
|
||||
if (!key) {
|
||||
results.push(fail('MISTRAL_API_KEY', 'Missing. Set MISTRAL_API_KEY.'))
|
||||
} else {
|
||||
results.push(pass('MISTRAL_API_KEY', 'Configured.'))
|
||||
}
|
||||
|
||||
return results
|
||||
}
|
||||
|
||||
function checkGithubEnv(): CheckResult[] {
|
||||
const results: CheckResult[] = []
|
||||
const baseUrl = process.env.OPENAI_BASE_URL ?? GITHUB_COPILOT_BASE
|
||||
results.push(pass('Provider mode', 'GitHub Models provider enabled.'))
|
||||
|
||||
const token = process.env.GITHUB_TOKEN ?? process.env.GH_TOKEN
|
||||
if (!token?.trim()) {
|
||||
results.push(fail('GITHUB_TOKEN', 'Missing. Set GITHUB_TOKEN or GH_TOKEN.'))
|
||||
} else {
|
||||
results.push(pass('GITHUB_TOKEN', 'Configured.'))
|
||||
}
|
||||
|
||||
if (!process.env.OPENAI_MODEL) {
|
||||
results.push(
|
||||
pass(
|
||||
'OPENAI_MODEL',
|
||||
'Not set. Default github:copilot → openai/gpt-4.1 at runtime.',
|
||||
),
|
||||
)
|
||||
} else {
|
||||
results.push(pass('OPENAI_MODEL', process.env.OPENAI_MODEL))
|
||||
}
|
||||
|
||||
results.push(pass('OPENAI_BASE_URL', baseUrl))
|
||||
return results
|
||||
}
|
||||
|
||||
function checkOpenAIEnv(): CheckResult[] {
|
||||
const results: CheckResult[] = []
|
||||
const useGemini = isTruthy(process.env.CLAUDE_CODE_USE_GEMINI)
|
||||
const useGithub = isTruthy(process.env.CLAUDE_CODE_USE_GITHUB)
|
||||
const useMistral = isTruthy(process.env.CLAUDE_CODE_USE_MISTRAL)
|
||||
const useOpenAI = isTruthy(process.env.CLAUDE_CODE_USE_OPENAI)
|
||||
|
||||
if (useGemini) {
|
||||
return checkGeminiEnv()
|
||||
}
|
||||
|
||||
if (useMistral) {
|
||||
return checkMistralEnv()
|
||||
}
|
||||
|
||||
if (useGithub && !useOpenAI) {
|
||||
return checkGithubEnv()
|
||||
}
|
||||
|
||||
if (!useOpenAI) {
|
||||
results.push(pass('Provider mode', 'Anthropic login flow enabled (CLAUDE_CODE_USE_OPENAI is off).'))
|
||||
return results
|
||||
@@ -160,7 +260,7 @@ function checkOpenAIEnv(): CheckResult[] {
|
||||
results.push(pass('OPENAI_MODEL', process.env.OPENAI_MODEL))
|
||||
}
|
||||
|
||||
results.push(pass('OPENAI_BASE_URL', request.baseUrl))
|
||||
results.push(pass('OPENAI_BASE_URL', redactUrlForDisplay(request.baseUrl)))
|
||||
|
||||
if (request.transport === 'codex_responses') {
|
||||
const credentials = resolveCodexApiCredentials(process.env)
|
||||
@@ -181,12 +281,21 @@ function checkOpenAIEnv(): CheckResult[] {
|
||||
}
|
||||
|
||||
const key = process.env.OPENAI_API_KEY
|
||||
const githubToken = process.env.GITHUB_TOKEN ?? process.env.GH_TOKEN
|
||||
if (key === 'SUA_CHAVE') {
|
||||
results.push(fail('OPENAI_API_KEY', 'Placeholder value detected: SUA_CHAVE.'))
|
||||
} else if (!key && !isLocalBaseUrl(request.baseUrl)) {
|
||||
} else if (
|
||||
!key &&
|
||||
!isLocalBaseUrl(request.baseUrl) &&
|
||||
!(useGithub && githubToken?.trim())
|
||||
) {
|
||||
results.push(fail('OPENAI_API_KEY', 'Missing key for non-local provider URL.'))
|
||||
} else if (!key && useGithub && githubToken?.trim()) {
|
||||
results.push(
|
||||
pass('OPENAI_API_KEY', 'Not set; GITHUB_TOKEN/GH_TOKEN will be used for GitHub Models.'),
|
||||
)
|
||||
} else if (!key) {
|
||||
results.push(pass('OPENAI_API_KEY', 'Not set (allowed for local providers like Ollama/LM Studio).'))
|
||||
results.push(pass('OPENAI_API_KEY', 'Not set (allowed for local providers like Atomic Chat/Ollama/LM Studio).'))
|
||||
} else {
|
||||
results.push(pass('OPENAI_API_KEY', 'Configured.'))
|
||||
}
|
||||
@@ -197,11 +306,20 @@ function checkOpenAIEnv(): CheckResult[] {
|
||||
async function checkBaseUrlReachability(): Promise<CheckResult> {
|
||||
const useGemini = isTruthy(process.env.CLAUDE_CODE_USE_GEMINI)
|
||||
const useOpenAI = isTruthy(process.env.CLAUDE_CODE_USE_OPENAI)
|
||||
const useGithub = isTruthy(process.env.CLAUDE_CODE_USE_GITHUB)
|
||||
const useMistral = isTruthy(process.env.CLAUDE_CODE_USE_MISTRAL)
|
||||
|
||||
if (!useGemini && !useOpenAI) {
|
||||
if (!useGemini && !useOpenAI && !useGithub && !useMistral) {
|
||||
return pass('Provider reachability', 'Skipped (OpenAI-compatible mode disabled).')
|
||||
}
|
||||
|
||||
if (useGithub && !useOpenAI) {
|
||||
return pass(
|
||||
'Provider reachability',
|
||||
'Skipped for GitHub Models (inference endpoint differs from OpenAI /models probe).',
|
||||
)
|
||||
}
|
||||
|
||||
const geminiBaseUrl = 'https://generativelanguage.googleapis.com/v1beta/openai'
|
||||
const resolvedBaseUrl = useGemini
|
||||
? (process.env.GEMINI_BASE_URL ?? geminiBaseUrl)
|
||||
@@ -213,6 +331,7 @@ async function checkBaseUrlReachability(): Promise<CheckResult> {
|
||||
const endpoint = request.transport === 'codex_responses'
|
||||
? `${request.baseUrl}/responses`
|
||||
: `${request.baseUrl}/models`
|
||||
const redactedEndpoint = redactUrlForDisplay(endpoint)
|
||||
|
||||
const controller = new AbortController()
|
||||
const timeout = setTimeout(() => controller.abort(), 4000)
|
||||
@@ -231,6 +350,7 @@ async function checkBaseUrlReachability(): Promise<CheckResult> {
|
||||
headers['chatgpt-account-id'] = credentials.accountId
|
||||
}
|
||||
headers['Content-Type'] = 'application/json'
|
||||
headers.originator = 'openclaude'
|
||||
method = 'POST'
|
||||
body = JSON.stringify({
|
||||
model: request.resolvedModel,
|
||||
@@ -247,6 +367,8 @@ async function checkBaseUrlReachability(): Promise<CheckResult> {
|
||||
})
|
||||
} else if (useGemini && (process.env.GEMINI_API_KEY ?? process.env.GOOGLE_API_KEY)) {
|
||||
headers.Authorization = `Bearer ${process.env.GEMINI_API_KEY ?? process.env.GOOGLE_API_KEY}`
|
||||
} else if (useMistral && process.env.MISTRAL_API_KEY) {
|
||||
headers.Authorization = `Bearer ${process.env.MISTRAL_API_KEY}`
|
||||
} else if (process.env.OPENAI_API_KEY) {
|
||||
headers.Authorization = `Bearer ${process.env.OPENAI_API_KEY}`
|
||||
}
|
||||
@@ -259,20 +381,135 @@ async function checkBaseUrlReachability(): Promise<CheckResult> {
|
||||
})
|
||||
|
||||
if (response.status === 200 || response.status === 401 || response.status === 403) {
|
||||
return pass('Provider reachability', `Reached ${endpoint} (status ${response.status}).`)
|
||||
return pass(
|
||||
'Provider reachability',
|
||||
`Reached ${redactedEndpoint} (status ${response.status}).`,
|
||||
)
|
||||
}
|
||||
|
||||
return fail('Provider reachability', `Unexpected status ${response.status} from ${endpoint}.`)
|
||||
const responseBody = await response.text().catch(() => '')
|
||||
const detail = formatReachabilityFailureDetail(
|
||||
endpoint,
|
||||
response.status,
|
||||
responseBody,
|
||||
request,
|
||||
)
|
||||
return fail(
|
||||
'Provider reachability',
|
||||
detail,
|
||||
)
|
||||
} catch (error) {
|
||||
const message = error instanceof Error ? error.message : String(error)
|
||||
return fail('Provider reachability', `Failed to reach ${endpoint}: ${message}`)
|
||||
return fail(
|
||||
'Provider reachability',
|
||||
`Failed to reach ${redactedEndpoint}: ${message}`,
|
||||
)
|
||||
} finally {
|
||||
clearTimeout(timeout)
|
||||
}
|
||||
}
|
||||
|
||||
async function checkProviderGenerationReadiness(): Promise<CheckResult> {
|
||||
const useGemini = isTruthy(process.env.CLAUDE_CODE_USE_GEMINI)
|
||||
const useOpenAI = isTruthy(process.env.CLAUDE_CODE_USE_OPENAI)
|
||||
const useGithub = isTruthy(process.env.CLAUDE_CODE_USE_GITHUB)
|
||||
const useMistral = isTruthy(process.env.CLAUDE_CODE_USE_MISTRAL)
|
||||
|
||||
if (!useGemini && !useOpenAI && !useGithub && !useMistral) {
|
||||
return pass('Provider generation readiness', 'Skipped (OpenAI-compatible mode disabled).')
|
||||
}
|
||||
|
||||
if (useGithub && !useOpenAI) {
|
||||
return pass(
|
||||
'Provider generation readiness',
|
||||
'Skipped for GitHub Models (runtime generation uses a different endpoint flow).',
|
||||
)
|
||||
}
|
||||
|
||||
if (useGemini || useMistral) {
|
||||
return pass(
|
||||
'Provider generation readiness',
|
||||
'Skipped for managed provider mode.',
|
||||
)
|
||||
}
|
||||
|
||||
if (!useOpenAI) {
|
||||
return pass('Provider generation readiness', 'Skipped (OpenAI-compatible mode disabled).')
|
||||
}
|
||||
|
||||
const request = resolveProviderRequest({
|
||||
model: process.env.OPENAI_MODEL,
|
||||
baseUrl: process.env.OPENAI_BASE_URL,
|
||||
})
|
||||
|
||||
if (request.transport === 'codex_responses') {
|
||||
return pass(
|
||||
'Provider generation readiness',
|
||||
'Skipped for Codex responses (reachability probe already performs a lightweight generation request).',
|
||||
)
|
||||
}
|
||||
|
||||
if (!isLocalBaseUrl(request.baseUrl)) {
|
||||
return pass('Provider generation readiness', 'Skipped for non-local provider URL.')
|
||||
}
|
||||
|
||||
const localProviderLabel = getLocalOpenAICompatibleProviderLabel(request.baseUrl)
|
||||
if (localProviderLabel !== 'Ollama') {
|
||||
return pass(
|
||||
'Provider generation readiness',
|
||||
`Skipped for ${localProviderLabel} (no provider-specific generation probe).`,
|
||||
)
|
||||
}
|
||||
|
||||
const readiness = await probeOllamaGenerationReadiness({
|
||||
baseUrl: request.baseUrl,
|
||||
model: request.requestedModel,
|
||||
})
|
||||
|
||||
if (readiness.state === 'ready') {
|
||||
return pass(
|
||||
'Provider generation readiness',
|
||||
`Generated a test response with ${readiness.probeModel ?? request.requestedModel}.`,
|
||||
)
|
||||
}
|
||||
|
||||
if (readiness.state === 'unreachable') {
|
||||
return fail(
|
||||
'Provider generation readiness',
|
||||
`Could not reach Ollama at ${redactUrlForDisplay(request.baseUrl)}.`,
|
||||
)
|
||||
}
|
||||
|
||||
if (readiness.state === 'no_models') {
|
||||
return fail(
|
||||
'Provider generation readiness',
|
||||
'Ollama is reachable, but no installed models were found. Pull a model first (for example: ollama pull qwen2.5-coder:7b).',
|
||||
)
|
||||
}
|
||||
|
||||
const detailSuffix = readiness.detail ? ` Detail: ${readiness.detail}.` : ''
|
||||
return fail(
|
||||
'Provider generation readiness',
|
||||
`Ollama is reachable, but generation failed for ${readiness.probeModel ?? request.requestedModel}.${detailSuffix}`,
|
||||
)
|
||||
}
|
||||
|
||||
function isAtomicChatUrl(baseUrl: string): boolean {
|
||||
try {
|
||||
const parsed = new URL(baseUrl)
|
||||
return parsed.port === '1337' && isLocalBaseUrl(baseUrl)
|
||||
} catch {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
function checkOllamaProcessorMode(): CheckResult {
|
||||
if (!isTruthy(process.env.CLAUDE_CODE_USE_OPENAI) || isTruthy(process.env.CLAUDE_CODE_USE_GEMINI)) {
|
||||
if (
|
||||
!isTruthy(process.env.CLAUDE_CODE_USE_OPENAI) ||
|
||||
isTruthy(process.env.CLAUDE_CODE_USE_GEMINI) ||
|
||||
isTruthy(process.env.CLAUDE_CODE_USE_GITHUB) ||
|
||||
isTruthy(process.env.CLAUDE_CODE_USE_MISTRAL)
|
||||
) {
|
||||
return pass('Ollama processor mode', 'Skipped (OpenAI-compatible mode disabled).')
|
||||
}
|
||||
|
||||
@@ -281,6 +518,10 @@ function checkOllamaProcessorMode(): CheckResult {
|
||||
return pass('Ollama processor mode', 'Skipped (provider URL is not local).')
|
||||
}
|
||||
|
||||
if (isAtomicChatUrl(baseUrl)) {
|
||||
return pass('Ollama processor mode', 'Skipped (Atomic Chat local provider detected, not Ollama).')
|
||||
}
|
||||
|
||||
const result = spawnSync('ollama', ['ps'], {
|
||||
cwd: process.cwd(),
|
||||
encoding: 'utf8',
|
||||
@@ -289,7 +530,7 @@ function checkOllamaProcessorMode(): CheckResult {
|
||||
|
||||
if (result.status !== 0) {
|
||||
const detail = (result.stderr || result.stdout || 'Unable to run ollama ps').trim()
|
||||
return fail('Ollama processor mode', detail)
|
||||
return pass('Ollama processor mode', `Native CLI check failed (${detail}). Assuming valid Docker/remote backend since HTTP ping passed.`)
|
||||
}
|
||||
|
||||
const output = (result.stdout || '').trim()
|
||||
@@ -319,6 +560,30 @@ function serializeSafeEnvSummary(): Record<string, string | boolean> {
|
||||
GEMINI_API_KEY_SET: Boolean(process.env.GEMINI_API_KEY ?? process.env.GOOGLE_API_KEY),
|
||||
}
|
||||
}
|
||||
if (isTruthy(process.env.CLAUDE_CODE_USE_MISTRAL)) {
|
||||
return {
|
||||
CLAUDE_CODE_USE_MISTRAL: true,
|
||||
MISTRAL_MODEL: process.env.MISTRAL_MODEL ?? '(unset, default: devstral-latest)',
|
||||
MISTRAL_BASE_URL: process.env.MISTRAL_BASE_URL ?? 'https://api.mistral.ai/v1',
|
||||
MISTRAL_API_KEY_SET: Boolean(process.env.MISTRAL_API_KEY),
|
||||
}
|
||||
}
|
||||
if (
|
||||
isTruthy(process.env.CLAUDE_CODE_USE_GITHUB) &&
|
||||
!isTruthy(process.env.CLAUDE_CODE_USE_OPENAI)
|
||||
) {
|
||||
return {
|
||||
CLAUDE_CODE_USE_GITHUB: true,
|
||||
OPENAI_MODEL:
|
||||
process.env.OPENAI_MODEL ??
|
||||
'(unset, default: github:copilot → openai/gpt-4.1)',
|
||||
OPENAI_BASE_URL:
|
||||
process.env.OPENAI_BASE_URL ?? GITHUB_COPILOT_BASE,
|
||||
GITHUB_TOKEN_SET: Boolean(
|
||||
process.env.GITHUB_TOKEN ?? process.env.GH_TOKEN,
|
||||
),
|
||||
}
|
||||
}
|
||||
const request = resolveProviderRequest({
|
||||
model: process.env.OPENAI_MODEL,
|
||||
baseUrl: process.env.OPENAI_BASE_URL,
|
||||
@@ -344,6 +609,7 @@ function writeJsonReport(
|
||||
options: CliOptions,
|
||||
results: CheckResult[],
|
||||
): void {
|
||||
const envSummary = serializeSafeEnvSummary()
|
||||
const payload = {
|
||||
timestamp: new Date().toISOString(),
|
||||
cwd: process.cwd(),
|
||||
@@ -352,12 +618,24 @@ function writeJsonReport(
|
||||
passed: results.filter(result => result.ok).length,
|
||||
failed: results.filter(result => !result.ok).length,
|
||||
},
|
||||
env: serializeSafeEnvSummary(),
|
||||
env: envSummary,
|
||||
results,
|
||||
}
|
||||
|
||||
if (options.json) {
|
||||
console.log(JSON.stringify(payload, null, 2))
|
||||
console.log(
|
||||
JSON.stringify(
|
||||
{
|
||||
timestamp: payload.timestamp,
|
||||
cwd: payload.cwd,
|
||||
summary: payload.summary,
|
||||
env: '[redacted in console JSON output; use --out-file for the full report]',
|
||||
results: payload.results,
|
||||
},
|
||||
null,
|
||||
2,
|
||||
),
|
||||
)
|
||||
}
|
||||
|
||||
if (options.outFile) {
|
||||
@@ -374,11 +652,19 @@ async function main(): Promise<void> {
|
||||
const options = parseOptions(process.argv.slice(2))
|
||||
const results: CheckResult[] = []
|
||||
|
||||
const { enableConfigs } = await import('../src/utils/config.js')
|
||||
enableConfigs()
|
||||
const { applySafeConfigEnvironmentVariables } = await import('../src/utils/managedEnv.js')
|
||||
applySafeConfigEnvironmentVariables()
|
||||
const { hydrateGithubModelsTokenFromSecureStorage } = await import('../src/utils/githubModelsCredentials.js')
|
||||
hydrateGithubModelsTokenFromSecureStorage()
|
||||
|
||||
results.push(checkNodeVersion())
|
||||
results.push(checkBunRuntime())
|
||||
results.push(checkBuildArtifacts())
|
||||
results.push(...checkOpenAIEnv())
|
||||
results.push(await checkBaseUrlReachability())
|
||||
results.push(await checkProviderGenerationReadiness())
|
||||
results.push(checkOllamaProcessorMode())
|
||||
|
||||
if (!options.json) {
|
||||
@@ -398,6 +684,8 @@ async function main(): Promise<void> {
|
||||
}
|
||||
}
|
||||
|
||||
await main()
|
||||
if (import.meta.main) {
|
||||
await main()
|
||||
}
|
||||
|
||||
export {}
|
||||
|
||||
50
scripts/verify-no-phone-home.sh
Normal file
50
scripts/verify-no-phone-home.sh
Normal file
@@ -0,0 +1,50 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
DIST="dist/cli.mjs"
|
||||
|
||||
if [ ! -f "$DIST" ]; then
|
||||
echo "ERROR: $DIST not found. Run 'bun run build' first."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
EXIT=0
|
||||
|
||||
BANNED=(
|
||||
"datadoghq.com"
|
||||
"api/event_logging/batch"
|
||||
"api/claude_code/metrics"
|
||||
"getKubernetesNamespace"
|
||||
"/var/run/secrets/kubernetes"
|
||||
"/proc/self/mountinfo"
|
||||
"tengu_internal_record_permission_context"
|
||||
"anthropic-serve"
|
||||
"infra.ant.dev"
|
||||
"claude-code-feedback"
|
||||
"C07VBSHV7EV"
|
||||
)
|
||||
|
||||
echo "Checking $DIST for banned patterns..."
|
||||
echo ""
|
||||
|
||||
for pattern in "${BANNED[@]}"; do
|
||||
COUNT=$(grep -F -c "$pattern" "$DIST" 2>/dev/null || true)
|
||||
COUNT=${COUNT:-0}
|
||||
if [ "$COUNT" -gt 0 ]; then
|
||||
echo " FAIL: '$pattern' found ($COUNT occurrences)"
|
||||
EXIT=1
|
||||
else
|
||||
echo " PASS: '$pattern' not found"
|
||||
fi
|
||||
done
|
||||
|
||||
echo ""
|
||||
|
||||
if [ "$EXIT" -eq 0 ]; then
|
||||
echo "✓ All checks passed — no banned patterns in build output"
|
||||
else
|
||||
echo "✗ FAILED — banned patterns found in build output"
|
||||
fi
|
||||
|
||||
exit $EXIT
|
||||
47
scripts/verify-no-phone-home.ts
Normal file
47
scripts/verify-no-phone-home.ts
Normal file
@@ -0,0 +1,47 @@
|
||||
import { existsSync, readFileSync } from 'node:fs'
|
||||
|
||||
const DIST = 'dist/cli.mjs'
|
||||
const BANNED_PATTERNS = [
|
||||
'datadoghq.com',
|
||||
'api/event_logging/batch',
|
||||
'api/claude_code/metrics',
|
||||
'getKubernetesNamespace',
|
||||
'/var/run/secrets/kubernetes',
|
||||
'/proc/self/mountinfo',
|
||||
'tengu_internal_record_permission_context',
|
||||
'anthropic-serve',
|
||||
'infra.ant.dev',
|
||||
'claude-code-feedback',
|
||||
'C07VBSHV7EV',
|
||||
] as const
|
||||
|
||||
if (!existsSync(DIST)) {
|
||||
console.error(`ERROR: ${DIST} not found. Run 'bun run build' first.`)
|
||||
process.exit(1)
|
||||
}
|
||||
|
||||
const contents = readFileSync(DIST, 'utf8')
|
||||
let exitCode = 0
|
||||
|
||||
console.log(`Checking ${DIST} for banned patterns...`)
|
||||
console.log('')
|
||||
|
||||
for (const pattern of BANNED_PATTERNS) {
|
||||
const count = contents.split(pattern).length - 1
|
||||
if (count > 0) {
|
||||
console.log(` FAIL: '${pattern}' found (${count} occurrences)`)
|
||||
exitCode = 1
|
||||
} else {
|
||||
console.log(` PASS: '${pattern}' not found`)
|
||||
}
|
||||
}
|
||||
|
||||
console.log('')
|
||||
|
||||
if (exitCode === 0) {
|
||||
console.log('✓ All checks passed — no banned patterns in build output')
|
||||
} else {
|
||||
console.log('✗ FAILED — banned patterns found in build output')
|
||||
}
|
||||
|
||||
process.exit(exitCode)
|
||||
@@ -46,6 +46,7 @@ import type { AttributionState } from './utils/commitAttribution.js'
|
||||
import { getGlobalConfig } from './utils/config.js'
|
||||
import { getCwd } from './utils/cwd.js'
|
||||
import { isBareMode, isEnvTruthy } from './utils/envUtils.js'
|
||||
import { logForDebugging } from './utils/debug.js'
|
||||
import { getFastModeState } from './utils/fastMode.js'
|
||||
import {
|
||||
type FileHistoryState,
|
||||
@@ -695,9 +696,11 @@ export class QueryEngine {
|
||||
// progress are now recorded inline (their switch cases below), but
|
||||
// this flush still matters for the preservedSegment tail walk.
|
||||
// If the SDK subprocess restarts before then (claude-desktop kills
|
||||
// between turns), tailUuid points to a never-written message →
|
||||
// applyPreservedSegmentRelinks fails its tail→head walk → returns
|
||||
// without pruning → resume loads full pre-compact history.
|
||||
// between turns), tailUuid can point to a never-written message. In
|
||||
// that case strip preservedSegment before transcript persistence so
|
||||
// resume falls back to ordinary boundary pruning instead of relying on
|
||||
// broken relink metadata.
|
||||
let transcriptMessage = message
|
||||
if (
|
||||
persistSession &&
|
||||
message.type === 'system' &&
|
||||
@@ -710,10 +713,21 @@ export class QueryEngine {
|
||||
)
|
||||
if (tailIdx !== -1) {
|
||||
await recordTranscript(this.mutableMessages.slice(0, tailIdx + 1))
|
||||
} else {
|
||||
transcriptMessage = {
|
||||
...message,
|
||||
compactMetadata: {
|
||||
...message.compactMetadata,
|
||||
preservedSegment: undefined,
|
||||
},
|
||||
}
|
||||
logForDebugging(
|
||||
`[QueryEngine] stripped preservedSegment before transcript write; missing tail ${tailUuid}`,
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
messages.push(message)
|
||||
messages.push(transcriptMessage)
|
||||
if (persistSession) {
|
||||
// Fire-and-forget for assistant messages. claude.ts yields one
|
||||
// assistant message per content block, then mutates the last
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
import { randomBytes } from 'crypto'
|
||||
import { randomInt } from 'crypto'
|
||||
import type { AppState } from './state/AppState.js'
|
||||
import type { AgentId } from './types/ids.js'
|
||||
import { getTaskOutputPath } from './utils/task/diskOutput.js'
|
||||
@@ -97,10 +97,9 @@ const TASK_ID_ALPHABET = '0123456789abcdefghijklmnopqrstuvwxyz'
|
||||
|
||||
export function generateTaskId(type: TaskType): string {
|
||||
const prefix = getTaskIdPrefix(type)
|
||||
const bytes = randomBytes(8)
|
||||
let id = prefix
|
||||
for (let i = 0; i < 8; i++) {
|
||||
id += TASK_ID_ALPHABET[bytes[i]! % TASK_ID_ALPHABET.length]
|
||||
id += TASK_ID_ALPHABET[randomInt(TASK_ID_ALPHABET.length)]!
|
||||
}
|
||||
return id
|
||||
}
|
||||
|
||||
15
src/Tool.ts
15
src/Tool.ts
@@ -176,6 +176,8 @@ export type ToolUseContext = {
|
||||
querySource?: QuerySource
|
||||
/** Optional callback to get the latest tools (e.g., after MCP servers connect mid-query) */
|
||||
refreshTools?: () => Tools
|
||||
/** Per-agent provider override from agentRouting config */
|
||||
providerOverride?: { model: string; baseURL: string; apiKey: string }
|
||||
}
|
||||
abortController: AbortController
|
||||
readFileState: FileStateCache
|
||||
@@ -247,6 +249,11 @@ export type ToolUseContext = {
|
||||
/** When true, canUseTool must always be called even when hooks auto-approve.
|
||||
* Used by speculation for overlay file path rewriting. */
|
||||
requireCanUseTool?: boolean
|
||||
/**
|
||||
* Optional callback used by hook-chain fallback actions that launch
|
||||
* AgentTool from hook runtime paths.
|
||||
*/
|
||||
hookChainsCanUseTool?: CanUseToolFn
|
||||
messages: Message[]
|
||||
fileReadingLimits?: {
|
||||
maxTokens?: number
|
||||
@@ -290,6 +297,14 @@ export type ToolUseContext = {
|
||||
* resumeAgentBackground threads one reconstructed from sidechain records.
|
||||
*/
|
||||
contentReplacementState?: ContentReplacementState
|
||||
/**
|
||||
* Interactive REPL only: mirror persisted tool-result replacements back
|
||||
* into the live transcript so the original oversized payloads can be
|
||||
* released from heap once the replacement decision is known.
|
||||
*/
|
||||
syncToolResultReplacements?: (
|
||||
replacements: ReadonlyMap<string, string>,
|
||||
) => void
|
||||
/**
|
||||
* Parent's rendered system prompt bytes, frozen at turn start.
|
||||
* Used by fork subagents to share the parent's prompt cache — re-calling
|
||||
|
||||
290
src/__tests__/bugfixes.test.ts
Normal file
290
src/__tests__/bugfixes.test.ts
Normal file
@@ -0,0 +1,290 @@
|
||||
/**
|
||||
* Tests for Bug Fixes applied to openclaude.
|
||||
*
|
||||
* Covers:
|
||||
* 1. Gemini `store: false` rejection fix
|
||||
* 2. Session timeout / 500 error fix (stream idle timeout)
|
||||
* 3. Agent loop continuation nudge
|
||||
* 4. Web search result count improvements
|
||||
*/
|
||||
|
||||
import { describe, test, expect } from 'bun:test'
|
||||
import { resolve } from 'path'
|
||||
|
||||
const SRC = resolve(import.meta.dir, '..')
|
||||
const file = (relative: string) => Bun.file(resolve(SRC, relative))
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Fix 1: Gemini `store: false` rejection
|
||||
// ---------------------------------------------------------------------------
|
||||
describe('Gemini store field fix', () => {
|
||||
test('isGeminiMode is imported and used in openaiShim', async () => {
|
||||
const content = await file('services/api/openaiShim.ts').text()
|
||||
|
||||
// Verify the fix: store deletion should check for Gemini mode
|
||||
expect(content).toContain('isGeminiMode()')
|
||||
expect(content).toContain("mistral and gemini don't recognize body.store")
|
||||
// Ensure the delete body.store is guarded for both Mistral and Gemini
|
||||
expect(content).toMatch(/isMistral\s*\|\|\s*isGeminiMode\(\)/)
|
||||
})
|
||||
|
||||
test('store: false is still set by default (OpenAI needs it)', async () => {
|
||||
const content = await file('services/api/openaiShim.ts').text()
|
||||
|
||||
// The body should still have store: false by default
|
||||
expect(content).toMatch(/store:\s*false/)
|
||||
// But it should be deleted for non-OpenAI providers
|
||||
expect(content).toMatch(/delete body\.store/)
|
||||
})
|
||||
})
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Fix 2: Session timeout — stream idle timeout
|
||||
// ---------------------------------------------------------------------------
|
||||
describe('Session timeout fix', () => {
|
||||
test('openaiShim has idle timeout for SSE streams', async () => {
|
||||
const content = await file('services/api/openaiShim.ts').text()
|
||||
|
||||
expect(content).toContain('STREAM_IDLE_TIMEOUT_MS')
|
||||
expect(content).toContain('readWithTimeout')
|
||||
expect(content).toMatch(/readWithTimeout\(\)/)
|
||||
})
|
||||
|
||||
test('codexShim has idle timeout for SSE streams', async () => {
|
||||
const content = await file('services/api/codexShim.ts').text()
|
||||
|
||||
expect(content).toContain('STREAM_IDLE_TIMEOUT_MS')
|
||||
expect(content).toContain('readWithTimeout')
|
||||
expect(content).toMatch(/readWithTimeout\(\)/)
|
||||
})
|
||||
|
||||
test('idle timeout is set to a reasonable value (>= 60s)', async () => {
|
||||
const content = await file('services/api/openaiShim.ts').text()
|
||||
|
||||
// Extract the timeout value (supports numeric separators like 120_000)
|
||||
const match = content.match(/STREAM_IDLE_TIMEOUT_MS\s*=\s*([\d_]+)/)
|
||||
expect(match).not.toBeNull()
|
||||
const timeoutMs = parseInt(match![1].replace(/_/g, ''), 10)
|
||||
expect(timeoutMs).toBeGreaterThanOrEqual(60_000)
|
||||
})
|
||||
})
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Fix 3: Agent loop continuation nudge
|
||||
// ---------------------------------------------------------------------------
|
||||
describe('Agent loop continuation nudge', () => {
|
||||
test('query.ts has continuation signal detection', async () => {
|
||||
const content = await file('query.ts').text()
|
||||
|
||||
expect(content).toContain('continuationSignals')
|
||||
expect(content).toContain('Continuation nudge triggered')
|
||||
expect(content).toContain('continuation_nudge')
|
||||
})
|
||||
|
||||
test('continuation signals include tightened patterns', async () => {
|
||||
const content = await file('query.ts').text()
|
||||
|
||||
// Should detect tightened patterns requiring explicit action verbs
|
||||
expect(content).toMatch(/so now \(i\|let me\|we\)/)
|
||||
expect(content).toContain('completionMarkers')
|
||||
expect(content).toContain('MAX_CONTINUATION_NUDGES')
|
||||
// Verify the nudge counter guard exists
|
||||
expect(content).toMatch(/continuationNudgeCount\s*<\s*MAX_CONTINUATION_NUDGES/)
|
||||
})
|
||||
|
||||
test('nudge creates a meta user message to continue', async () => {
|
||||
const content = await file('query.ts').text()
|
||||
|
||||
expect(content).toContain(
|
||||
'Continue with the task. Use the appropriate tools to proceed.',
|
||||
)
|
||||
})
|
||||
})
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Fix 4: Web search result count improvements
|
||||
// ---------------------------------------------------------------------------
|
||||
describe('Web search result count improvements', () => {
|
||||
test('Bing provider requests at least 15 results', async () => {
|
||||
const content = await file(
|
||||
'tools/WebSearchTool/providers/bing.ts',
|
||||
).text()
|
||||
|
||||
expect(content).toMatch(/count.*['"]15['"]/)
|
||||
})
|
||||
|
||||
test('Tavily provider requests at least 15 results', async () => {
|
||||
const content = await file(
|
||||
'tools/WebSearchTool/providers/tavily.ts',
|
||||
).text()
|
||||
|
||||
expect(content).toMatch(/max_results:\s*15/)
|
||||
})
|
||||
|
||||
test('Exa provider requests at least 15 results', async () => {
|
||||
const content = await file(
|
||||
'tools/WebSearchTool/providers/exa.ts',
|
||||
).text()
|
||||
|
||||
expect(content).toMatch(/numResults:\s*15/)
|
||||
})
|
||||
|
||||
test('Firecrawl provider requests at least 15 results', async () => {
|
||||
const content = await file(
|
||||
'tools/WebSearchTool/providers/firecrawl.ts',
|
||||
).text()
|
||||
|
||||
expect(content).toMatch(/limit:\s*15/)
|
||||
})
|
||||
|
||||
test('Mojeek provider requests at least 10 results', async () => {
|
||||
const content = await file(
|
||||
'tools/WebSearchTool/providers/mojeek.ts',
|
||||
).text()
|
||||
|
||||
// Mojeek uses 't' param for result count — verify it's set to 10
|
||||
expect(content).toMatch(/searchParams\.set\('t',\s*'10'\)/)
|
||||
})
|
||||
|
||||
test('You.com provider requests at least 10 results', async () => {
|
||||
const content = await file(
|
||||
'tools/WebSearchTool/providers/you.ts',
|
||||
).text()
|
||||
|
||||
expect(content).toMatch(/num_web_results.*['"]10['"]/)
|
||||
})
|
||||
|
||||
test('Jina provider requests at least 10 results', async () => {
|
||||
const content = await file(
|
||||
'tools/WebSearchTool/providers/jina.ts',
|
||||
).text()
|
||||
|
||||
expect(content).toMatch(/count.*['"]10['"]/)
|
||||
})
|
||||
|
||||
test('Native Anthropic web search max_uses increased to 15', async () => {
|
||||
const content = await file(
|
||||
'tools/WebSearchTool/WebSearchTool.ts',
|
||||
).text()
|
||||
|
||||
expect(content).toMatch(/max_uses:\s*15/)
|
||||
})
|
||||
|
||||
test('codex web search path guarantees a non-empty result body', async () => {
|
||||
const content = await file(
|
||||
'tools/WebSearchTool/WebSearchTool.ts',
|
||||
).text()
|
||||
|
||||
expect(content).toContain("results.push('No results found.')")
|
||||
})
|
||||
})
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Fix 5: MCP tool timeout fix
|
||||
// ---------------------------------------------------------------------------
|
||||
describe('MCP tool timeout fix', () => {
|
||||
test('default MCP tool timeout is reasonable (not 27 hours)', async () => {
|
||||
const content = await file('services/mcp/client.ts').text()
|
||||
|
||||
// Should NOT have the old ~27.8 hour default
|
||||
expect(content).not.toContain('100_000_000')
|
||||
// Should have a reasonable timeout (5 minutes = 300_000ms)
|
||||
expect(content).toMatch(/DEFAULT_MCP_TOOL_TIMEOUT_MS\s*=\s*300_000/)
|
||||
})
|
||||
|
||||
test('MCP tools/list has retry logic', async () => {
|
||||
const content = await file('services/mcp/client.ts').text()
|
||||
|
||||
expect(content).toContain('tools/list failed (attempt')
|
||||
expect(content).toContain('Retrying...')
|
||||
})
|
||||
|
||||
test('MCP URL elicitation checks abort signal', async () => {
|
||||
const content = await file('services/mcp/client.ts').text()
|
||||
|
||||
expect(content).toContain('signal.aborted')
|
||||
expect(content).toContain('Tool call aborted during URL elicitation')
|
||||
})
|
||||
|
||||
test('MCP tool error messages include server and tool name in telemetry', async () => {
|
||||
const content = await file('services/mcp/client.ts').text()
|
||||
|
||||
// Telemetry message should include context like "MCP tool [serverName] toolName: error"
|
||||
// The human-readable message stays unchanged to avoid breaking error consumers
|
||||
expect(content).toContain('MCP tool [${name}] ${tool}:')
|
||||
})
|
||||
})
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Cross-cutting: verify no regressions
|
||||
// ---------------------------------------------------------------------------
|
||||
describe('Regression checks', () => {
|
||||
test('store field is still set for OpenAI (not deleted unconditionally)', async () => {
|
||||
const content = await file('services/api/openaiShim.ts').text()
|
||||
|
||||
// store: false should exist in body construction
|
||||
expect(content).toMatch(/store:\s*false/)
|
||||
// But delete body.store should be conditional (guarded by if)
|
||||
const deleteLines = content.split('\n').filter(l => l.includes('delete body.store'))
|
||||
expect(deleteLines.length).toBeGreaterThan(0)
|
||||
// Verify the delete is inside a conditional block by checking surrounding context
|
||||
for (const line of deleteLines) {
|
||||
const trimmed = line.trim()
|
||||
// Should be either inside an if block (indented delete) or a comment
|
||||
expect(
|
||||
trimmed.startsWith('delete') && !trimmed.includes('// unconditional'),
|
||||
).toBe(true)
|
||||
}
|
||||
})
|
||||
})
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Fix 6: SendMessageTool race condition guard
|
||||
// ---------------------------------------------------------------------------
|
||||
describe('SendMessageTool race condition fix', () => {
|
||||
test('SendMessageTool has double-check for concurrent resume', async () => {
|
||||
const content = await file('tools/SendMessageTool/SendMessageTool.ts').text()
|
||||
|
||||
// Should have a second status check before resuming to prevent race
|
||||
expect(content).toContain('was concurrently resumed')
|
||||
// The freshTask check should re-read from getAppState
|
||||
expect(content).toMatch(/const freshTask = context\.getAppState\(\)\.tasks\[agentId\]/)
|
||||
})
|
||||
})
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Fix 7: AgentTool dump state cleanup
|
||||
// ---------------------------------------------------------------------------
|
||||
describe('AgentTool cleanup fix', () => {
|
||||
test('backgrounded agent always cleans up dump state', async () => {
|
||||
const content = await file('tools/AgentTool/AgentTool.tsx').text()
|
||||
|
||||
// The backgrounded agent's finally block should clean up regardless
|
||||
// of whether the agent crashed or completed normally
|
||||
expect(content).toContain('Defensive cleanup: wrap each call so one failure')
|
||||
// Verify cleanup is wrapped in try/catch for defensive execution
|
||||
expect(content).toMatch(/try\s*\{\s*clearInvokedSkillsForAgent/)
|
||||
expect(content).toMatch(/try\s*\{\s*clearDumpState/)
|
||||
})
|
||||
})
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Fix 8: Context overflow 500 error handling
|
||||
// ---------------------------------------------------------------------------
|
||||
describe('Context overflow 500 fix', () => {
|
||||
test('errors.ts has handler for context overflow 500 errors', async () => {
|
||||
const content = await file('services/api/errors.ts').text()
|
||||
|
||||
expect(content).toContain('500 errors caused by context overflow')
|
||||
expect(content).toContain('too many tokens')
|
||||
expect(content).toContain('The conversation has grown too large')
|
||||
})
|
||||
|
||||
test('query.ts has circuit breaker safety net for oversized context', async () => {
|
||||
const content = await file('query.ts').text()
|
||||
|
||||
expect(content).toContain('Safety net: when auto-compact')
|
||||
expect(content).toContain('circuit breaker has tripped')
|
||||
expect(content).toContain('automatic compaction has failed')
|
||||
})
|
||||
})
|
||||
55
src/__tests__/providerCounts.test.ts
Normal file
55
src/__tests__/providerCounts.test.ts
Normal file
@@ -0,0 +1,55 @@
|
||||
/**
|
||||
* Tests for Web Search Provider result count configurations.
|
||||
*/
|
||||
|
||||
import { describe, test, expect } from 'bun:test'
|
||||
import { resolve } from 'path'
|
||||
|
||||
const SRC = resolve(import.meta.dir, '..', 'tools', 'WebSearchTool', 'providers')
|
||||
const file = (name: string) => Bun.file(resolve(SRC, name))
|
||||
|
||||
describe('Provider result counts', () => {
|
||||
const providers = [
|
||||
'bing.ts',
|
||||
'tavily.ts',
|
||||
'exa.ts',
|
||||
'firecrawl.ts',
|
||||
'mojeek.ts',
|
||||
'you.ts',
|
||||
'jina.ts',
|
||||
'duckduckgo.ts',
|
||||
// linkup.ts excluded — uses depth param, not a result count field
|
||||
]
|
||||
|
||||
for (const name of providers) {
|
||||
test(`${name} exists and is readable`, async () => {
|
||||
const f = file(name)
|
||||
expect(await f.exists()).toBe(true)
|
||||
const content = await f.text()
|
||||
expect(content.length).toBeGreaterThan(100)
|
||||
})
|
||||
}
|
||||
|
||||
test('No provider hardcodes a limit below 10', async () => {
|
||||
const suspiciousPatterns = [
|
||||
/count['":\s]*['"]([1-9])['"]/i,
|
||||
/limit['":\s]*([1-9])\b/,
|
||||
/max_results['":\s]*([1-9])\b/,
|
||||
/numResults['":\s]*([1-9])\b/,
|
||||
]
|
||||
|
||||
for (const name of providers) {
|
||||
const content = await file(name).text()
|
||||
for (const pattern of suspiciousPatterns) {
|
||||
const match = content.match(pattern)
|
||||
if (match) {
|
||||
const num = parseInt(match[1], 10)
|
||||
expect(num).toBeGreaterThanOrEqual(
|
||||
10,
|
||||
`${name} has suspiciously low result count: ${match[0]}`,
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
})
|
||||
})
|
||||
191
src/__tests__/security-hardening.test.ts
Normal file
191
src/__tests__/security-hardening.test.ts
Normal file
@@ -0,0 +1,191 @@
|
||||
/**
|
||||
* Security hardening regression tests.
|
||||
*
|
||||
* Covers:
|
||||
* 1. MCP tool result Unicode sanitization
|
||||
* 2. Sandbox settings source filtering (exclude projectSettings)
|
||||
* 3. Plugin git clone/pull hooks disabled
|
||||
* 4. ANTHROPIC_FOUNDRY_API_KEY removed from SAFE_ENV_VARS
|
||||
* 5. WebFetch SSRF protection via ssrfGuardedLookup
|
||||
*/
|
||||
|
||||
import { describe, test, expect } from 'bun:test'
|
||||
import { resolve } from 'path'
|
||||
|
||||
const SRC = resolve(import.meta.dir, '..')
|
||||
const file = (relative: string) => Bun.file(resolve(SRC, relative))
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Fix 1: MCP tool result Unicode sanitization
|
||||
// ---------------------------------------------------------------------------
|
||||
describe('MCP tool result sanitization', () => {
|
||||
test('transformResultContent sanitizes text content', async () => {
|
||||
const content = await file('services/mcp/client.ts').text()
|
||||
// Tool definitions are already sanitized (line ~1798)
|
||||
expect(content).toContain('recursivelySanitizeUnicode(result.tools)')
|
||||
// Tool results must also be sanitized
|
||||
expect(content).toMatch(
|
||||
/case 'text':[\s\S]*?recursivelySanitizeUnicode\(resultContent\.text\)/,
|
||||
)
|
||||
})
|
||||
|
||||
test('resource text content is also sanitized', async () => {
|
||||
const content = await file('services/mcp/client.ts').text()
|
||||
expect(content).toMatch(
|
||||
/recursivelySanitizeUnicode\(\s*`\$\{prefix\}\$\{resource\.text\}`/,
|
||||
)
|
||||
})
|
||||
})
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Fix 2: Sandbox settings source filtering
|
||||
// ---------------------------------------------------------------------------
|
||||
describe('Sandbox settings trust boundary', () => {
|
||||
test('getSandboxEnabledSetting does not use getSettings_DEPRECATED', async () => {
|
||||
const content = await file('utils/sandbox/sandbox-adapter.ts').text()
|
||||
// Extract the getSandboxEnabledSetting function body
|
||||
const fnMatch = content.match(
|
||||
/function getSandboxEnabledSetting\(\)[^{]*\{([\s\S]*?)\n\}/,
|
||||
)
|
||||
expect(fnMatch).not.toBeNull()
|
||||
const fnBody = fnMatch![1]
|
||||
// Must NOT use getSettings_DEPRECATED (reads all sources including project)
|
||||
expect(fnBody).not.toContain('getSettings_DEPRECATED')
|
||||
// Must use getSettingsForSource for individual trusted sources
|
||||
expect(fnBody).toContain("getSettingsForSource('userSettings')")
|
||||
expect(fnBody).toContain("getSettingsForSource('policySettings')")
|
||||
// Must NOT read from projectSettings
|
||||
expect(fnBody).not.toContain("'projectSettings'")
|
||||
})
|
||||
})
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Fix 3: Plugin git hooks disabled
|
||||
// ---------------------------------------------------------------------------
|
||||
describe('Plugin git operations disable hooks', () => {
|
||||
test('gitClone includes core.hooksPath=/dev/null', async () => {
|
||||
const content = await file('utils/plugins/marketplaceManager.ts').text()
|
||||
// The clone args must disable hooks
|
||||
const cloneSection = content.slice(
|
||||
content.indexOf('export async function gitClone('),
|
||||
content.indexOf('export async function gitClone(') + 2000,
|
||||
)
|
||||
expect(cloneSection).toContain("'core.hooksPath=/dev/null'")
|
||||
})
|
||||
|
||||
test('gitPull includes core.hooksPath=/dev/null', async () => {
|
||||
const content = await file('utils/plugins/marketplaceManager.ts').text()
|
||||
const pullSection = content.slice(
|
||||
content.indexOf('export async function gitPull('),
|
||||
content.indexOf('export async function gitPull(') + 2000,
|
||||
)
|
||||
expect(pullSection).toContain("'core.hooksPath=/dev/null'")
|
||||
})
|
||||
|
||||
test('gitSubmoduleUpdate includes core.hooksPath=/dev/null', async () => {
|
||||
const content = await file('utils/plugins/marketplaceManager.ts').text()
|
||||
const subSection = content.slice(
|
||||
content.indexOf('async function gitSubmoduleUpdate('),
|
||||
content.indexOf('async function gitSubmoduleUpdate(') + 1000,
|
||||
)
|
||||
expect(subSection).toContain("'core.hooksPath=/dev/null'")
|
||||
})
|
||||
})
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Fix 4: ANTHROPIC_FOUNDRY_API_KEY not in SAFE_ENV_VARS
|
||||
// ---------------------------------------------------------------------------
|
||||
describe('SAFE_ENV_VARS excludes credentials', () => {
|
||||
test('ANTHROPIC_FOUNDRY_API_KEY is not in SAFE_ENV_VARS', async () => {
|
||||
const content = await file('utils/managedEnvConstants.ts').text()
|
||||
// Extract the SAFE_ENV_VARS set definition
|
||||
const safeStart = content.indexOf('export const SAFE_ENV_VARS')
|
||||
const safeEnd = content.indexOf('])', safeStart)
|
||||
const safeSection = content.slice(safeStart, safeEnd)
|
||||
expect(safeSection).not.toContain('ANTHROPIC_FOUNDRY_API_KEY')
|
||||
})
|
||||
})
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Fix 5: WebFetch SSRF protection
|
||||
// ---------------------------------------------------------------------------
|
||||
describe('WebFetch SSRF guard', () => {
|
||||
test('getWithPermittedRedirects uses ssrfGuardedLookup', async () => {
|
||||
const content = await file('tools/WebFetchTool/utils.ts').text()
|
||||
expect(content).toContain(
|
||||
"import { ssrfGuardedLookup } from '../../utils/hooks/ssrfGuard.js'",
|
||||
)
|
||||
// The axios.get call in getWithPermittedRedirects must include lookup
|
||||
const fnSection = content.slice(
|
||||
content.indexOf('export async function getWithPermittedRedirects('),
|
||||
content.indexOf('export async function getWithPermittedRedirects(') +
|
||||
1000,
|
||||
)
|
||||
expect(fnSection).toContain('lookup: ssrfGuardedLookup')
|
||||
})
|
||||
})
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Fix 6: Swarm permission file polling removed (security hardening)
|
||||
// ---------------------------------------------------------------------------
|
||||
describe('Swarm permission file polling removed', () => {
|
||||
test('useSwarmPermissionPoller hook no longer exists', async () => {
|
||||
const content = await file(
|
||||
'hooks/useSwarmPermissionPoller.ts',
|
||||
).text()
|
||||
// The file-based polling hook must not exist — it read from an
|
||||
// unauthenticated resolved/ directory where any local process could
|
||||
// forge approval files.
|
||||
expect(content).not.toContain('function useSwarmPermissionPoller(')
|
||||
// The file-based processResponse must not exist
|
||||
expect(content).not.toContain('function processResponse(')
|
||||
})
|
||||
|
||||
test('poller does not import from permissionSync', async () => {
|
||||
const content = await file(
|
||||
'hooks/useSwarmPermissionPoller.ts',
|
||||
).text()
|
||||
// Must not import anything from permissionSync — all file-based
|
||||
// functions have been removed from this module's dependencies
|
||||
expect(content).not.toContain('permissionSync')
|
||||
})
|
||||
|
||||
test('file-based permission functions are marked deprecated', async () => {
|
||||
const content = await file(
|
||||
'utils/swarm/permissionSync.ts',
|
||||
).text()
|
||||
// All file-based functions must have @deprecated JSDoc
|
||||
const deprecatedFns = [
|
||||
'writePermissionRequest',
|
||||
'readPendingPermissions',
|
||||
'readResolvedPermission',
|
||||
'resolvePermission',
|
||||
'pollForResponse',
|
||||
'removeWorkerResponse',
|
||||
]
|
||||
for (const fn of deprecatedFns) {
|
||||
// Find the function and check that @deprecated appears before it
|
||||
const fnIndex = content.indexOf(`export async function ${fn}(`)
|
||||
if (fnIndex === -1) continue // submitPermissionRequest is a const, not async function
|
||||
const preceding = content.slice(Math.max(0, fnIndex - 500), fnIndex)
|
||||
expect(preceding).toContain('@deprecated')
|
||||
}
|
||||
})
|
||||
|
||||
test('mailbox-based functions are NOT deprecated', async () => {
|
||||
const content = await file(
|
||||
'utils/swarm/permissionSync.ts',
|
||||
).text()
|
||||
// These are the active path — must not be deprecated
|
||||
const activeFns = [
|
||||
'sendPermissionRequestViaMailbox',
|
||||
'sendPermissionResponseViaMailbox',
|
||||
]
|
||||
for (const fn of activeFns) {
|
||||
const fnIndex = content.indexOf(`export async function ${fn}(`)
|
||||
expect(fnIndex).not.toBe(-1)
|
||||
const preceding = content.slice(Math.max(0, fnIndex - 300), fnIndex)
|
||||
expect(preceding).not.toContain('@deprecated')
|
||||
}
|
||||
})
|
||||
})
|
||||
@@ -112,7 +112,7 @@ type State = {
|
||||
agentColorIndex: number
|
||||
// Last API request for bug reports
|
||||
lastAPIRequest: Omit<BetaMessageStreamParams, 'messages'> | null
|
||||
// Messages from the last API request (ant-only; reference, not clone).
|
||||
// Messages from the last API request (internal-only; reference, not clone).
|
||||
// Captures the exact post-compaction, CLAUDE.md-injected message set sent
|
||||
// to the API so /share's serialized_conversation.json reflects reality.
|
||||
lastAPIRequestMessages: BetaMessageStreamParams['messages'] | null
|
||||
@@ -185,7 +185,7 @@ type State = {
|
||||
agentId: string | null
|
||||
}
|
||||
>
|
||||
// Track slow operations for dev bar display (ant-only)
|
||||
// Track slow operations for dev bar display (internal-only)
|
||||
slowOperations: Array<{
|
||||
operation: string
|
||||
durationMs: number
|
||||
@@ -1562,29 +1562,8 @@ export function clearInvokedSkillsForAgent(agentId: string): void {
|
||||
}
|
||||
}
|
||||
|
||||
// Slow operations tracking for dev bar
|
||||
const MAX_SLOW_OPERATIONS = 10
|
||||
const SLOW_OPERATION_TTL_MS = 10000
|
||||
|
||||
export function addSlowOperation(operation: string, durationMs: number): void {
|
||||
if (process.env.USER_TYPE !== 'ant') return
|
||||
// Skip tracking for editor sessions (user editing a prompt file in $EDITOR)
|
||||
// These are intentionally slow since the user is drafting text
|
||||
if (operation.includes('exec') && operation.includes('claude-prompt-')) {
|
||||
return
|
||||
}
|
||||
const now = Date.now()
|
||||
// Remove stale operations
|
||||
STATE.slowOperations = STATE.slowOperations.filter(
|
||||
op => now - op.timestamp < SLOW_OPERATION_TTL_MS,
|
||||
)
|
||||
// Add new operation
|
||||
STATE.slowOperations.push({ operation, durationMs, timestamp: now })
|
||||
// Keep only the most recent operations
|
||||
if (STATE.slowOperations.length > MAX_SLOW_OPERATIONS) {
|
||||
STATE.slowOperations = STATE.slowOperations.slice(-MAX_SLOW_OPERATIONS)
|
||||
}
|
||||
}
|
||||
// Slow operations tracking removed (was internal-only).
|
||||
// Functions kept as no-ops to avoid breaking callers.
|
||||
|
||||
const EMPTY_SLOW_OPERATIONS: ReadonlyArray<{
|
||||
operation: string
|
||||
@@ -1592,32 +1571,17 @@ const EMPTY_SLOW_OPERATIONS: ReadonlyArray<{
|
||||
timestamp: number
|
||||
}> = []
|
||||
|
||||
export function addSlowOperation(
|
||||
_operation: string,
|
||||
_durationMs: number,
|
||||
): void {}
|
||||
|
||||
export function getSlowOperations(): ReadonlyArray<{
|
||||
operation: string
|
||||
durationMs: number
|
||||
timestamp: number
|
||||
}> {
|
||||
// Most common case: nothing tracked. Return a stable reference so the
|
||||
// caller's setState() can bail via Object.is instead of re-rendering at 2fps.
|
||||
if (STATE.slowOperations.length === 0) {
|
||||
return EMPTY_SLOW_OPERATIONS
|
||||
}
|
||||
const now = Date.now()
|
||||
// Only allocate a new array when something actually expired; otherwise keep
|
||||
// the reference stable across polls while ops are still fresh.
|
||||
if (
|
||||
STATE.slowOperations.some(op => now - op.timestamp >= SLOW_OPERATION_TTL_MS)
|
||||
) {
|
||||
STATE.slowOperations = STATE.slowOperations.filter(
|
||||
op => now - op.timestamp < SLOW_OPERATION_TTL_MS,
|
||||
)
|
||||
if (STATE.slowOperations.length === 0) {
|
||||
return EMPTY_SLOW_OPERATIONS
|
||||
}
|
||||
}
|
||||
// Safe to return directly: addSlowOperation() reassigns STATE.slowOperations
|
||||
// before pushing, so the array held in React state is never mutated.
|
||||
return STATE.slowOperations
|
||||
return EMPTY_SLOW_OPERATIONS
|
||||
}
|
||||
|
||||
export function getMainThreadAgentType(): string | undefined {
|
||||
@@ -1756,3 +1720,12 @@ export function setPromptId(id: string | null): void {
|
||||
STATE.promptId = id
|
||||
}
|
||||
|
||||
// Stub for feature-gated REPL bridge (not available in open build)
|
||||
export function isReplBridgeActive(): boolean {
|
||||
return false
|
||||
}
|
||||
|
||||
export function getReplBridgeHandle(): null {
|
||||
return null
|
||||
}
|
||||
|
||||
|
||||
@@ -1,11 +1,11 @@
|
||||
/**
|
||||
* Shared bridge auth/URL resolution. Consolidates the ant-only
|
||||
* Shared bridge auth/URL resolution. Consolidates the internal-only
|
||||
* CLAUDE_BRIDGE_* dev overrides that were previously copy-pasted across
|
||||
* a dozen files — inboundAttachments, BriefTool/upload, bridgeMain,
|
||||
* initReplBridge, remoteBridgeCore, daemon workers, /rename,
|
||||
* /remote-control.
|
||||
*
|
||||
* Two layers: *Override() returns the ant-only env var (or undefined);
|
||||
* Two layers: *Override() returns the internal-only env var (or undefined);
|
||||
* the non-Override versions fall through to the real OAuth store/config.
|
||||
* Callers that compose with a different auth source (e.g. daemon workers
|
||||
* using IPC auth) use the Override getters directly.
|
||||
@@ -14,21 +14,14 @@
|
||||
import { getOauthConfig } from '../constants/oauth.js'
|
||||
import { getClaudeAIOAuthTokens } from '../utils/auth.js'
|
||||
|
||||
/** Ant-only dev override: CLAUDE_BRIDGE_OAUTH_TOKEN, else undefined. */
|
||||
/** Dev override: CLAUDE_BRIDGE_OAUTH_TOKEN, else undefined. */
|
||||
export function getBridgeTokenOverride(): string | undefined {
|
||||
return (
|
||||
(process.env.USER_TYPE === 'ant' &&
|
||||
process.env.CLAUDE_BRIDGE_OAUTH_TOKEN) ||
|
||||
undefined
|
||||
)
|
||||
return process.env.CLAUDE_BRIDGE_OAUTH_TOKEN || undefined
|
||||
}
|
||||
|
||||
/** Ant-only dev override: CLAUDE_BRIDGE_BASE_URL, else undefined. */
|
||||
/** Dev override: CLAUDE_BRIDGE_BASE_URL, else undefined. */
|
||||
export function getBridgeBaseUrlOverride(): string | undefined {
|
||||
return (
|
||||
(process.env.USER_TYPE === 'ant' && process.env.CLAUDE_BRIDGE_BASE_URL) ||
|
||||
undefined
|
||||
)
|
||||
return process.env.CLAUDE_BRIDGE_BASE_URL || undefined
|
||||
}
|
||||
|
||||
/**
|
||||
|
||||
@@ -70,13 +70,13 @@ export async function isBridgeEnabledBlocking(): Promise<boolean> {
|
||||
export async function getBridgeDisabledReason(): Promise<string | null> {
|
||||
if (feature('BRIDGE_MODE')) {
|
||||
if (!isClaudeAISubscriber()) {
|
||||
return 'Remote Control requires a claude.ai subscription. Run `claude auth login` to sign in with your claude.ai account.'
|
||||
return 'Remote Control requires a claude.ai subscription. Run `openclaude auth login` to sign in with your claude.ai account.'
|
||||
}
|
||||
if (!hasProfileScope()) {
|
||||
return 'Remote Control requires a full-scope login token. Long-lived tokens (from `claude setup-token` or CLAUDE_CODE_OAUTH_TOKEN) are limited to inference-only for security reasons. Run `claude auth login` to use Remote Control.'
|
||||
return 'Remote Control requires a full-scope login token. Long-lived tokens (from `openclaude setup-token` or CLAUDE_CODE_OAUTH_TOKEN) are limited to inference-only for security reasons. Run `openclaude auth login` to use Remote Control.'
|
||||
}
|
||||
if (!getOauthAccountInfo()?.organizationUuid) {
|
||||
return 'Unable to determine your organization for Remote Control eligibility. Run `claude auth login` to refresh your account information.'
|
||||
return 'Unable to determine your organization for Remote Control eligibility. Run `openclaude auth login` to refresh your account information.'
|
||||
}
|
||||
if (!(await checkGate_CACHED_OR_BLOCKING('tengu_ccr_bridge'))) {
|
||||
return 'Remote Control is not yet enabled for your account.'
|
||||
@@ -166,7 +166,7 @@ export function checkBridgeMinVersion(): string | null {
|
||||
minVersion: string
|
||||
}>('tengu_bridge_min_version', { minVersion: '0.0.0' })
|
||||
if (config.minVersion && lt(MACRO.VERSION, config.minVersion)) {
|
||||
return `Your version of Claude Code (${MACRO.VERSION}) is too old for Remote Control.\nVersion ${config.minVersion} or higher is required. Run \`claude update\` to update.`
|
||||
return `Your version of OpenClaude (${MACRO.VERSION}) is too old for Remote Control.\nVersion ${config.minVersion} or higher is required. Run \`openclaude update\` to update.`
|
||||
}
|
||||
}
|
||||
return null
|
||||
@@ -174,7 +174,7 @@ export function checkBridgeMinVersion(): string | null {
|
||||
|
||||
/**
|
||||
* Default for remoteControlAtStartup when the user hasn't explicitly set it.
|
||||
* When the CCR_AUTO_CONNECT build flag is present (ant-only) and the
|
||||
* When the CCR_AUTO_CONNECT build flag is present (internal-only) and the
|
||||
* tengu_cobalt_harbor GrowthBook gate is on, all sessions connect to CCR by
|
||||
* default — the user can still opt out by setting remoteControlAtStartup=false
|
||||
* in config (explicit settings always win over this default).
|
||||
|
||||
@@ -1520,7 +1520,7 @@ export async function runBridgeLoop(
|
||||
// Skip when the loop exited fatally (env expired, auth failed, give-up) —
|
||||
// resume is impossible in those cases and the message would contradict the
|
||||
// error already printed.
|
||||
// feature('KAIROS') gate: --session-id is ant-only; without the gate,
|
||||
// feature('KAIROS') gate: --session-id is internal-only; without the gate,
|
||||
// revert to the pre-PR behavior (archive + deregister on every shutdown).
|
||||
if (
|
||||
feature('KAIROS') &&
|
||||
@@ -1888,7 +1888,7 @@ export function parseArgs(args: string[]): ParsedArgs {
|
||||
|
||||
async function printHelp(): Promise<void> {
|
||||
// Use EXTERNAL_PERMISSION_MODES for help text — internal modes (bubble)
|
||||
// are ant-only and auto is feature-gated; they're still accepted by validation.
|
||||
// are internal-only and auto is feature-gated; they're still accepted by validation.
|
||||
const { EXTERNAL_PERMISSION_MODES } = await import('../types/permissions.js')
|
||||
const modes = EXTERNAL_PERMISSION_MODES.join(', ')
|
||||
const showServer = await isMultiSessionSpawnEnabled()
|
||||
@@ -2194,14 +2194,10 @@ export async function bridgeMain(args: string[]): Promise<void> {
|
||||
|
||||
// Session ingress URL for WebSocket connections. In production this is the
|
||||
// same as baseUrl (Envoy routes /v1/session_ingress/* to session-ingress).
|
||||
// Locally, session-ingress runs on a different port (9413) than the
|
||||
// contain-provide-api (8211), so CLAUDE_BRIDGE_SESSION_INGRESS_URL must be
|
||||
// set explicitly. Ant-only, matching CLAUDE_BRIDGE_BASE_URL.
|
||||
// Locally, session-ingress may run on a different port, so
|
||||
// CLAUDE_BRIDGE_SESSION_INGRESS_URL can override the default.
|
||||
const sessionIngressUrl =
|
||||
process.env.USER_TYPE === 'ant' &&
|
||||
process.env.CLAUDE_BRIDGE_SESSION_INGRESS_URL
|
||||
? process.env.CLAUDE_BRIDGE_SESSION_INGRESS_URL
|
||||
: baseUrl
|
||||
process.env.CLAUDE_BRIDGE_SESSION_INGRESS_URL || baseUrl
|
||||
|
||||
const { getBranch, getRemoteUrl, findGitRoot } = await import(
|
||||
'../utils/git.js'
|
||||
@@ -2252,7 +2248,7 @@ export async function bridgeMain(args: string[]): Promise<void> {
|
||||
})
|
||||
// biome-ignore lint/suspicious/noConsole: intentional dialog output
|
||||
console.log(
|
||||
`\nClaude Remote Control is launching in spawn mode which lets you create new sessions in this project from Claude Code on Web or your Mobile app. Learn more here: https://code.claude.com/docs/en/remote-control\n\n` +
|
||||
`\nClaude Remote Control is launching in spawn mode which lets you create new sessions in this project from OpenClaude on the web or your mobile app. Learn more here: https://code.claude.com/docs/en/remote-control\n\n` +
|
||||
`Spawn mode for this project:\n` +
|
||||
` [1] same-dir \u2014 sessions share the current directory (default)\n` +
|
||||
` [2] worktree \u2014 each session gets an isolated git worktree\n\n` +
|
||||
@@ -2356,7 +2352,7 @@ export async function bridgeMain(args: string[]): Promise<void> {
|
||||
// environment_id and reuse that for registration (idempotent on the
|
||||
// backend). Left undefined otherwise — the backend rejects
|
||||
// client-generated UUIDs and will allocate a fresh environment.
|
||||
// feature('KAIROS') gate: --session-id is ant-only; parseArgs already
|
||||
// feature('KAIROS') gate: --session-id is internal-only; parseArgs already
|
||||
// rejects the flag when the gate is off, so resumeSessionId is always
|
||||
// undefined here in external builds — this guard is for tree-shaking.
|
||||
let reuseEnvironmentId: string | undefined
|
||||
@@ -2851,10 +2847,7 @@ export async function runBridgeHeadless(
|
||||
)
|
||||
}
|
||||
const sessionIngressUrl =
|
||||
process.env.USER_TYPE === 'ant' &&
|
||||
process.env.CLAUDE_BRIDGE_SESSION_INGRESS_URL
|
||||
? process.env.CLAUDE_BRIDGE_SESSION_INGRESS_URL
|
||||
: baseUrl
|
||||
process.env.CLAUDE_BRIDGE_SESSION_INGRESS_URL || baseUrl
|
||||
|
||||
const { getBranch, getRemoteUrl, findGitRoot } = await import(
|
||||
'../utils/git.js'
|
||||
|
||||
@@ -223,7 +223,7 @@ export function createBridgeLogger(options: {
|
||||
|
||||
if (process.env.USER_TYPE === 'ant' && debugLogPath) {
|
||||
writeStatus(
|
||||
`${chalk.yellow('[ANT-ONLY] Logs:')} ${chalk.dim(debugLogPath)}\n`,
|
||||
`${chalk.yellow('[internal] Logs:')} ${chalk.dim(debugLogPath)}\n`,
|
||||
)
|
||||
}
|
||||
writeStatus(`${indicatorColor(indicator)} ${stateText}${suffix}\n`)
|
||||
|
||||
@@ -217,25 +217,39 @@ export async function getBridgeSession(
|
||||
}
|
||||
|
||||
const url = `${opts?.baseUrl ?? getOauthConfig().BASE_API_URL}/v1/sessions/${sessionId}`
|
||||
const timeoutMs = 10_000
|
||||
logForDebugging(`[bridge] Fetching session ${sessionId}`)
|
||||
|
||||
let response
|
||||
try {
|
||||
response = await axios.get<{ environment_id?: string; title?: string }>(
|
||||
url,
|
||||
{ headers, timeout: 10_000, validateStatus: s => s < 500 },
|
||||
{ headers, timeout: timeoutMs, validateStatus: s => s < 500 },
|
||||
)
|
||||
} catch (err: unknown) {
|
||||
logForDebugging(
|
||||
`[bridge] Session fetch request failed: ${errorMessage(err)}`,
|
||||
)
|
||||
if (axios.isAxiosError(err)) {
|
||||
const status = err.response?.status ?? 'no-response'
|
||||
const code = err.code ?? 'unknown-code'
|
||||
const requestUrl = err.config?.url ?? url
|
||||
const method = err.config?.method?.toUpperCase() ?? 'GET'
|
||||
const message = err.message ?? errorMessage(err)
|
||||
const timeout = err.config?.timeout ?? timeoutMs
|
||||
|
||||
logForDebugging(
|
||||
`[bridge] Session fetch request failed: status=${status} code=${code} method=${method} url=${requestUrl} timeout=${timeout} message=${message}`,
|
||||
)
|
||||
} else {
|
||||
logForDebugging(
|
||||
`[bridge] Session fetch request failed: url=${url} timeout=${timeoutMs} message=${errorMessage(err)}`,
|
||||
)
|
||||
}
|
||||
return null
|
||||
}
|
||||
|
||||
if (response.status !== 200) {
|
||||
const detail = extractErrorDetail(response.data)
|
||||
logForDebugging(
|
||||
`[bridge] Session fetch failed with status ${response.status}${detail ? `: ${detail}` : ''}`,
|
||||
`[bridge] Session fetch failed with status ${response.status} url=${url}${detail ? `: ${detail}` : ''}`,
|
||||
)
|
||||
return null
|
||||
}
|
||||
|
||||
@@ -147,7 +147,7 @@ export async function getEnvLessBridgeConfig(): Promise<EnvLessBridgeConfig> {
|
||||
export async function checkEnvLessBridgeMinVersion(): Promise<string | null> {
|
||||
const cfg = await getEnvLessBridgeConfig()
|
||||
if (cfg.min_version && lt(MACRO.VERSION, cfg.min_version)) {
|
||||
return `Your version of Claude Code (${MACRO.VERSION}) is too old for Remote Control.\nVersion ${cfg.min_version} or higher is required. Run \`claude update\` to update.`
|
||||
return `Your version of OpenClaude (${MACRO.VERSION}) is too old for Remote Control.\nVersion ${cfg.min_version} or higher is required. Run \`openclaude update\` to update.`
|
||||
}
|
||||
return null
|
||||
}
|
||||
|
||||
@@ -161,7 +161,7 @@ export async function initReplBridge(
|
||||
return null
|
||||
}
|
||||
|
||||
// When CLAUDE_BRIDGE_OAUTH_TOKEN is set (ant-only local dev), the bridge
|
||||
// When CLAUDE_BRIDGE_OAUTH_TOKEN is set (internal-only local dev), the bridge
|
||||
// uses that token directly via getBridgeAccessToken() — keychain state is
|
||||
// irrelevant. Skip 2b/2c to preserve that decoupling: an expired keychain
|
||||
// token shouldn't block a bridge connection that doesn't use it.
|
||||
@@ -415,7 +415,7 @@ export async function initReplBridge(
|
||||
`[bridge:repl] Skipping: ${versionError}`,
|
||||
true,
|
||||
)
|
||||
onStateChange?.('failed', 'run `claude update` to upgrade')
|
||||
onStateChange?.('failed', 'run `openclaude update` to upgrade')
|
||||
return null
|
||||
}
|
||||
logForDebugging(
|
||||
@@ -456,7 +456,7 @@ export async function initReplBridge(
|
||||
const versionError = checkBridgeMinVersion()
|
||||
if (versionError) {
|
||||
logBridgeSkip('version_too_old', `[bridge:repl] Skipping: ${versionError}`)
|
||||
onStateChange?.('failed', 'run `claude update` to upgrade')
|
||||
onStateChange?.('failed', 'run `openclaude update` to upgrade')
|
||||
return null
|
||||
}
|
||||
|
||||
@@ -465,10 +465,7 @@ export async function initReplBridge(
|
||||
const branch = await getBranch()
|
||||
const gitRepoUrl = await getRemoteUrl()
|
||||
const sessionIngressUrl =
|
||||
process.env.USER_TYPE === 'ant' &&
|
||||
process.env.CLAUDE_BRIDGE_SESSION_INGRESS_URL
|
||||
? process.env.CLAUDE_BRIDGE_SESSION_INGRESS_URL
|
||||
: baseUrl
|
||||
process.env.CLAUDE_BRIDGE_SESSION_INGRESS_URL || baseUrl
|
||||
|
||||
// Assistant-mode sessions advertise a distinct worker_type so the web UI
|
||||
// can filter them into a dedicated picker. KAIROS guard keeps the
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
// biome-ignore-all assist/source/organizeImports: ANT-ONLY import markers must not be reordered
|
||||
// biome-ignore-all assist/source/organizeImports: internal-only import markers must not be reordered
|
||||
/**
|
||||
* Env-less Remote Control bridge core.
|
||||
*
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
// biome-ignore-all assist/source/organizeImports: ANT-ONLY import markers must not be reordered
|
||||
// biome-ignore-all assist/source/organizeImports: internal-only import markers must not be reordered
|
||||
import { randomUUID } from 'crypto'
|
||||
import {
|
||||
createBridgeApiClient,
|
||||
|
||||
85
src/bridge/sessionRunner.test.ts
Normal file
85
src/bridge/sessionRunner.test.ts
Normal file
@@ -0,0 +1,85 @@
|
||||
import { expect, test } from 'bun:test'
|
||||
import { buildChildEnv } from './sessionRunner.ts'
|
||||
|
||||
// Finding #42-1: sessionRunner spreads the full parent process.env into the
|
||||
// child process environment, leaking API keys, DB credentials, proxy secrets.
|
||||
// Only CLAUDE_CODE_OAUTH_TOKEN was stripped. Fix: explicit allowlist.
|
||||
|
||||
const baseOpts = {
|
||||
accessToken: 'test-access-token',
|
||||
useCcrV2: false as const,
|
||||
}
|
||||
|
||||
test('buildChildEnv does not leak ANTHROPIC_API_KEY to child', () => {
|
||||
const parentEnv = {
|
||||
PATH: '/usr/bin',
|
||||
HOME: '/home/user',
|
||||
ANTHROPIC_API_KEY: 'sk-ant-secret-key',
|
||||
CLAUDE_CODE_SESSION_ACCESS_TOKEN: 'will-be-overwritten',
|
||||
}
|
||||
const env = buildChildEnv(parentEnv, baseOpts)
|
||||
expect(env.ANTHROPIC_API_KEY).toBeUndefined()
|
||||
})
|
||||
|
||||
test('buildChildEnv does not leak OPENAI_API_KEY to child', () => {
|
||||
const parentEnv = {
|
||||
PATH: '/usr/bin',
|
||||
HOME: '/home/user',
|
||||
OPENAI_API_KEY: 'sk-openai-secret',
|
||||
}
|
||||
const env = buildChildEnv(parentEnv, baseOpts)
|
||||
expect(env.OPENAI_API_KEY).toBeUndefined()
|
||||
})
|
||||
|
||||
test('buildChildEnv does not leak arbitrary secrets to child', () => {
|
||||
const parentEnv = {
|
||||
PATH: '/usr/bin',
|
||||
HOME: '/home/user',
|
||||
DB_PASSWORD: 'super-secret',
|
||||
AWS_SECRET_ACCESS_KEY: 'aws-secret',
|
||||
GITHUB_TOKEN: 'ghp_token',
|
||||
}
|
||||
const env = buildChildEnv(parentEnv, baseOpts)
|
||||
expect(env.DB_PASSWORD).toBeUndefined()
|
||||
expect(env.AWS_SECRET_ACCESS_KEY).toBeUndefined()
|
||||
expect(env.GITHUB_TOKEN).toBeUndefined()
|
||||
})
|
||||
|
||||
test('buildChildEnv includes PATH and HOME from parent', () => {
|
||||
const parentEnv = {
|
||||
PATH: '/usr/bin:/usr/local/bin',
|
||||
HOME: '/home/user',
|
||||
ANTHROPIC_API_KEY: 'sk-secret',
|
||||
}
|
||||
const env = buildChildEnv(parentEnv, baseOpts)
|
||||
expect(env.PATH).toBe('/usr/bin:/usr/local/bin')
|
||||
expect(env.HOME).toBe('/home/user')
|
||||
})
|
||||
|
||||
test('buildChildEnv sets CLAUDE_CODE_SESSION_ACCESS_TOKEN from opts', () => {
|
||||
const env = buildChildEnv({ PATH: '/usr/bin' }, { ...baseOpts, accessToken: 'my-token' })
|
||||
expect(env.CLAUDE_CODE_SESSION_ACCESS_TOKEN).toBe('my-token')
|
||||
})
|
||||
|
||||
test('buildChildEnv sets CLAUDE_CODE_ENVIRONMENT_KIND to bridge', () => {
|
||||
const env = buildChildEnv({ PATH: '/usr/bin' }, baseOpts)
|
||||
expect(env.CLAUDE_CODE_ENVIRONMENT_KIND).toBe('bridge')
|
||||
})
|
||||
|
||||
test('buildChildEnv does not pass CLAUDE_CODE_OAUTH_TOKEN to child', () => {
|
||||
const parentEnv = {
|
||||
PATH: '/usr/bin',
|
||||
CLAUDE_CODE_OAUTH_TOKEN: 'oauth-token-to-strip',
|
||||
}
|
||||
const env = buildChildEnv(parentEnv, baseOpts)
|
||||
expect(env.CLAUDE_CODE_OAUTH_TOKEN).toBeUndefined()
|
||||
})
|
||||
|
||||
test('buildChildEnv sets CCR v2 vars when useCcrV2 is true', () => {
|
||||
const env = buildChildEnv(
|
||||
{ PATH: '/usr/bin' },
|
||||
{ accessToken: 'tok', useCcrV2: true, workerEpoch: 42 },
|
||||
)
|
||||
expect(env.CLAUDE_CODE_USE_CCR_V2).toBe('1')
|
||||
expect(env.CLAUDE_CODE_WORKER_EPOCH).toBe('42')
|
||||
})
|
||||
@@ -16,6 +16,69 @@ import type {
|
||||
const MAX_ACTIVITIES = 10
|
||||
const MAX_STDERR_LINES = 10
|
||||
|
||||
/**
|
||||
* Safe OS and runtime variables that the child process needs to function.
|
||||
* Everything else (API keys, DB passwords, proxy secrets, etc.) must not
|
||||
* be inherited — the child authenticates via CLAUDE_CODE_SESSION_ACCESS_TOKEN.
|
||||
*/
|
||||
const CHILD_ENV_ALLOWLIST = new Set([
|
||||
// System / shell
|
||||
'PATH', 'HOME', 'USERPROFILE', 'HOMEPATH', 'HOMEDRIVE',
|
||||
'USERNAME', 'USER', 'LOGNAME',
|
||||
'TEMP', 'TMP', 'TMPDIR',
|
||||
'SYSTEMROOT', 'SYSTEMDRIVE', 'COMSPEC', 'WINDIR',
|
||||
'LANG', 'LC_ALL', 'LC_CTYPE',
|
||||
// Node.js runtime
|
||||
'NODE_OPTIONS', 'NODE_PATH', 'NODE_ENV',
|
||||
// OpenClaude session / bridge (non-secret)
|
||||
'CLAUDE_CODE_ENVIRONMENT_KIND',
|
||||
'CLAUDE_CODE_FORCE_SANDBOX',
|
||||
'CLAUDE_CODE_BUBBLEWRAP',
|
||||
'CLAUDE_CODE_ENTRYPOINT',
|
||||
'CLAUDE_CODE_COORDINATOR_MODE',
|
||||
'CLAUDE_CODE_PERMISSIONS_VERSION',
|
||||
'CLAUDE_CODE_PERMISSIONS_SETTING',
|
||||
// Display / terminal
|
||||
'TERM', 'COLORTERM', 'FORCE_COLOR', 'NO_COLOR',
|
||||
])
|
||||
|
||||
type BuildChildEnvOpts = {
|
||||
accessToken: string
|
||||
useCcrV2: boolean
|
||||
workerEpoch?: number
|
||||
sandbox?: boolean
|
||||
}
|
||||
|
||||
/**
|
||||
* Build the environment for the child CC process from an explicit allowlist.
|
||||
* This prevents the parent's API keys and credentials from leaking to the child.
|
||||
*/
|
||||
export function buildChildEnv(
|
||||
parentEnv: NodeJS.ProcessEnv,
|
||||
opts: BuildChildEnvOpts,
|
||||
): NodeJS.ProcessEnv {
|
||||
// Start from allowlisted parent vars only
|
||||
const env: NodeJS.ProcessEnv = {}
|
||||
for (const key of Object.keys(parentEnv)) {
|
||||
if (CHILD_ENV_ALLOWLIST.has(key)) {
|
||||
env[key] = parentEnv[key]
|
||||
}
|
||||
}
|
||||
|
||||
// Bridge-required overrides
|
||||
env.CLAUDE_CODE_OAUTH_TOKEN = undefined // explicitly strip
|
||||
env.CLAUDE_CODE_ENVIRONMENT_KIND = 'bridge'
|
||||
if (opts.sandbox) env.CLAUDE_CODE_FORCE_SANDBOX = '1'
|
||||
env.CLAUDE_CODE_SESSION_ACCESS_TOKEN = opts.accessToken
|
||||
env.CLAUDE_CODE_POST_FOR_SESSION_INGRESS_V2 = '1'
|
||||
if (opts.useCcrV2) {
|
||||
env.CLAUDE_CODE_USE_CCR_V2 = '1'
|
||||
env.CLAUDE_CODE_WORKER_EPOCH = String(opts.workerEpoch)
|
||||
}
|
||||
|
||||
return env
|
||||
}
|
||||
|
||||
/**
|
||||
* Sanitize a session ID for use in file names.
|
||||
* Strips any characters that could cause path traversal (e.g. `../`, `/`)
|
||||
@@ -303,24 +366,12 @@ export function createSessionSpawner(deps: SessionSpawnerDeps): SessionSpawner {
|
||||
: []),
|
||||
]
|
||||
|
||||
const env: NodeJS.ProcessEnv = {
|
||||
...deps.env,
|
||||
// Strip the bridge's OAuth token so the child CC process uses
|
||||
// the session access token for inference instead.
|
||||
CLAUDE_CODE_OAUTH_TOKEN: undefined,
|
||||
CLAUDE_CODE_ENVIRONMENT_KIND: 'bridge',
|
||||
...(deps.sandbox && { CLAUDE_CODE_FORCE_SANDBOX: '1' }),
|
||||
CLAUDE_CODE_SESSION_ACCESS_TOKEN: opts.accessToken,
|
||||
// v1: HybridTransport (WS reads + POST writes) to Session-Ingress.
|
||||
// Harmless in v2 mode — transportUtils checks CLAUDE_CODE_USE_CCR_V2 first.
|
||||
CLAUDE_CODE_POST_FOR_SESSION_INGRESS_V2: '1',
|
||||
// v2: SSETransport + CCRClient to CCR's /v1/code/sessions/* endpoints.
|
||||
// Same env vars environment-manager sets in the container path.
|
||||
...(opts.useCcrV2 && {
|
||||
CLAUDE_CODE_USE_CCR_V2: '1',
|
||||
CLAUDE_CODE_WORKER_EPOCH: String(opts.workerEpoch),
|
||||
}),
|
||||
}
|
||||
const env = buildChildEnv(deps.env, {
|
||||
accessToken: opts.accessToken,
|
||||
useCcrV2: opts.useCcrV2,
|
||||
workerEpoch: opts.workerEpoch,
|
||||
sandbox: deps.sandbox,
|
||||
})
|
||||
|
||||
deps.onDebug(
|
||||
`[bridge:session] Spawning sessionId=${opts.sessionId} sdkUrl=${opts.sdkUrl} accessToken=${opts.accessToken ? 'present' : 'MISSING'}`,
|
||||
|
||||
@@ -17,7 +17,7 @@ import { jsonStringify } from '../utils/slowOperations.js'
|
||||
*
|
||||
* Bridge sessions have SecurityTier=ELEVATED on the server (CCR v2).
|
||||
* The server gates ConnectBridgeWorker on its own flag
|
||||
* (sessions_elevated_auth_enforcement in Anthropic Main); this CLI-side
|
||||
* (sessions_elevated_auth_enforcement in the server-side main deployment); this CLI-side
|
||||
* flag controls whether the CLI sends X-Trusted-Device-Token at all.
|
||||
* Two flags so rollout can be staged: flip CLI-side first (headers
|
||||
* start flowing, server still no-ops), then flip server-side.
|
||||
@@ -147,7 +147,7 @@ export async function enrollTrustedDevice(): Promise<void> {
|
||||
device_id?: string
|
||||
}>(
|
||||
`${baseUrl}/api/auth/trusted_devices`,
|
||||
{ display_name: `Claude Code on ${hostname()} · ${process.platform}` },
|
||||
{ display_name: `OpenClaude on ${hostname()} · ${process.platform}` },
|
||||
{
|
||||
headers: {
|
||||
Authorization: `Bearer ${accessToken}`,
|
||||
|
||||
36
src/bridge/workSecret.test.ts
Normal file
36
src/bridge/workSecret.test.ts
Normal file
@@ -0,0 +1,36 @@
|
||||
import { expect, test } from 'bun:test'
|
||||
import { buildSdkUrl } from './workSecret.ts'
|
||||
|
||||
// Finding #42-5: buildSdkUrl uses string.includes() on the full URL,
|
||||
// so a remote URL containing "localhost" in its path gets ws:// (unencrypted).
|
||||
|
||||
test('buildSdkUrl uses wss for remote URL that contains localhost in path', () => {
|
||||
const url = buildSdkUrl('https://remote.example.com/proxy/localhost/api', 'sess-1')
|
||||
expect(url).toContain('wss://')
|
||||
expect(url).not.toContain('ws://')
|
||||
})
|
||||
|
||||
test('buildSdkUrl uses ws for actual localhost hostname', () => {
|
||||
const url = buildSdkUrl('http://localhost:8080', 'sess-1')
|
||||
expect(url).toContain('ws://')
|
||||
})
|
||||
|
||||
test('buildSdkUrl uses ws for 127.0.0.1 hostname', () => {
|
||||
const url = buildSdkUrl('http://127.0.0.1:3000', 'sess-1')
|
||||
expect(url).toContain('ws://')
|
||||
})
|
||||
|
||||
test('buildSdkUrl uses wss for regular remote hostname', () => {
|
||||
const url = buildSdkUrl('https://api.example.com', 'sess-1')
|
||||
expect(url).toContain('wss://')
|
||||
})
|
||||
|
||||
test('buildSdkUrl uses v2 path for localhost', () => {
|
||||
const url = buildSdkUrl('http://localhost:8080', 'sess-abc')
|
||||
expect(url).toContain('/v2/session_ingress/ws/sess-abc')
|
||||
})
|
||||
|
||||
test('buildSdkUrl uses v1 path for remote', () => {
|
||||
const url = buildSdkUrl('https://api.example.com', 'sess-abc')
|
||||
expect(url).toContain('/v1/session_ingress/ws/sess-abc')
|
||||
})
|
||||
@@ -39,8 +39,8 @@ export function decodeWorkSecret(secret: string): WorkSecret {
|
||||
* and /v1/ for production (Envoy rewrites /v1/ → /v2/).
|
||||
*/
|
||||
export function buildSdkUrl(apiBaseUrl: string, sessionId: string): string {
|
||||
const isLocalhost =
|
||||
apiBaseUrl.includes('localhost') || apiBaseUrl.includes('127.0.0.1')
|
||||
const hostname = new URL(apiBaseUrl).hostname
|
||||
const isLocalhost = hostname === 'localhost' || hostname === '127.0.0.1'
|
||||
const protocol = isLocalhost ? 'ws' : 'wss'
|
||||
const version = isLocalhost ? 'v2' : 'v1'
|
||||
const host = apiBaseUrl.replace(/^https?:\/\//, '').replace(/\/+$/, '')
|
||||
|
||||
File diff suppressed because one or more lines are too long
3
src/buddy/feature.ts
Normal file
3
src/buddy/feature.ts
Normal file
@@ -0,0 +1,3 @@
|
||||
export function isBuddyEnabled(): boolean {
|
||||
return true
|
||||
}
|
||||
65
src/buddy/observer.ts
Normal file
65
src/buddy/observer.ts
Normal file
@@ -0,0 +1,65 @@
|
||||
import type { Message } from '../types/message.js'
|
||||
import { getGlobalConfig } from '../utils/config.js'
|
||||
import { getUserMessageText } from '../utils/messages.js'
|
||||
import { getCompanion } from './companion.js'
|
||||
|
||||
const DIRECT_REPLIES = [
|
||||
'I am observing.',
|
||||
'I am helping from the corner.',
|
||||
'I saw that.',
|
||||
'Still here.',
|
||||
'Watching closely.',
|
||||
] as const
|
||||
|
||||
const PET_REPLIES = [
|
||||
'happy chirp',
|
||||
'tiny victory dance',
|
||||
'quietly approves',
|
||||
'wiggles with joy',
|
||||
'looks pleased',
|
||||
] as const
|
||||
|
||||
function hashString(s: string): number {
|
||||
let h = 2166136261
|
||||
for (let i = 0; i < s.length; i++) {
|
||||
h ^= s.charCodeAt(i)
|
||||
h = Math.imul(h, 16777619)
|
||||
}
|
||||
return h >>> 0
|
||||
}
|
||||
|
||||
function pickDeterministic<T>(items: readonly T[], seed: string): T {
|
||||
return items[hashString(seed) % items.length]!
|
||||
}
|
||||
|
||||
export async function fireCompanionObserver(
|
||||
messages: Message[],
|
||||
onReaction: (reaction: string | undefined) => void,
|
||||
): Promise<void> {
|
||||
const companion = getCompanion()
|
||||
if (!companion || getGlobalConfig().companionMuted) return
|
||||
|
||||
const lastUser = [...messages].reverse().find(msg => msg.type === 'user')
|
||||
if (!lastUser) return
|
||||
|
||||
const text = getUserMessageText(lastUser)?.trim()
|
||||
if (!text) return
|
||||
|
||||
const lower = text.toLowerCase()
|
||||
const companionName = companion.name.toLowerCase()
|
||||
|
||||
if (lower.includes('/buddy')) {
|
||||
onReaction(pickDeterministic(PET_REPLIES, text + companion.name))
|
||||
return
|
||||
}
|
||||
|
||||
if (
|
||||
lower.includes(companionName) ||
|
||||
lower.includes('buddy') ||
|
||||
lower.includes('companion')
|
||||
) {
|
||||
onReaction(
|
||||
`${companion.name}: ${pickDeterministic(DIRECT_REPLIES, text + companion.personality)}`,
|
||||
)
|
||||
}
|
||||
}
|
||||
@@ -1,8 +1,8 @@
|
||||
import { feature } from 'bun:bundle'
|
||||
import type { Message } from '../types/message.js'
|
||||
import type { Attachment } from '../utils/attachments.js'
|
||||
import { getGlobalConfig } from '../utils/config.js'
|
||||
import { getCompanion } from './companion.js'
|
||||
import { isBuddyEnabled } from './feature.js'
|
||||
|
||||
export function companionIntroText(name: string, species: string): string {
|
||||
return `# Companion
|
||||
@@ -15,7 +15,7 @@ When the user addresses ${name} directly (by name), its bubble will answer. Your
|
||||
export function getCompanionIntroAttachment(
|
||||
messages: Message[] | undefined,
|
||||
): Attachment[] {
|
||||
if (!feature('BUDDY')) return []
|
||||
if (!isBuddyEnabled()) return []
|
||||
const companion = getCompanion()
|
||||
if (!companion || getGlobalConfig().companionMuted) return []
|
||||
|
||||
|
||||
File diff suppressed because one or more lines are too long
@@ -287,7 +287,7 @@ export async function authStatus(opts: {
|
||||
}
|
||||
if (!loggedIn) {
|
||||
process.stdout.write(
|
||||
'Not logged in. Run claude auth login to authenticate.\n',
|
||||
'Not logged in. Run openclaude auth login to authenticate.\n',
|
||||
)
|
||||
}
|
||||
} else {
|
||||
|
||||
@@ -83,7 +83,7 @@ export async function autoModeCritiqueHandler(options: {
|
||||
process.stdout.write(
|
||||
'No custom auto mode rules found.\n\n' +
|
||||
'Add rules to your settings file under autoMode.{allow, soft_deny, environment}.\n' +
|
||||
'Run `claude auto-mode defaults` to see the default rules for reference.\n',
|
||||
'Run `openclaude auto-mode defaults` to see the default rules for reference.\n',
|
||||
)
|
||||
return
|
||||
}
|
||||
@@ -116,7 +116,6 @@ export async function autoModeCritiqueHandler(options: {
|
||||
querySource: 'auto_mode_critique',
|
||||
model,
|
||||
system: CRITIQUE_SYSTEM_PROMPT,
|
||||
skipSystemPromptPrefix: true,
|
||||
max_tokens: 4096,
|
||||
messages: [
|
||||
{
|
||||
|
||||
File diff suppressed because one or more lines are too long
@@ -352,7 +352,7 @@ export async function pluginListHandler(options: {
|
||||
// through to the session section so the failure is visible.
|
||||
if (inlineLoadErrors.length === 0) {
|
||||
cliOk(
|
||||
'No plugins installed. Use `claude plugin install` to install a plugin.',
|
||||
'No plugins installed. Use `openclaude plugin install` to install a plugin.',
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
File diff suppressed because one or more lines are too long
@@ -1,4 +1,4 @@
|
||||
// biome-ignore-all assist/source/organizeImports: ANT-ONLY import markers must not be reordered
|
||||
// biome-ignore-all assist/source/organizeImports: internal-only import markers must not be reordered
|
||||
import { feature } from 'bun:bundle'
|
||||
import { readFile, stat } from 'fs/promises'
|
||||
import { dirname } from 'path'
|
||||
@@ -362,15 +362,9 @@ const proactiveModule =
|
||||
feature('PROACTIVE') || feature('KAIROS')
|
||||
? (require('../proactive/index.js') as typeof import('../proactive/index.js'))
|
||||
: null
|
||||
const cronSchedulerModule = feature('AGENT_TRIGGERS')
|
||||
? (require('../utils/cronScheduler.js') as typeof import('../utils/cronScheduler.js'))
|
||||
: null
|
||||
const cronJitterConfigModule = feature('AGENT_TRIGGERS')
|
||||
? (require('../utils/cronJitterConfig.js') as typeof import('../utils/cronJitterConfig.js'))
|
||||
: null
|
||||
const cronGate = feature('AGENT_TRIGGERS')
|
||||
? (require('../tools/ScheduleCronTool/prompt.js') as typeof import('../tools/ScheduleCronTool/prompt.js'))
|
||||
: null
|
||||
const cronSchedulerModule = require('../utils/cronScheduler.js') as typeof import('../utils/cronScheduler.js')
|
||||
const cronJitterConfigModule = require('../utils/cronJitterConfig.js') as typeof import('../utils/cronJitterConfig.js')
|
||||
const cronGate = require('../tools/ScheduleCronTool/prompt.js') as typeof import('../tools/ScheduleCronTool/prompt.js')
|
||||
const extractMemoriesModule = feature('EXTRACT_MEMORIES')
|
||||
? (require('../services/extractMemories/extractMemories.js') as typeof import('../services/extractMemories/extractMemories.js'))
|
||||
: null
|
||||
@@ -2701,11 +2695,7 @@ function runHeadlessStreaming(
|
||||
// the end of run() picks up the queued command.
|
||||
let cronScheduler: import('../utils/cronScheduler.js').CronScheduler | null =
|
||||
null
|
||||
if (
|
||||
feature('AGENT_TRIGGERS') &&
|
||||
cronSchedulerModule &&
|
||||
cronGate?.isKairosCronEnabled()
|
||||
) {
|
||||
if (cronGate.isKairosCronEnabled()) {
|
||||
cronScheduler = cronSchedulerModule.createCronScheduler({
|
||||
onFire: prompt => {
|
||||
if (inputClosed) return
|
||||
@@ -2727,8 +2717,8 @@ function runHeadlessStreaming(
|
||||
void run()
|
||||
},
|
||||
isLoading: () => running || inputClosed,
|
||||
getJitterConfig: cronJitterConfigModule?.getCronJitterConfig,
|
||||
isKilled: () => !cronGate?.isKairosCronEnabled(),
|
||||
getJitterConfig: cronJitterConfigModule.getCronJitterConfig,
|
||||
isKilled: () => !cronGate.isKairosCronEnabled(),
|
||||
})
|
||||
cronScheduler.start()
|
||||
}
|
||||
@@ -2829,7 +2819,7 @@ function runHeadlessStreaming(
|
||||
|
||||
if (message.type === 'control_request') {
|
||||
if (message.request.subtype === 'interrupt') {
|
||||
// Track escapes for attribution (ant-only feature)
|
||||
// Track escapes for attribution (internal-only feature)
|
||||
if (feature('COMMIT_ATTRIBUTION')) {
|
||||
setAppState(prev => ({
|
||||
...prev,
|
||||
@@ -3765,7 +3755,7 @@ function runHeadlessStreaming(
|
||||
...getSettingsWithSources(),
|
||||
applied: {
|
||||
model,
|
||||
// Numeric effort (ant-only) → null; SDK schema is string-level only.
|
||||
// Numeric effort (internal-only) → null; SDK schema is string-level only.
|
||||
effort: typeof effort === 'string' ? effort : null,
|
||||
},
|
||||
})
|
||||
@@ -4592,7 +4582,7 @@ function handleSetPermissionMode(
|
||||
subtype: 'error',
|
||||
request_id: requestId,
|
||||
error:
|
||||
'Cannot set permission mode to bypassPermissions because the session was not launched with --dangerously-skip-permissions',
|
||||
'Cannot set permission mode to bypassPermissions. Enable it with --allow-dangerously-skip-permissions or set permissions.allowBypassPermissionsMode in settings.json',
|
||||
},
|
||||
})
|
||||
return toolPermissionContext
|
||||
@@ -5025,7 +5015,7 @@ async function loadInitialMessages(
|
||||
}
|
||||
|
||||
// Handle resume in print mode (accepts session ID or URL)
|
||||
// URLs are [ANT-ONLY]
|
||||
// URLs are [internal-only]
|
||||
if (options.resume) {
|
||||
try {
|
||||
logEvent('tengu_resume_print', {})
|
||||
@@ -5036,7 +5026,7 @@ async function loadInitialMessages(
|
||||
)
|
||||
if (!parsedSessionId) {
|
||||
let errorMessage =
|
||||
'Error: --resume requires a valid session ID when used with --print. Usage: claude -p --resume <session-id>'
|
||||
'Error: --resume requires a valid session ID when used with --print. Usage: openclaude -p --resume <session-id>'
|
||||
if (typeof options.resume === 'string') {
|
||||
errorMessage += `. Session IDs must be in UUID format (e.g., 550e8400-e29b-41d4-a716-446655440000). Provided value "${options.resume}" is not a valid UUID`
|
||||
}
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
import chalk from 'chalk'
|
||||
import { getAPIProvider } from 'src/utils/model/providers.js'
|
||||
import { logEvent } from 'src/services/analytics/index.js'
|
||||
import {
|
||||
getLatestVersion,
|
||||
@@ -28,8 +29,26 @@ import { gte } from 'src/utils/semver.js'
|
||||
import { getInitialSettings } from 'src/utils/settings/settings.js'
|
||||
|
||||
export async function update() {
|
||||
// Block updates for third-party providers. The update mechanism downloads
|
||||
// from the first-party distribution bucket, which would silently replace the
|
||||
// OpenClaude build (with the OpenAI shim) with the upstream Claude Code
|
||||
// binary (without it).
|
||||
if (getAPIProvider() !== 'firstParty') {
|
||||
writeToStdout(
|
||||
chalk.yellow(
|
||||
`Auto-update is not available for third-party provider builds.\n`,
|
||||
) +
|
||||
`Current version: ${MACRO.DISPLAY_VERSION}\n\n` +
|
||||
`To update, reinstall from npm:\n` +
|
||||
chalk.bold(` npm install -g ${MACRO.PACKAGE_URL}@latest`) + '\n\n' +
|
||||
`Or, if you built from source, pull and rebuild:\n` +
|
||||
chalk.bold(' git pull && bun install && bun run build') + '\n',
|
||||
)
|
||||
await gracefulShutdown(0)
|
||||
}
|
||||
|
||||
logEvent('tengu_update_check', {})
|
||||
writeToStdout(`Current version: ${MACRO.VERSION}\n`)
|
||||
writeToStdout(`Current version: ${MACRO.DISPLAY_VERSION}\n`)
|
||||
|
||||
const channel = getInitialSettings()?.autoUpdatesChannel ?? 'latest'
|
||||
writeToStdout(`Checking for updates to ${channel} version...\n`)
|
||||
@@ -109,9 +128,14 @@ export async function update() {
|
||||
if (diagnostic.installationType === 'development') {
|
||||
writeToStdout('\n')
|
||||
writeToStdout(
|
||||
chalk.yellow('Warning: Cannot update development build') + '\n',
|
||||
chalk.yellow('You are running a development build — auto-update is unavailable.') + '\n',
|
||||
)
|
||||
await gracefulShutdown(1)
|
||||
writeToStdout('To update, pull the latest source and rebuild:\n')
|
||||
writeToStdout(chalk.bold(' git pull && bun install && bun run build') + '\n')
|
||||
writeToStdout('\n')
|
||||
writeToStdout('Or reinstall from npm:\n')
|
||||
writeToStdout(chalk.bold(` npm install -g ${MACRO.PACKAGE_URL}@latest`) + '\n')
|
||||
await gracefulShutdown(0)
|
||||
}
|
||||
|
||||
// Check if running from a package manager
|
||||
@@ -122,8 +146,8 @@ export async function update() {
|
||||
if (packageManager === 'homebrew') {
|
||||
writeToStdout('Claude is managed by Homebrew.\n')
|
||||
const latest = await getLatestVersion(channel)
|
||||
if (latest && !gte(MACRO.VERSION, latest)) {
|
||||
writeToStdout(`Update available: ${MACRO.VERSION} → ${latest}\n`)
|
||||
if (latest && !gte(MACRO.DISPLAY_VERSION, latest)) {
|
||||
writeToStdout(`Update available: ${MACRO.DISPLAY_VERSION} → ${latest}\n`)
|
||||
writeToStdout('\n')
|
||||
writeToStdout('To update, run:\n')
|
||||
writeToStdout(chalk.bold(' brew upgrade claude-code') + '\n')
|
||||
@@ -133,8 +157,8 @@ export async function update() {
|
||||
} else if (packageManager === 'winget') {
|
||||
writeToStdout('Claude is managed by winget.\n')
|
||||
const latest = await getLatestVersion(channel)
|
||||
if (latest && !gte(MACRO.VERSION, latest)) {
|
||||
writeToStdout(`Update available: ${MACRO.VERSION} → ${latest}\n`)
|
||||
if (latest && !gte(MACRO.DISPLAY_VERSION, latest)) {
|
||||
writeToStdout(`Update available: ${MACRO.DISPLAY_VERSION} → ${latest}\n`)
|
||||
writeToStdout('\n')
|
||||
writeToStdout('To update, run:\n')
|
||||
writeToStdout(
|
||||
@@ -146,8 +170,8 @@ export async function update() {
|
||||
} else if (packageManager === 'apk') {
|
||||
writeToStdout('Claude is managed by apk.\n')
|
||||
const latest = await getLatestVersion(channel)
|
||||
if (latest && !gte(MACRO.VERSION, latest)) {
|
||||
writeToStdout(`Update available: ${MACRO.VERSION} → ${latest}\n`)
|
||||
if (latest && !gte(MACRO.DISPLAY_VERSION, latest)) {
|
||||
writeToStdout(`Update available: ${MACRO.DISPLAY_VERSION} → ${latest}\n`)
|
||||
writeToStdout('\n')
|
||||
writeToStdout('To update, run:\n')
|
||||
writeToStdout(chalk.bold(' apk upgrade claude-code') + '\n')
|
||||
@@ -236,14 +260,14 @@ export async function update() {
|
||||
await gracefulShutdown(1)
|
||||
}
|
||||
|
||||
if (result.latestVersion === MACRO.VERSION) {
|
||||
if (result.latestVersion === MACRO.DISPLAY_VERSION) {
|
||||
writeToStdout(
|
||||
chalk.green(`Claude Code is up to date (${MACRO.VERSION})`) + '\n',
|
||||
chalk.green(`OpenClaude is up to date (${MACRO.DISPLAY_VERSION})`) + '\n',
|
||||
)
|
||||
} else {
|
||||
writeToStdout(
|
||||
chalk.green(
|
||||
`Successfully updated from ${MACRO.VERSION} to version ${result.latestVersion}`,
|
||||
`Successfully updated from ${MACRO.DISPLAY_VERSION} to version ${result.latestVersion}`,
|
||||
) + '\n',
|
||||
)
|
||||
await regenerateCompletionCache()
|
||||
@@ -252,7 +276,7 @@ export async function update() {
|
||||
} catch (error) {
|
||||
process.stderr.write('Error: Failed to install native update\n')
|
||||
process.stderr.write(String(error) + '\n')
|
||||
process.stderr.write('Try running "claude doctor" for diagnostics\n')
|
||||
process.stderr.write('Try running "openclaude doctor" for diagnostics\n')
|
||||
await gracefulShutdown(1)
|
||||
}
|
||||
}
|
||||
@@ -306,15 +330,15 @@ export async function update() {
|
||||
}
|
||||
|
||||
// Check if versions match exactly, including any build metadata (like SHA)
|
||||
if (latestVersion === MACRO.VERSION) {
|
||||
if (latestVersion === MACRO.DISPLAY_VERSION) {
|
||||
writeToStdout(
|
||||
chalk.green(`Claude Code is up to date (${MACRO.VERSION})`) + '\n',
|
||||
chalk.green(`OpenClaude is up to date (${MACRO.DISPLAY_VERSION})`) + '\n',
|
||||
)
|
||||
await gracefulShutdown(0)
|
||||
}
|
||||
|
||||
writeToStdout(
|
||||
`New version available: ${latestVersion} (current: ${MACRO.VERSION})\n`,
|
||||
`New version available: ${latestVersion} (current: ${MACRO.DISPLAY_VERSION})\n`,
|
||||
)
|
||||
writeToStdout('Installing update...\n')
|
||||
|
||||
@@ -374,7 +398,7 @@ export async function update() {
|
||||
case 'success':
|
||||
writeToStdout(
|
||||
chalk.green(
|
||||
`Successfully updated from ${MACRO.VERSION} to version ${latestVersion}`,
|
||||
`Successfully updated from ${MACRO.DISPLAY_VERSION} to version ${latestVersion}`,
|
||||
) + '\n',
|
||||
)
|
||||
await regenerateCompletionCache()
|
||||
@@ -386,12 +410,12 @@ export async function update() {
|
||||
if (useLocalUpdate) {
|
||||
process.stderr.write('Try manually updating with:\n')
|
||||
process.stderr.write(
|
||||
` cd ~/.claude/local && npm update ${MACRO.PACKAGE_URL}\n`,
|
||||
` cd ~/.openclaude/local && npm update ${MACRO.PACKAGE_URL}\n`,
|
||||
)
|
||||
} else {
|
||||
process.stderr.write('Try running with sudo or fix npm permissions\n')
|
||||
process.stderr.write(
|
||||
'Or consider using native installation with: claude install\n',
|
||||
'Or consider using native installation with: openclaude install\n',
|
||||
)
|
||||
}
|
||||
await gracefulShutdown(1)
|
||||
@@ -401,11 +425,11 @@ export async function update() {
|
||||
if (useLocalUpdate) {
|
||||
process.stderr.write('Try manually updating with:\n')
|
||||
process.stderr.write(
|
||||
` cd ~/.claude/local && npm update ${MACRO.PACKAGE_URL}\n`,
|
||||
` cd ~/.openclaude/local && npm update ${MACRO.PACKAGE_URL}\n`,
|
||||
)
|
||||
} else {
|
||||
process.stderr.write(
|
||||
'Or consider using native installation with: claude install\n',
|
||||
'Or consider using native installation with: openclaude install\n',
|
||||
)
|
||||
}
|
||||
await gracefulShutdown(1)
|
||||
|
||||
30
src/commands.test.ts
Normal file
30
src/commands.test.ts
Normal file
@@ -0,0 +1,30 @@
|
||||
import { formatDescriptionWithSource } from './commands.js'
|
||||
|
||||
describe('formatDescriptionWithSource', () => {
|
||||
test('returns empty text for prompt commands missing a description', () => {
|
||||
const command = {
|
||||
name: 'example',
|
||||
type: 'prompt',
|
||||
source: 'builtin',
|
||||
description: undefined,
|
||||
} as any
|
||||
|
||||
expect(formatDescriptionWithSource(command)).toBe('')
|
||||
})
|
||||
|
||||
test('formats plugin commands with missing description safely', () => {
|
||||
const command = {
|
||||
name: 'example',
|
||||
type: 'prompt',
|
||||
source: 'plugin',
|
||||
description: undefined,
|
||||
pluginInfo: {
|
||||
pluginManifest: {
|
||||
name: 'MyPlugin',
|
||||
},
|
||||
},
|
||||
} as any
|
||||
|
||||
expect(formatDescriptionWithSource(command)).toBe('(MyPlugin) ')
|
||||
})
|
||||
})
|
||||
@@ -1,4 +1,4 @@
|
||||
// biome-ignore-all assist/source/organizeImports: ANT-ONLY import markers must not be reordered
|
||||
// biome-ignore-all assist/source/organizeImports: internal-only import markers must not be reordered
|
||||
import addDir from './commands/add-dir/index.js'
|
||||
import autofixPr from './commands/autofix-pr/index.js'
|
||||
import backfillSessions from './commands/backfill-sessions/index.js'
|
||||
@@ -17,8 +17,11 @@ import config from './commands/config/index.js'
|
||||
import { context, contextNonInteractive } from './commands/context/index.js'
|
||||
import cost from './commands/cost/index.js'
|
||||
import diff from './commands/diff/index.js'
|
||||
import dream from './commands/dream/index.js'
|
||||
import ctx_viz from './commands/ctx_viz/index.js'
|
||||
import doctor from './commands/doctor/index.js'
|
||||
import onboardGithub from './commands/onboard-github/index.js'
|
||||
import knowledge from './commands/knowledge/index.js'
|
||||
import memory from './commands/memory/index.js'
|
||||
import help from './commands/help/index.js'
|
||||
import ide from './commands/ide/index.js'
|
||||
@@ -30,6 +33,8 @@ import logout from './commands/logout/index.js'
|
||||
import installGitHubApp from './commands/install-github-app/index.js'
|
||||
import installSlackApp from './commands/install-slack-app/index.js'
|
||||
import breakCache from './commands/break-cache/index.js'
|
||||
import cacheProbe from './commands/cache-probe/index.js'
|
||||
import cacheStats from './commands/cacheStats/index.js'
|
||||
import mcp from './commands/mcp/index.js'
|
||||
import mobile from './commands/mobile/index.js'
|
||||
import onboarding from './commands/onboarding/index.js'
|
||||
@@ -57,6 +62,7 @@ import usage from './commands/usage/index.js'
|
||||
import theme from './commands/theme/index.js'
|
||||
import vim from './commands/vim/index.js'
|
||||
import { feature } from 'bun:bundle'
|
||||
import { isBuddyEnabled } from './buddy/feature.js'
|
||||
// Dead code elimination: conditional imports
|
||||
/* eslint-disable @typescript-eslint/no-require-imports */
|
||||
const proactive =
|
||||
@@ -115,7 +121,7 @@ const forkCmd = feature('FORK_SUBAGENT')
|
||||
require('./commands/fork/index.js') as typeof import('./commands/fork/index.js')
|
||||
).default
|
||||
: null
|
||||
const buddy = feature('BUDDY')
|
||||
const buddy = isBuddyEnabled()
|
||||
? (
|
||||
require('./commands/buddy/index.js') as typeof import('./commands/buddy/index.js')
|
||||
).default
|
||||
@@ -128,10 +134,12 @@ import plan from './commands/plan/index.js'
|
||||
import fast from './commands/fast/index.js'
|
||||
import passes from './commands/passes/index.js'
|
||||
import privacySettings from './commands/privacy-settings/index.js'
|
||||
import provider from './commands/provider/index.js'
|
||||
import hooks from './commands/hooks/index.js'
|
||||
import files from './commands/files/index.js'
|
||||
import branch from './commands/branch/index.js'
|
||||
import agents from './commands/agents/index.js'
|
||||
import autoFix from './commands/auto-fix.js'
|
||||
import plugin from './commands/plugin/index.js'
|
||||
import reloadPlugins from './commands/reload-plugins/index.js'
|
||||
import rewind from './commands/rewind/index.js'
|
||||
@@ -139,6 +147,7 @@ import heapDump from './commands/heapdump/index.js'
|
||||
import mockLimits from './commands/mock-limits/index.js'
|
||||
import bridgeKick from './commands/bridge-kick.js'
|
||||
import version from './commands/version.js'
|
||||
import wiki from './commands/wiki/index.js'
|
||||
import summary from './commands/summary/index.js'
|
||||
import {
|
||||
resetLimits,
|
||||
@@ -190,7 +199,7 @@ import stats from './commands/stats/index.js'
|
||||
const usageReport: Command = {
|
||||
type: 'prompt',
|
||||
name: 'insights',
|
||||
description: 'Generate a report analyzing your Claude Code sessions',
|
||||
description: 'Generate a report analyzing your OpenClaude sessions',
|
||||
contentLength: 0,
|
||||
progressMessage: 'analyzing your sessions',
|
||||
source: 'builtin',
|
||||
@@ -259,8 +268,11 @@ const COMMANDS = memoize((): Command[] => [
|
||||
addDir,
|
||||
advisor,
|
||||
agents,
|
||||
autoFix,
|
||||
branch,
|
||||
btw,
|
||||
cacheProbe,
|
||||
cacheStats,
|
||||
chrome,
|
||||
clear,
|
||||
color,
|
||||
@@ -272,6 +284,7 @@ const COMMANDS = memoize((): Command[] => [
|
||||
contextNonInteractive,
|
||||
cost,
|
||||
diff,
|
||||
dream,
|
||||
doctor,
|
||||
effort,
|
||||
exit,
|
||||
@@ -282,15 +295,18 @@ const COMMANDS = memoize((): Command[] => [
|
||||
ide,
|
||||
init,
|
||||
keybindings,
|
||||
knowledge,
|
||||
installGitHubApp,
|
||||
installSlackApp,
|
||||
mcp,
|
||||
memory,
|
||||
mobile,
|
||||
model,
|
||||
onboardGithub,
|
||||
outputStyle,
|
||||
remoteEnv,
|
||||
plugin,
|
||||
provider,
|
||||
pr_comments,
|
||||
releaseNotes,
|
||||
reloadPlugins,
|
||||
@@ -317,6 +333,7 @@ const COMMANDS = memoize((): Command[] => [
|
||||
usage,
|
||||
usageReport,
|
||||
vim,
|
||||
wiki,
|
||||
...(webCmd ? [webCmd] : []),
|
||||
...(forkCmd ? [forkCmd] : []),
|
||||
...(buddy ? [buddy] : []),
|
||||
@@ -727,23 +744,23 @@ export function getCommand(commandName: string, commands: Command[]): Command {
|
||||
*/
|
||||
export function formatDescriptionWithSource(cmd: Command): string {
|
||||
if (cmd.type !== 'prompt') {
|
||||
return cmd.description
|
||||
return cmd.description ?? ''
|
||||
}
|
||||
|
||||
if (cmd.kind === 'workflow') {
|
||||
return `${cmd.description} (workflow)`
|
||||
return `${cmd.description ?? ''} (workflow)`
|
||||
}
|
||||
|
||||
if (cmd.source === 'plugin') {
|
||||
const pluginName = cmd.pluginInfo?.pluginManifest.name
|
||||
if (pluginName) {
|
||||
return `(${pluginName}) ${cmd.description}`
|
||||
return `(${pluginName}) ${cmd.description ?? ''}`
|
||||
}
|
||||
return `${cmd.description} (plugin)`
|
||||
return `${cmd.description ?? ''} (plugin)`
|
||||
}
|
||||
|
||||
if (cmd.source === 'builtin' || cmd.source === 'mcp') {
|
||||
return cmd.description
|
||||
return cmd.description ?? ''
|
||||
}
|
||||
|
||||
if (cmd.source === 'bundled') {
|
||||
|
||||
File diff suppressed because one or more lines are too long
@@ -9,4 +9,3 @@ export async function call(onDone: LocalJSXCommandOnDone, context: ToolUseContex
|
||||
const tools = getTools(permissionContext);
|
||||
return <AgentsMenu tools={tools} onExit={onDone} />;
|
||||
}
|
||||
//# sourceMappingURL=data:application/json;charset=utf-8;base64,eyJ2ZXJzaW9uIjozLCJuYW1lcyI6WyJSZWFjdCIsIkFnZW50c01lbnUiLCJUb29sVXNlQ29udGV4dCIsImdldFRvb2xzIiwiTG9jYWxKU1hDb21tYW5kT25Eb25lIiwiY2FsbCIsIm9uRG9uZSIsImNvbnRleHQiLCJQcm9taXNlIiwiUmVhY3ROb2RlIiwiYXBwU3RhdGUiLCJnZXRBcHBTdGF0ZSIsInBlcm1pc3Npb25Db250ZXh0IiwidG9vbFBlcm1pc3Npb25Db250ZXh0IiwidG9vbHMiXSwic291cmNlcyI6WyJhZ2VudHMudHN4Il0sInNvdXJjZXNDb250ZW50IjpbImltcG9ydCAqIGFzIFJlYWN0IGZyb20gJ3JlYWN0J1xuaW1wb3J0IHsgQWdlbnRzTWVudSB9IGZyb20gJy4uLy4uL2NvbXBvbmVudHMvYWdlbnRzL0FnZW50c01lbnUuanMnXG5pbXBvcnQgdHlwZSB7IFRvb2xVc2VDb250ZXh0IH0gZnJvbSAnLi4vLi4vVG9vbC5qcydcbmltcG9ydCB7IGdldFRvb2xzIH0gZnJvbSAnLi4vLi4vdG9vbHMuanMnXG5pbXBvcnQgdHlwZSB7IExvY2FsSlNYQ29tbWFuZE9uRG9uZSB9IGZyb20gJy4uLy4uL3R5cGVzL2NvbW1hbmQuanMnXG5cbmV4cG9ydCBhc3luYyBmdW5jdGlvbiBjYWxsKFxuICBvbkRvbmU6IExvY2FsSlNYQ29tbWFuZE9uRG9uZSxcbiAgY29udGV4dDogVG9vbFVzZUNvbnRleHQsXG4pOiBQcm9taXNlPFJlYWN0LlJlYWN0Tm9kZT4ge1xuICBjb25zdCBhcHBTdGF0ZSA9IGNvbnRleHQuZ2V0QXBwU3RhdGUoKVxuICBjb25zdCBwZXJtaXNzaW9uQ29udGV4dCA9IGFwcFN0YXRlLnRvb2xQZXJtaXNzaW9uQ29udGV4dFxuICBjb25zdCB0b29scyA9IGdldFRvb2xzKHBlcm1pc3Npb25Db250ZXh0KVxuXG4gIHJldHVybiA8QWdlbnRzTWVudSB0b29scz17dG9vbHN9IG9uRXhpdD17b25Eb25lfSAvPlxufVxuIl0sIm1hcHBpbmdzIjoiQUFBQSxPQUFPLEtBQUtBLEtBQUssTUFBTSxPQUFPO0FBQzlCLFNBQVNDLFVBQVUsUUFBUSx1Q0FBdUM7QUFDbEUsY0FBY0MsY0FBYyxRQUFRLGVBQWU7QUFDbkQsU0FBU0MsUUFBUSxRQUFRLGdCQUFnQjtBQUN6QyxjQUFjQyxxQkFBcUIsUUFBUSx3QkFBd0I7QUFFbkUsT0FBTyxlQUFlQyxJQUFJQSxDQUN4QkMsTUFBTSxFQUFFRixxQkFBcUIsRUFDN0JHLE9BQU8sRUFBRUwsY0FBYyxDQUN4QixFQUFFTSxPQUFPLENBQUNSLEtBQUssQ0FBQ1MsU0FBUyxDQUFDLENBQUM7RUFDMUIsTUFBTUMsUUFBUSxHQUFHSCxPQUFPLENBQUNJLFdBQVcsQ0FBQyxDQUFDO0VBQ3RDLE1BQU1DLGlCQUFpQixHQUFHRixRQUFRLENBQUNHLHFCQUFxQjtFQUN4RCxNQUFNQyxLQUFLLEdBQUdYLFFBQVEsQ0FBQ1MsaUJBQWlCLENBQUM7RUFFekMsT0FBTyxDQUFDLFVBQVUsQ0FBQyxLQUFLLENBQUMsQ0FBQ0UsS0FBSyxDQUFDLENBQUMsTUFBTSxDQUFDLENBQUNSLE1BQU0sQ0FBQyxHQUFHO0FBQ3JEIiwiaWdub3JlTGlzdCI6W119
|
||||
25
src/commands/auto-fix.ts
Normal file
25
src/commands/auto-fix.ts
Normal file
@@ -0,0 +1,25 @@
|
||||
import type { Command } from '../types/command.js'
|
||||
|
||||
const command: Command = {
|
||||
name: 'auto-fix',
|
||||
description: 'Configure auto-fix: run lint/test after AI edits',
|
||||
isEnabled: () => true,
|
||||
type: 'prompt',
|
||||
progressMessage: 'Configuring auto-fix...',
|
||||
contentLength: 0,
|
||||
source: 'builtin',
|
||||
async getPromptForCommand() {
|
||||
return [
|
||||
{
|
||||
type: 'text',
|
||||
text:
|
||||
'The user wants to configure auto-fix settings. Auto-fix automatically runs lint and test commands after AI file edits, feeding errors back for self-repair.\n\n' +
|
||||
'Current settings location: `.claude/settings.json` or `.claude/settings.local.json`\n\n' +
|
||||
'Example configuration:\n```json\n{\n "autoFix": {\n "enabled": true,\n "lint": "eslint . --fix",\n "test": "bun test",\n "maxRetries": 3,\n "timeout": 30000\n }\n}\n```\n\n' +
|
||||
'Ask the user what lint and test commands they use, then help them set up the configuration.',
|
||||
},
|
||||
]
|
||||
},
|
||||
}
|
||||
|
||||
export default command
|
||||
56
src/commands/benchmark.ts
Normal file
56
src/commands/benchmark.ts
Normal file
@@ -0,0 +1,56 @@
|
||||
import type { ToolUseContext } from '../Tool.js'
|
||||
import type { Command } from '../types/command.js'
|
||||
import {
|
||||
benchmarkModel,
|
||||
benchmarkMultipleModels,
|
||||
formatBenchmarkResults,
|
||||
isBenchmarkSupported,
|
||||
} from '../utils/model/benchmark.js'
|
||||
import { getOllamaModelOptions } from '../utils/model/ollamaModels.js'
|
||||
|
||||
async function runBenchmark(
|
||||
model?: string,
|
||||
context?: ToolUseContext,
|
||||
): Promise<void> {
|
||||
if (!isBenchmarkSupported()) {
|
||||
context?.stdout?.write(
|
||||
'Benchmark not supported for this provider.\n' +
|
||||
'Supported: OpenAI-compatible endpoints (Ollama, NVIDIA NIM, MiniMax)\n',
|
||||
)
|
||||
return
|
||||
}
|
||||
|
||||
let modelsToBenchmark: string[]
|
||||
|
||||
if (model) {
|
||||
modelsToBenchmark = [model]
|
||||
} else {
|
||||
const ollamaModels = getOllamaModelOptions()
|
||||
modelsToBenchmark = ollamaModels.slice(0, 3).map((m) => m.value)
|
||||
}
|
||||
|
||||
context?.stdout?.write(`Benchmarking ${modelsToBenchmark.length} model(s)...\n`)
|
||||
|
||||
const results = await benchmarkMultipleModels(
|
||||
modelsToBenchmark,
|
||||
(completed, total, result) => {
|
||||
context?.stdout?.write(
|
||||
`[${completed}/${total}] ${result.model}: ` +
|
||||
`${result.success ? result.tokensPerSecond.toFixed(1) + ' tps' : 'FAILED'}\n`,
|
||||
)
|
||||
},
|
||||
)
|
||||
|
||||
context?.stdout?.write('\n' + formatBenchmarkResults(results) + '\n')
|
||||
}
|
||||
|
||||
export const benchmark: Command = {
|
||||
name: 'benchmark',
|
||||
|
||||
async onExecute(context: ToolUseContext): Promise<void> {
|
||||
const args = context.args ?? {}
|
||||
const model = args.model as string | undefined
|
||||
|
||||
await runBenchmark(model, context)
|
||||
},
|
||||
}
|
||||
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
185
src/commands/buddy/buddy.tsx
Normal file
185
src/commands/buddy/buddy.tsx
Normal file
@@ -0,0 +1,185 @@
|
||||
import type { LocalJSXCommandContext, LocalJSXCommandOnDone } from '../../types/command.js'
|
||||
import { getGlobalConfig, saveGlobalConfig } from '../../utils/config.js'
|
||||
import { companionUserId, getCompanion, rollWithSeed } from '../../buddy/companion.js'
|
||||
import type { StoredCompanion } from '../../buddy/types.js'
|
||||
import { COMMON_HELP_ARGS, COMMON_INFO_ARGS } from '../../constants/xml.js'
|
||||
|
||||
const NAME_PREFIXES = [
|
||||
'Byte',
|
||||
'Echo',
|
||||
'Glint',
|
||||
'Miso',
|
||||
'Nova',
|
||||
'Pixel',
|
||||
'Rune',
|
||||
'Static',
|
||||
'Vector',
|
||||
'Whisk',
|
||||
] as const
|
||||
|
||||
const NAME_SUFFIXES = [
|
||||
'bean',
|
||||
'bit',
|
||||
'bud',
|
||||
'dot',
|
||||
'ling',
|
||||
'loop',
|
||||
'moss',
|
||||
'patch',
|
||||
'puff',
|
||||
'spark',
|
||||
] as const
|
||||
|
||||
const PERSONALITIES = [
|
||||
'Curious and quietly encouraging',
|
||||
'A patient little watcher with strong debugging instincts',
|
||||
'Playful, observant, and suspicious of flaky tests',
|
||||
'Calm under pressure and fond of clean diffs',
|
||||
'A tiny terminal gremlin who likes successful builds',
|
||||
] as const
|
||||
|
||||
const PET_REACTIONS = [
|
||||
'leans into the headpat',
|
||||
'does a proud little bounce',
|
||||
'emits a content beep',
|
||||
'looks delighted',
|
||||
'wiggles happily',
|
||||
] as const
|
||||
|
||||
function hashString(s: string): number {
|
||||
let h = 2166136261
|
||||
for (let i = 0; i < s.length; i++) {
|
||||
h ^= s.charCodeAt(i)
|
||||
h = Math.imul(h, 16777619)
|
||||
}
|
||||
return h >>> 0
|
||||
}
|
||||
|
||||
function pickDeterministic<T>(items: readonly T[], seed: string): T {
|
||||
return items[hashString(seed) % items.length]!
|
||||
}
|
||||
|
||||
function titleCase(s: string): string {
|
||||
return s.charAt(0).toUpperCase() + s.slice(1)
|
||||
}
|
||||
|
||||
function createStoredCompanion(): StoredCompanion {
|
||||
const userId = companionUserId()
|
||||
const { bones } = rollWithSeed(`${userId}:buddy`)
|
||||
const prefix = pickDeterministic(NAME_PREFIXES, `${userId}:prefix`)
|
||||
const suffix = pickDeterministic(NAME_SUFFIXES, `${userId}:suffix`)
|
||||
const personality = pickDeterministic(PERSONALITIES, `${userId}:personality`)
|
||||
|
||||
return {
|
||||
name: `${prefix}${suffix}`,
|
||||
personality: `${personality}.`,
|
||||
hatchedAt: Date.now(),
|
||||
}
|
||||
}
|
||||
|
||||
function setCompanionReaction(
|
||||
context: LocalJSXCommandContext,
|
||||
reaction: string | undefined,
|
||||
pet = false,
|
||||
): void {
|
||||
context.setAppState(prev => ({
|
||||
...prev,
|
||||
companionReaction: reaction,
|
||||
companionPetAt: pet ? Date.now() : prev.companionPetAt,
|
||||
}))
|
||||
}
|
||||
|
||||
function showHelp(onDone: LocalJSXCommandOnDone): void {
|
||||
onDone(
|
||||
'Usage: /buddy [status|mute|unmute]\n\nRun /buddy with no args to hatch your companion the first time, then pet it on later runs.',
|
||||
{ display: 'system' },
|
||||
)
|
||||
}
|
||||
|
||||
export async function call(
|
||||
onDone: LocalJSXCommandOnDone,
|
||||
context: LocalJSXCommandContext,
|
||||
args?: string,
|
||||
): Promise<null> {
|
||||
const arg = args?.trim().toLowerCase() ?? ''
|
||||
|
||||
if (COMMON_HELP_ARGS.includes(arg) || arg === '') {
|
||||
const existing = getCompanion()
|
||||
if (arg !== '' || existing) {
|
||||
if (arg !== '') {
|
||||
showHelp(onDone)
|
||||
return null
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (COMMON_HELP_ARGS.includes(arg)) {
|
||||
showHelp(onDone)
|
||||
return null
|
||||
}
|
||||
|
||||
if (COMMON_INFO_ARGS.includes(arg) || arg === 'status') {
|
||||
const companion = getCompanion()
|
||||
if (!companion) {
|
||||
onDone('No buddy hatched yet. Run /buddy to hatch one.', {
|
||||
display: 'system',
|
||||
})
|
||||
return null
|
||||
}
|
||||
onDone(
|
||||
`${companion.name} is your ${titleCase(companion.rarity)} ${companion.species}. ${companion.personality}`,
|
||||
{ display: 'system' },
|
||||
)
|
||||
return null
|
||||
}
|
||||
|
||||
if (arg === 'mute' || arg === 'unmute') {
|
||||
const muted = arg === 'mute'
|
||||
saveGlobalConfig(current => ({
|
||||
...current,
|
||||
companionMuted: muted,
|
||||
}))
|
||||
if (muted) {
|
||||
setCompanionReaction(context, undefined)
|
||||
}
|
||||
onDone(`Buddy ${muted ? 'muted' : 'unmuted'}.`, { display: 'system' })
|
||||
return null
|
||||
}
|
||||
|
||||
if (arg !== '') {
|
||||
showHelp(onDone)
|
||||
return null
|
||||
}
|
||||
|
||||
let companion = getCompanion()
|
||||
if (!companion) {
|
||||
const stored = createStoredCompanion()
|
||||
saveGlobalConfig(current => ({
|
||||
...current,
|
||||
companion: stored,
|
||||
companionMuted: false,
|
||||
}))
|
||||
companion = {
|
||||
...rollWithSeed(`${companionUserId()}:buddy`).bones,
|
||||
...stored,
|
||||
}
|
||||
setCompanionReaction(
|
||||
context,
|
||||
`${companion.name} the ${companion.species} has hatched.`,
|
||||
true,
|
||||
)
|
||||
onDone(
|
||||
`${companion.name} the ${companion.species} is now your buddy. Run /buddy again to pet them.`,
|
||||
{ display: 'system' },
|
||||
)
|
||||
return null
|
||||
}
|
||||
|
||||
const reaction = `${companion.name} ${pickDeterministic(
|
||||
PET_REACTIONS,
|
||||
`${Date.now()}:${companion.name}`,
|
||||
)}`
|
||||
setCompanionReaction(context, reaction, true)
|
||||
onDone(undefined, { display: 'skip' })
|
||||
return null
|
||||
}
|
||||
12
src/commands/buddy/index.ts
Normal file
12
src/commands/buddy/index.ts
Normal file
@@ -0,0 +1,12 @@
|
||||
import type { Command } from '../../commands.js'
|
||||
|
||||
const buddy = {
|
||||
type: 'local-jsx',
|
||||
name: 'buddy',
|
||||
description: 'Hatch, pet, and manage your OpenClaude companion',
|
||||
immediate: true,
|
||||
argumentHint: '[status|mute|unmute|help]',
|
||||
load: () => import('./buddy.js'),
|
||||
} satisfies Command
|
||||
|
||||
export default buddy
|
||||
413
src/commands/cache-probe/cache-probe.ts
Normal file
413
src/commands/cache-probe/cache-probe.ts
Normal file
@@ -0,0 +1,413 @@
|
||||
import { getSessionId } from '../../bootstrap/state.js'
|
||||
import { resolveProviderRequest } from '../../services/api/providerConfig.js'
|
||||
import type { LocalCommandCall } from '../../types/command.js'
|
||||
import { logForDebugging } from '../../utils/debug.js'
|
||||
import { isEnvTruthy } from '../../utils/envUtils.js'
|
||||
import { hydrateGithubModelsTokenFromSecureStorage } from '../../utils/githubModelsCredentials.js'
|
||||
import { getMainLoopModel } from '../../utils/model/model.js'
|
||||
|
||||
const COPILOT_HEADERS: Record<string, string> = {
|
||||
'User-Agent': 'GitHubCopilotChat/0.26.7',
|
||||
'Editor-Version': 'vscode/1.99.3',
|
||||
'Editor-Plugin-Version': 'copilot-chat/0.26.7',
|
||||
'Copilot-Integration-Id': 'vscode-chat',
|
||||
}
|
||||
|
||||
// Large system prompt (~6000 chars, ~1500 tokens) to cross the 1024-token cache threshold
|
||||
const SYSTEM_PROMPT = [
|
||||
'You are a coding assistant. Answer concisely.',
|
||||
'CONTEXT: User is working on a TypeScript project with Bun runtime.',
|
||||
...Array.from(
|
||||
{ length: 80 },
|
||||
(_, i) =>
|
||||
`Rule ${i + 1}: Follow best practices for TypeScript including strict typing, error handling, testing, and clean code. Prefer explicit types over any. Use const assertions. Await all async operations.`,
|
||||
),
|
||||
].join('\n\n')
|
||||
|
||||
const USER_MESSAGE = 'Say "hello" and nothing else.'
|
||||
const DELAY_MS = 3000
|
||||
|
||||
/**
|
||||
* Extract model family from a versioned model string.
|
||||
* e.g. "gpt-5.4-0626" → "gpt-5.4", "codex-mini-latest" → "codex-mini"
|
||||
*/
|
||||
function getModelFamily(model: string | undefined): string {
|
||||
if (!model) return 'unknown'
|
||||
return model
|
||||
.replace(/-\d{4,}$/, '')
|
||||
.replace(/-latest$/, '')
|
||||
.replace(/-preview$/, '')
|
||||
}
|
||||
|
||||
function getField(obj: unknown, path: string): unknown {
|
||||
return path
|
||||
.split('.')
|
||||
.reduce((o: any, k: string) => (o != null ? o[k] : undefined), obj)
|
||||
}
|
||||
|
||||
interface ProbeResult {
|
||||
label: string
|
||||
status: number
|
||||
elapsed: number
|
||||
headers: Record<string, string>
|
||||
usage: Record<string, unknown> | null
|
||||
responseId: string | null
|
||||
error: string | null
|
||||
}
|
||||
|
||||
async function sendProbe(
|
||||
url: string,
|
||||
headers: Record<string, string>,
|
||||
body: Record<string, unknown>,
|
||||
label: string,
|
||||
): Promise<ProbeResult> {
|
||||
const start = Date.now()
|
||||
let response: Response
|
||||
try {
|
||||
response = await fetch(url, {
|
||||
method: 'POST',
|
||||
headers,
|
||||
body: JSON.stringify(body),
|
||||
})
|
||||
} catch (err: any) {
|
||||
return {
|
||||
label,
|
||||
status: 0,
|
||||
elapsed: Date.now() - start,
|
||||
headers: {},
|
||||
usage: null,
|
||||
responseId: null,
|
||||
error: err.message,
|
||||
}
|
||||
}
|
||||
const elapsed = Date.now() - start
|
||||
|
||||
const respHeaders: Record<string, string> = {}
|
||||
response.headers.forEach((value, key) => {
|
||||
respHeaders[key] = value
|
||||
})
|
||||
|
||||
if (!response.ok) {
|
||||
const errorBody = await response.text().catch(() => '')
|
||||
return {
|
||||
label,
|
||||
status: response.status,
|
||||
elapsed,
|
||||
headers: respHeaders,
|
||||
usage: null,
|
||||
responseId: null,
|
||||
error: errorBody,
|
||||
}
|
||||
}
|
||||
|
||||
// Parse SSE stream for usage data
|
||||
const text = await response.text()
|
||||
let usage: Record<string, unknown> | null = null
|
||||
let responseId: string | null = null
|
||||
|
||||
const isResponses = url.endsWith('/responses')
|
||||
for (const chunk of text.split('\n\n')) {
|
||||
const lines = chunk
|
||||
.split('\n')
|
||||
.map((l) => l.trim())
|
||||
.filter(Boolean)
|
||||
|
||||
if (isResponses) {
|
||||
const eventLine = lines.find((l) => l.startsWith('event: '))
|
||||
const dataLines = lines.filter((l) => l.startsWith('data: '))
|
||||
if (!eventLine || !dataLines.length) continue
|
||||
const event = eventLine.slice(7).trim()
|
||||
if (
|
||||
event === 'response.completed' ||
|
||||
event === 'response.incomplete'
|
||||
) {
|
||||
try {
|
||||
const data = JSON.parse(
|
||||
dataLines.map((l) => l.slice(6)).join('\n'),
|
||||
)
|
||||
usage = (data?.response?.usage as Record<string, unknown>) ?? null
|
||||
responseId = (data?.response?.id as string) ?? null
|
||||
} catch {}
|
||||
}
|
||||
} else {
|
||||
for (const line of lines) {
|
||||
if (!line.startsWith('data: ')) continue
|
||||
const raw = line.slice(6).trim()
|
||||
if (raw === '[DONE]') continue
|
||||
try {
|
||||
const data = JSON.parse(raw) as Record<string, unknown>
|
||||
if (data.usage) {
|
||||
usage = data.usage as Record<string, unknown>
|
||||
responseId = (data.id as string) ?? null
|
||||
}
|
||||
} catch {}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return { label, status: response.status, elapsed, headers: respHeaders, usage, responseId, error: null }
|
||||
}
|
||||
|
||||
function formatResult(r: ProbeResult): string {
|
||||
const lines: string[] = [`--- ${r.label} ---`]
|
||||
if (r.error) {
|
||||
lines.push(` ERROR (HTTP ${r.status}): ${r.error.slice(0, 200)}`)
|
||||
return lines.join('\n')
|
||||
}
|
||||
lines.push(` HTTP ${r.status} — ${r.elapsed}ms`)
|
||||
if (r.responseId) lines.push(` response.id: ${r.responseId}`)
|
||||
|
||||
if (r.usage) {
|
||||
lines.push(' Usage:')
|
||||
lines.push(` ${JSON.stringify(r.usage, null, 2).replace(/\n/g, '\n ')}`)
|
||||
} else {
|
||||
lines.push(' Usage: null')
|
||||
}
|
||||
|
||||
// Interesting headers
|
||||
for (const h of [
|
||||
'openai-processing-ms',
|
||||
'x-ratelimit-remaining',
|
||||
'x-ratelimit-limit',
|
||||
'x-ms-region',
|
||||
'x-github-request-id',
|
||||
'x-request-id',
|
||||
]) {
|
||||
if (r.headers[h]) lines.push(` ${h}: ${r.headers[h]}`)
|
||||
}
|
||||
return lines.join('\n')
|
||||
}
|
||||
|
||||
/**
 * /cache-probe — send the same large prompt twice against the current
 * provider, three seconds apart, then compare usage and latency to
 * judge whether prompt caching is active — and whether main's usage
 * shims would actually surface it.
 *
 * Args string: optional model override, plus `--no-key` to omit the
 * prompt_cache_key/store parameters (a control run).
 * Returns a text result summarizing the probe; full detail goes to the
 * debug log via logForDebugging().
 */
export const call: LocalCommandCall = async (args) => {
  // Tokenize the raw argument string; flags start with '--'.
  const parts = (args ?? '').trim().split(/\s+/).filter(Boolean)
  const noKey = parts.includes('--no-key')
  const modelOverride = parts.find((p) => !p.startsWith('--')) || undefined
  const modelStr = modelOverride ?? getMainLoopModel()
  const request = resolveProviderRequest({ model: modelStr })
  const isGithub = isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB)

  // Resolve API key the same way the OpenAI shim does
  let apiKey = process.env.OPENAI_API_KEY ?? ''
  if (!apiKey && isGithub) {
    // Copilot tokens may live in secure storage rather than the env.
    hydrateGithubModelsTokenFromSecureStorage()
    apiKey =
      process.env.OPENAI_API_KEY ??
      process.env.GITHUB_TOKEN ??
      process.env.GH_TOKEN ??
      ''
  }

  if (!apiKey) {
    return {
      type: 'text',
      value:
        'No API key found. Make sure you are in an active OpenAI-compatible or GitHub Copilot session.\n' +
        'For GitHub Copilot: run /onboard-github first.\n' +
        'For OpenAI-compatible: set OPENAI_API_KEY.',
    }
  }

  // Endpoint shape follows the provider transport; the cache key is
  // scoped to session + model family so related models share a prefix.
  const useResponses = request.transport === 'codex_responses'
  const endpoint = useResponses ? '/responses' : '/chat/completions'
  const url = `${request.baseUrl}${endpoint}`
  const family = getModelFamily(request.resolvedModel)
  const cacheKey = `${getSessionId()}:${family}`

  const headers: Record<string, string> = {
    'Content-Type': 'application/json',
    Authorization: `Bearer ${apiKey}`,
    originator: 'openclaude',
  }
  if (isGithub) {
    // Present as VS Code's Copilot Chat client.
    Object.assign(headers, COPILOT_HEADERS)
  }

  // Build the request body for the chosen API; with `--no-key` the
  // caching parameters are omitted entirely (control run).
  let body: Record<string, unknown>
  if (useResponses) {
    body = {
      model: request.resolvedModel,
      instructions: SYSTEM_PROMPT,
      input: [
        {
          type: 'message',
          role: 'user',
          content: [{ type: 'input_text', text: USER_MESSAGE }],
        },
      ],
      stream: true,
      ...(noKey ? {} : {
        store: false,
        prompt_cache_key: cacheKey,
        prompt_cache_retention: '24h',
      }),
    }
  } else {
    body = {
      model: request.resolvedModel,
      messages: [
        { role: 'system', content: SYSTEM_PROMPT },
        { role: 'user', content: USER_MESSAGE },
      ],
      stream: true,
      stream_options: { include_usage: true },
      max_tokens: 20,
      ...(noKey ? {} : {
        store: false,
        prompt_cache_key: cacheKey,
      }),
    }
  }

  // Log configuration
  const config = [
    `[cache-probe] Starting cache probe${noKey ? ' (--no-key: cache params OMITTED)' : ''}`,
    ` model: ${request.resolvedModel} (family: ${family})`,
    ` transport: ${request.transport}`,
    ` endpoint: ${url}`,
    ` prompt_cache_key: ${noKey ? 'NOT SENT' : cacheKey}`,
    ` store: ${noKey ? 'NOT SENT' : 'false'}`,
    ` system prompt: ~${Math.round(SYSTEM_PROMPT.length / 4)} tokens`,
    ` delay between calls: ${DELAY_MS}ms`,
  ].join('\n')
  logForDebugging(config)

  // Call 1 — Cold
  const r1 = await sendProbe(url, headers, body, 'CALL 1 — Cold (no cache)')
  logForDebugging(`[cache-probe]\n${formatResult(r1)}`)

  if (r1.error) {
    // First call failed → no point in a warm call; report and stop.
    return {
      type: 'text',
      value: `Cache probe failed on first call: HTTP ${r1.status}\n${r1.error.slice(0, 300)}\n\nFull details in debug log.`,
    }
  }

  // Wait
  await new Promise((r) => setTimeout(r, DELAY_MS))

  // Call 2 — Warm
  const r2 = await sendProbe(url, headers, body, 'CALL 2 — Warm (cache expected)')
  logForDebugging(`[cache-probe]\n${formatResult(r2)}`)

  // --- Comparison ---
  // Usage fields compared across both calls; both the Responses-API and
  // Chat-Completions naming variants are listed.
  const fields = [
    'input_tokens',
    'output_tokens',
    'total_tokens',
    'prompt_tokens',
    'completion_tokens',
    'input_tokens_details.cached_tokens',
    'prompt_tokens_details.cached_tokens',
    'output_tokens_details.reasoning_tokens',
  ]

  const comparison: string[] = ['[cache-probe] COMPARISON']
  comparison.push(
    ` ${'Field'.padEnd(42)} ${'Call 1'.padStart(8)} ${'Call 2'.padStart(8)} ${'Delta'.padStart(8)}`,
  )
  comparison.push(` ${'-'.repeat(72)}`)

  for (const f of fields) {
    const v1 = getField(r1.usage, f)
    const v2 = getField(r2.usage, f)
    // Skip fields this provider never reports.
    if (v1 === undefined && v2 === undefined) continue
    const d =
      typeof v1 === 'number' && typeof v2 === 'number' ? v2 - v1 : ''
    comparison.push(
      ` ${f.padEnd(42)} ${String(v1 ?? '-').padStart(8)} ${String(v2 ?? '-').padStart(8)} ${String(d).padStart(8)}`,
    )
  }

  comparison.push('')
  comparison.push(
    ` Latency: ${r1.elapsed}ms → ${r2.elapsed}ms (${r2.elapsed - r1.elapsed > 0 ? '+' : ''}${r2.elapsed - r1.elapsed}ms)`,
  )

  // Header comparison
  for (const h of ['openai-processing-ms', 'x-ms-region', 'x-ratelimit-remaining']) {
    const v1 = r1.headers[h]
    const v2 = r2.headers[h]
    if (v1 || v2) {
      comparison.push(` ${h}: ${v1 ?? '-'} → ${v2 ?? '-'}`)
    }
  }

  // Verdict
  // cached2: cached-token count on the warm call, whichever API naming
  // the provider uses; input1/input2: total input tokens per call.
  const cached2 =
    (getField(r2.usage, 'input_tokens_details.cached_tokens') as number) ??
    (getField(r2.usage, 'prompt_tokens_details.cached_tokens') as number) ??
    0
  const input1 =
    ((r1.usage?.input_tokens ?? r1.usage?.prompt_tokens) as number) ?? 0
  const input2 =
    ((r2.usage?.input_tokens ?? r2.usage?.prompt_tokens) as number) ?? 0

  let verdict: string
  if (cached2 > 0) {
    const rate = input2 > 0 ? Math.round((cached2 / input2) * 100) : '?'
    verdict = `CACHE HIT: ${cached2} cached tokens (${rate}% of input)`
  } else if (input1 === 0 && input2 === 0) {
    verdict = 'INCONCLUSIVE: Server returns 0 input_tokens — cannot measure'
  } else if (r2.elapsed < r1.elapsed * 0.6 && input1 > 100) {
    // Much faster warm call without reported cached tokens suggests the
    // server caches but does not report it.
    verdict = `POSSIBLE SILENT CACHING: Call 2 was ${Math.round((1 - r2.elapsed / r1.elapsed) * 100)}% faster but no cached_tokens reported`
  } else {
    verdict = 'NO CACHE DETECTED'
  }

  comparison.push(`\n Verdict: ${verdict}`)

  // --- Simulate what main's shim code does with this usage ---
  // codexShim.ts makeUsage() — used for Responses API (GPT-5+/Codex)
  function mainMakeUsage(u: any) {
    return {
      input_tokens: u?.input_tokens ?? 0,
      output_tokens: u?.output_tokens ?? 0,
      cache_creation_input_tokens: 0,
      cache_read_input_tokens: 0, // ← main hardcodes this to 0
    }
  }
  // openaiShim.ts convertChunkUsage() — used for Chat Completions
  function mainConvertChunkUsage(u: any) {
    return {
      input_tokens: u?.prompt_tokens ?? 0,
      output_tokens: u?.completion_tokens ?? 0,
      cache_creation_input_tokens: 0,
      cache_read_input_tokens: u?.prompt_tokens_details?.cached_tokens ?? 0,
    }
  }

  const shimFn = useResponses ? mainMakeUsage : mainConvertChunkUsage
  const shim1 = shimFn(r1.usage)
  const shim2 = shimFn(r2.usage)

  comparison.push('')
  comparison.push(` --- What main's shim reports (${useResponses ? 'codexShim.makeUsage' : 'openaiShim.convertChunkUsage'}) ---`)
  comparison.push(` Call 1: cache_read_input_tokens=${shim1.cache_read_input_tokens}`)
  comparison.push(` Call 2: cache_read_input_tokens=${shim2.cache_read_input_tokens}`)
  if (useResponses && cached2 > 0) {
    comparison.push(` BUG: Server returned ${cached2} cached tokens but main's makeUsage() drops it → reports 0`)
  } else if (!useResponses && shim2.cache_read_input_tokens > 0) {
    comparison.push(` OK: Chat Completions path on main correctly reads cached_tokens`)
  }

  logForDebugging(comparison.join('\n'))

  // User-facing summary
  const mode = noKey ? ' (NO cache key sent)' : ''
  const shimLabel = useResponses ? 'codexShim.makeUsage()' : 'openaiShim.convertChunkUsage()'
  const summary = [
    `Cache Probe — ${request.resolvedModel} via ${useResponses ? 'Responses API' : 'Chat Completions'}${mode}`,
    '',
    `Call 1: ${r1.elapsed}ms, input=${input1}, cached=${(getField(r1.usage, 'input_tokens_details.cached_tokens') as number) ?? (getField(r1.usage, 'prompt_tokens_details.cached_tokens') as number) ?? 0}`,
    `Call 2: ${r2.elapsed}ms, input=${input2}, cached=${cached2}`,
    '',
    verdict,
    '',
    `What main's ${shimLabel} reports:`,
    ` Call 2 cache_read_input_tokens = ${shim2.cache_read_input_tokens}${useResponses && cached2 > 0 ? ' ← BUG: server sent ' + cached2 + ' but main drops it' : ''}`,
    '',
    'Full details written to debug log.',
  ].join('\n')

  return { type: 'text', value: summary }
}
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user