Compare commits
7 Commits
fix/websea...fix/update
| Author | SHA1 | Date |
|---|---|---|
| | 8fb0316e46 | |
| | e346b8d5ec | |
| | b750e9e97d | |
| | 28de94df5d | |
| | 23e8cfbd5b | |
| | 531e3f1059 | |
| | 3c4d8435c4 | |
.env.example (22 changes)
@@ -149,6 +149,23 @@ ANTHROPIC_API_KEY=sk-ant-your-key-here
 # Use a custom OpenAI-compatible endpoint (optional — defaults to api.openai.com)
 # OPENAI_BASE_URL=https://api.openai.com/v1
+
+# Fallback context window size (tokens) when the model is not found in the
+# built-in table (default: 128000). Increase this for models with larger
+# context windows (e.g. 200000 for Claude-sized contexts).
+# CLAUDE_CODE_OPENAI_FALLBACK_CONTEXT_WINDOW=128000
+
+# Per-model context window overrides as a JSON object.
+# Takes precedence over the built-in table, so you can register new or
+# custom models without patching source.
+# Example: CLAUDE_CODE_OPENAI_CONTEXT_WINDOWS={"my-corp/llm-v3":262144,"gpt-4o-mini":128000}
+# CLAUDE_CODE_OPENAI_CONTEXT_WINDOWS=
+
+# Per-model maximum output token overrides as a JSON object.
+# Use this alongside CLAUDE_CODE_OPENAI_CONTEXT_WINDOWS when your model
+# supports a different output limit than what the built-in table specifies.
+# Example: CLAUDE_CODE_OPENAI_MAX_OUTPUT_TOKENS={"my-corp/llm-v3":8192}
+# CLAUDE_CODE_OPENAI_MAX_OUTPUT_TOKENS=
 
 
 # -----------------------------------------------------------------------------
 # Option 3: Google Gemini
@@ -272,6 +289,11 @@ ANTHROPIC_API_KEY=sk-ant-your-key-here
 # trigger "Extra required key ... supplied" errors from OpenAI-compatible endpoints
 # OPENCLAUDE_DISABLE_STRICT_TOOLS=1
+
+# Disable hidden <system-reminder> messages injected into tool output
+# Suppresses the file-read cyber-risk reminder and the todo/task tool nudges
+# Useful for users who want full transparency over what the model sees
+# OPENCLAUDE_DISABLE_TOOL_REMINDERS=1
 
 # Custom timeout for API requests in milliseconds (default: varies)
 # API_TIMEOUT_MS=60000
 
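As a side note, a minimal sketch of supplying these overrides programmatically rather than via .env (the variable names come from the hunk above; the model id `my-corp/llm-v3` is the same hypothetical one used in the comments, and since the limit tables are parsed once at startup, the values must be in the environment before the limits module loads):

```ts
// Hypothetical launch-time configuration for a custom model.
process.env.CLAUDE_CODE_OPENAI_CONTEXT_WINDOWS = JSON.stringify({
  'my-corp/llm-v3': 262144,
})
process.env.CLAUDE_CODE_OPENAI_MAX_OUTPUT_TOKENS = JSON.stringify({
  'my-corp/llm-v3': 8192,
})
// Malformed JSON is silently ignored and the built-in tables apply
// (see the openaiContextWindows.ts hunk further down).
```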
@@ -169,6 +169,14 @@ describe('Web search result count improvements', () => {
 
     expect(content).toMatch(/max_uses:\s*15/)
   })
+
+  test('codex web search path guarantees a non-empty result body', async () => {
+    const content = await file(
+      'tools/WebSearchTool/WebSearchTool.ts',
+    ).text()
+
+    expect(content).toContain("results.push('No results found.')")
+  })
 })
 
 // ---------------------------------------------------------------------------
@@ -35,15 +35,20 @@ export async function update() {
   // binary (without it).
   if (getAPIProvider() !== 'firstParty') {
     writeToStdout(
-      chalk.yellow('Auto-update is not available for third-party provider builds.\n') +
-        'To update, pull the latest source from the repository and rebuild:\n' +
-        ' git pull && bun install && bun run build\n',
+      chalk.yellow(
+        `Auto-update is not available for third-party provider builds.\n`,
+      ) +
+        `Current version: ${MACRO.DISPLAY_VERSION}\n\n` +
+        `To update, reinstall from npm:\n` +
+        chalk.bold(` npm install -g ${MACRO.PACKAGE_URL}@latest`) + '\n\n' +
+        `Or, if you built from source, pull and rebuild:\n` +
+        chalk.bold(' git pull && bun install && bun run build') + '\n',
     )
-    return
+    await gracefulShutdown(0)
   }
 
   logEvent('tengu_update_check', {})
-  writeToStdout(`Current version: ${MACRO.VERSION}\n`)
+  writeToStdout(`Current version: ${MACRO.DISPLAY_VERSION}\n`)
 
   const channel = getInitialSettings()?.autoUpdatesChannel ?? 'latest'
   writeToStdout(`Checking for updates to ${channel} version...\n`)
@@ -123,9 +128,14 @@ export async function update() {
   if (diagnostic.installationType === 'development') {
     writeToStdout('\n')
     writeToStdout(
-      chalk.yellow('Warning: Cannot update development build') + '\n',
+      chalk.yellow('You are running a development build — auto-update is unavailable.') + '\n',
     )
-    await gracefulShutdown(1)
+    writeToStdout('To update, pull the latest source and rebuild:\n')
+    writeToStdout(chalk.bold(' git pull && bun install && bun run build') + '\n')
+    writeToStdout('\n')
+    writeToStdout('Or reinstall from npm:\n')
+    writeToStdout(chalk.bold(` npm install -g ${MACRO.PACKAGE_URL}@latest`) + '\n')
+    await gracefulShutdown(0)
   }
 
   // Check if running from a package manager
@@ -136,8 +146,8 @@ export async function update() {
   if (packageManager === 'homebrew') {
     writeToStdout('Claude is managed by Homebrew.\n')
     const latest = await getLatestVersion(channel)
-    if (latest && !gte(MACRO.VERSION, latest)) {
-      writeToStdout(`Update available: ${MACRO.VERSION} → ${latest}\n`)
+    if (latest && !gte(MACRO.DISPLAY_VERSION, latest)) {
+      writeToStdout(`Update available: ${MACRO.DISPLAY_VERSION} → ${latest}\n`)
       writeToStdout('\n')
       writeToStdout('To update, run:\n')
       writeToStdout(chalk.bold(' brew upgrade claude-code') + '\n')
@@ -147,8 +157,8 @@ export async function update() {
   } else if (packageManager === 'winget') {
     writeToStdout('Claude is managed by winget.\n')
     const latest = await getLatestVersion(channel)
-    if (latest && !gte(MACRO.VERSION, latest)) {
-      writeToStdout(`Update available: ${MACRO.VERSION} → ${latest}\n`)
+    if (latest && !gte(MACRO.DISPLAY_VERSION, latest)) {
+      writeToStdout(`Update available: ${MACRO.DISPLAY_VERSION} → ${latest}\n`)
       writeToStdout('\n')
       writeToStdout('To update, run:\n')
       writeToStdout(
@@ -160,8 +170,8 @@ export async function update() {
   } else if (packageManager === 'apk') {
     writeToStdout('Claude is managed by apk.\n')
     const latest = await getLatestVersion(channel)
-    if (latest && !gte(MACRO.VERSION, latest)) {
-      writeToStdout(`Update available: ${MACRO.VERSION} → ${latest}\n`)
+    if (latest && !gte(MACRO.DISPLAY_VERSION, latest)) {
+      writeToStdout(`Update available: ${MACRO.DISPLAY_VERSION} → ${latest}\n`)
       writeToStdout('\n')
       writeToStdout('To update, run:\n')
       writeToStdout(chalk.bold(' apk upgrade claude-code') + '\n')
@@ -250,14 +260,14 @@ export async function update() {
     await gracefulShutdown(1)
   }
 
-  if (result.latestVersion === MACRO.VERSION) {
+  if (result.latestVersion === MACRO.DISPLAY_VERSION) {
     writeToStdout(
-      chalk.green(`Claude Code is up to date (${MACRO.VERSION})`) + '\n',
+      chalk.green(`OpenClaude is up to date (${MACRO.DISPLAY_VERSION})`) + '\n',
     )
   } else {
     writeToStdout(
       chalk.green(
-        `Successfully updated from ${MACRO.VERSION} to version ${result.latestVersion}`,
+        `Successfully updated from ${MACRO.DISPLAY_VERSION} to version ${result.latestVersion}`,
       ) + '\n',
     )
     await regenerateCompletionCache()
@@ -320,15 +330,15 @@ export async function update() {
   }
 
   // Check if versions match exactly, including any build metadata (like SHA)
-  if (latestVersion === MACRO.VERSION) {
+  if (latestVersion === MACRO.DISPLAY_VERSION) {
     writeToStdout(
-      chalk.green(`Claude Code is up to date (${MACRO.VERSION})`) + '\n',
+      chalk.green(`OpenClaude is up to date (${MACRO.DISPLAY_VERSION})`) + '\n',
     )
     await gracefulShutdown(0)
   }
 
   writeToStdout(
-    `New version available: ${latestVersion} (current: ${MACRO.VERSION})\n`,
+    `New version available: ${latestVersion} (current: ${MACRO.DISPLAY_VERSION})\n`,
   )
   writeToStdout('Installing update...\n')
@@ -388,7 +398,7 @@ export async function update() {
     case 'success':
       writeToStdout(
         chalk.green(
-          `Successfully updated from ${MACRO.VERSION} to version ${latestVersion}`,
+          `Successfully updated from ${MACRO.DISPLAY_VERSION} to version ${latestVersion}`,
         ) + '\n',
       )
       await regenerateCompletionCache()
src/components/StartupScreen.test.ts (new file, 158 lines)
@@ -0,0 +1,158 @@
import { afterEach, beforeEach, describe, expect, test } from 'bun:test'
import { detectProvider } from './StartupScreen.js'

const ENV_KEYS = [
  'CLAUDE_CODE_USE_OPENAI',
  'CLAUDE_CODE_USE_GEMINI',
  'CLAUDE_CODE_USE_GITHUB',
  'CLAUDE_CODE_USE_BEDROCK',
  'CLAUDE_CODE_USE_VERTEX',
  'CLAUDE_CODE_USE_MISTRAL',
  'OPENAI_BASE_URL',
  'OPENAI_API_KEY',
  'OPENAI_MODEL',
  'GEMINI_MODEL',
  'MISTRAL_MODEL',
  'ANTHROPIC_MODEL',
  'NVIDIA_NIM',
  'MINIMAX_API_KEY',
]

const originalEnv: Record<string, string | undefined> = {}

beforeEach(() => {
  for (const key of ENV_KEYS) {
    originalEnv[key] = process.env[key]
    delete process.env[key]
  }
})

afterEach(() => {
  for (const key of ENV_KEYS) {
    if (originalEnv[key] === undefined) {
      delete process.env[key]
    } else {
      process.env[key] = originalEnv[key]
    }
  }
})

function setupOpenAIMode(baseUrl: string, model: string): void {
  process.env.CLAUDE_CODE_USE_OPENAI = '1'
  process.env.OPENAI_BASE_URL = baseUrl
  process.env.OPENAI_MODEL = model
  process.env.OPENAI_API_KEY = 'test-key'
}

// --- Issue #855: aggregator URL must win over vendor-prefixed model name ---

describe('detectProvider — aggregator URL authoritative over model-name substring (#855)', () => {
  test('OpenRouter + deepseek/deepseek-chat labels as OpenRouter', () => {
    setupOpenAIMode('https://openrouter.ai/api/v1', 'deepseek/deepseek-chat')
    expect(detectProvider().name).toBe('OpenRouter')
  })

  test('OpenRouter + moonshotai/kimi-k2 labels as OpenRouter', () => {
    setupOpenAIMode('https://openrouter.ai/api/v1', 'moonshotai/kimi-k2')
    expect(detectProvider().name).toBe('OpenRouter')
  })

  test('OpenRouter + mistralai/mistral-large labels as OpenRouter', () => {
    setupOpenAIMode('https://openrouter.ai/api/v1', 'mistralai/mistral-large')
    expect(detectProvider().name).toBe('OpenRouter')
  })

  test('OpenRouter + meta-llama/llama-3.3 labels as OpenRouter', () => {
    setupOpenAIMode('https://openrouter.ai/api/v1', 'meta-llama/llama-3.3-70b-instruct')
    expect(detectProvider().name).toBe('OpenRouter')
  })

  test('Together + deepseek-ai/DeepSeek-V3 labels as Together AI', () => {
    setupOpenAIMode('https://api.together.xyz/v1', 'deepseek-ai/DeepSeek-V3')
    expect(detectProvider().name).toBe('Together AI')
  })

  test('Together + meta-llama/Llama-3.3 labels as Together AI', () => {
    setupOpenAIMode('https://api.together.xyz/v1', 'meta-llama/Llama-3.3-70B-Instruct-Turbo')
    expect(detectProvider().name).toBe('Together AI')
  })

  test('Groq + deepseek-r1-distill-llama-70b labels as Groq', () => {
    setupOpenAIMode('https://api.groq.com/openai/v1', 'deepseek-r1-distill-llama-70b')
    expect(detectProvider().name).toBe('Groq')
  })

  test('Groq + llama-3.3-70b-versatile labels as Groq', () => {
    setupOpenAIMode('https://api.groq.com/openai/v1', 'llama-3.3-70b-versatile')
    expect(detectProvider().name).toBe('Groq')
  })

  test('Azure + any deepseek deployment labels as Azure OpenAI', () => {
    setupOpenAIMode('https://my-resource.openai.azure.com/', 'deepseek-chat')
    expect(detectProvider().name).toBe('Azure OpenAI')
  })
})

// --- Direct vendor endpoints still label correctly (regression) ---

describe('detectProvider — direct vendor endpoints', () => {
  test('api.deepseek.com labels as DeepSeek', () => {
    setupOpenAIMode('https://api.deepseek.com/v1', 'deepseek-chat')
    expect(detectProvider().name).toBe('DeepSeek')
  })

  test('api.moonshot.cn labels as Moonshot (Kimi)', () => {
    setupOpenAIMode('https://api.moonshot.cn/v1', 'moonshot-v1-8k')
    expect(detectProvider().name).toBe('Moonshot (Kimi)')
  })

  test('api.mistral.ai labels as Mistral', () => {
    setupOpenAIMode('https://api.mistral.ai/v1', 'mistral-large-latest')
    expect(detectProvider().name).toBe('Mistral')
  })

  test('default OpenAI URL + gpt-4o labels as OpenAI', () => {
    setupOpenAIMode('https://api.openai.com/v1', 'gpt-4o')
    expect(detectProvider().name).toBe('OpenAI')
  })
})

// --- rawModel fallback for generic/custom endpoints ---

describe('detectProvider — rawModel fallback when URL is generic', () => {
  test('custom proxy + deepseek-chat falls back to DeepSeek', () => {
    setupOpenAIMode('https://my-proxy.internal/v1', 'deepseek-chat')
    expect(detectProvider().name).toBe('DeepSeek')
  })

  test('custom proxy + kimi-k2 falls back to Moonshot (Kimi)', () => {
    setupOpenAIMode('https://my-proxy.internal/v1', 'kimi-k2-instruct')
    expect(detectProvider().name).toBe('Moonshot (Kimi)')
  })

  test('custom proxy + llama-3.3 falls back to Meta Llama', () => {
    setupOpenAIMode('https://my-proxy.internal/v1', 'llama-3.3-70b')
    expect(detectProvider().name).toBe('Meta Llama')
  })

  test('custom proxy + mistral-large falls back to Mistral', () => {
    setupOpenAIMode('https://my-proxy.internal/v1', 'mistral-large-latest')
    expect(detectProvider().name).toBe('Mistral')
  })
})

// --- Explicit env flags win over URL heuristics ---

describe('detectProvider — explicit dedicated-provider env flags', () => {
  test('NVIDIA_NIM=1 overrides aggregator URL', () => {
    setupOpenAIMode('https://openrouter.ai/api/v1', 'some-nim-model')
    process.env.NVIDIA_NIM = '1'
    expect(detectProvider().name).toBe('NVIDIA NIM')
  })

  test('MINIMAX_API_KEY overrides aggregator URL', () => {
    setupOpenAIMode('https://openrouter.ai/api/v1', 'any-model')
    process.env.MINIMAX_API_KEY = 'test-key'
    expect(detectProvider().name).toBe('MiniMax')
  })
})
@@ -83,7 +83,7 @@ const LOGO_CLAUDE = [
 
 // ─── Provider detection ───────────────────────────────────────────────────────
 
-function detectProvider(): { name: string; model: string; baseUrl: string; isLocal: boolean } {
+export function detectProvider(): { name: string; model: string; baseUrl: string; isLocal: boolean } {
   const useGemini = process.env.CLAUDE_CODE_USE_GEMINI === '1' || process.env.CLAUDE_CODE_USE_GEMINI === 'true'
   const useGithub = process.env.CLAUDE_CODE_USE_GITHUB === '1' || process.env.CLAUDE_CODE_USE_GITHUB === 'true'
   const useOpenAI = process.env.CLAUDE_CODE_USE_OPENAI === '1' || process.env.CLAUDE_CODE_USE_OPENAI === 'true'
@@ -117,30 +117,34 @@ function detectProvider(): { name: string; model: string; baseUrl: string; isLoc
   const baseUrl = resolvedRequest.baseUrl
   const isLocal = isLocalProviderUrl(baseUrl)
   let name = 'OpenAI'
-  if (/nvidia/i.test(baseUrl) || /nvidia/i.test(rawModel) || process.env.NVIDIA_NIM)
-    name = 'NVIDIA NIM'
-  else if (/minimax/i.test(baseUrl) || /minimax/i.test(rawModel) || process.env.MINIMAX_API_KEY)
-    name = 'MiniMax'
-  else if (resolvedRequest.transport === 'codex_responses' || baseUrl.includes('chatgpt.com/backend-api/codex'))
+  // Explicit dedicated-provider env flags win.
+  if (process.env.NVIDIA_NIM) name = 'NVIDIA NIM'
+  else if (process.env.MINIMAX_API_KEY) name = 'MiniMax'
+  else if (
+    resolvedRequest.transport === 'codex_responses' ||
+    baseUrl.includes('chatgpt.com/backend-api/codex')
+  )
     name = 'Codex'
-  else if (/moonshot/i.test(baseUrl) || /kimi/i.test(rawModel))
-    name = 'Moonshot (Kimi)'
-  else if (/deepseek/i.test(baseUrl) || /deepseek/i.test(rawModel))
-    name = 'DeepSeek'
-  else if (/openrouter/i.test(baseUrl))
-    name = 'OpenRouter'
-  else if (/together/i.test(baseUrl))
-    name = 'Together AI'
-  else if (/groq/i.test(baseUrl))
-    name = 'Groq'
-  else if (/mistral/i.test(baseUrl) || /mistral/i.test(rawModel))
-    name = 'Mistral'
-  else if (/azure/i.test(baseUrl))
-    name = 'Azure OpenAI'
-  else if (/llama/i.test(rawModel))
-    name = 'Meta Llama'
-  else if (isLocal)
-    name = getLocalOpenAICompatibleProviderLabel(baseUrl)
+  // Base URL is authoritative — must precede rawModel checks so aggregators
+  // (OpenRouter/Together/Groq) aren't mislabelled as DeepSeek/Kimi/etc.
+  // when routed to models whose IDs contain a vendor prefix. See issue #855.
+  else if (/openrouter/i.test(baseUrl)) name = 'OpenRouter'
+  else if (/together/i.test(baseUrl)) name = 'Together AI'
+  else if (/groq/i.test(baseUrl)) name = 'Groq'
+  else if (/azure/i.test(baseUrl)) name = 'Azure OpenAI'
+  else if (/nvidia/i.test(baseUrl)) name = 'NVIDIA NIM'
+  else if (/minimax/i.test(baseUrl)) name = 'MiniMax'
+  else if (/moonshot/i.test(baseUrl)) name = 'Moonshot (Kimi)'
+  else if (/deepseek/i.test(baseUrl)) name = 'DeepSeek'
+  else if (/mistral/i.test(baseUrl)) name = 'Mistral'
+  // rawModel fallback — fires only when base URL is generic/custom.
+  else if (/nvidia/i.test(rawModel)) name = 'NVIDIA NIM'
+  else if (/minimax/i.test(rawModel)) name = 'MiniMax'
+  else if (/kimi/i.test(rawModel)) name = 'Moonshot (Kimi)'
+  else if (/deepseek/i.test(rawModel)) name = 'DeepSeek'
+  else if (/mistral/i.test(rawModel)) name = 'Mistral'
+  else if (/llama/i.test(rawModel)) name = 'Meta Llama'
+  else if (isLocal) name = getLocalOpenAICompatibleProviderLabel(baseUrl)
 
   // Resolve model alias to actual model name + reasoning effort
   let displayModel = resolvedRequest.resolvedModel
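A worked illustration of the new precedence, mirroring the #855 tests above (all env values hypothetical):

```ts
process.env.CLAUDE_CODE_USE_OPENAI = '1'
process.env.OPENAI_BASE_URL = 'https://openrouter.ai/api/v1' // aggregator URL
process.env.OPENAI_MODEL = 'deepseek/deepseek-chat'          // vendor-prefixed id

// The base-URL checks now run before the rawModel checks, so the label is
// the aggregator rather than the vendor named inside the model id:
detectProvider().name // 'OpenRouter', not 'DeepSeek' (issue #855)
```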
@@ -8,6 +8,7 @@ import {
   convertCodexResponseToAnthropicMessage,
   convertToolsToResponsesTools,
 } from './codexShim.js'
+import { __test as webSearchToolTest } from '../../tools/WebSearchTool/WebSearchTool.js'
 
 const tempDirs: string[] = []
 const originalEnv = {
@@ -609,6 +610,164 @@ describe('Codex request translation', () => {
     ])
   })
 
+  test('recovers Codex web search text and sources from sparse completed response', () => {
+    const output = webSearchToolTest.makeOutputFromCodexWebSearchResponse(
+      {
+        output: [
+          {
+            type: 'web_search_call',
+            sources: [
+              {
+                title: 'OpenClaude repo',
+                url: 'https://github.com/example/openclaude',
+              },
+            ],
+          },
+          {
+            type: 'message',
+            role: 'assistant',
+            content: [
+              {
+                type: 'text',
+                text: 'OpenClaude is available on GitHub.',
+                sources: [
+                  {
+                    title: 'Docs',
+                    url: 'https://docs.example.com/openclaude',
+                  },
+                ],
+              },
+            ],
+          },
+        ],
+      },
+      'OpenClaude GitHub 2026',
+      0.42,
+    )
+
+    expect(output.results).toEqual([
+      'OpenClaude is available on GitHub.',
+      {
+        tool_use_id: 'codex-web-search',
+        content: [
+          {
+            title: 'OpenClaude repo',
+            url: 'https://github.com/example/openclaude',
+          },
+          {
+            title: 'Docs',
+            url: 'https://docs.example.com/openclaude',
+          },
+        ],
+      },
+    ])
+  })
+
+  test('falls back to a non-empty Codex web search result message', () => {
+    const output = webSearchToolTest.makeOutputFromCodexWebSearchResponse(
+      { output: [] },
+      'OpenClaude GitHub 2026',
+      0.11,
+    )
+
+    expect(output.results).toEqual(['No results found.'])
+  })
+
+  test('surfaces Codex web search failure reason with a message', () => {
+    const output = webSearchToolTest.makeOutputFromCodexWebSearchResponse(
+      {
+        output: [
+          {
+            type: 'web_search_call',
+            status: 'failed',
+            error: { message: 'upstream search provider rate-limited' },
+          },
+        ],
+      },
+      'OpenClaude GitHub 2026',
+      0.05,
+    )
+
+    expect(output.results).toEqual([
+      'Web search failed: upstream search provider rate-limited',
+    ])
+  })
+
+  test('surfaces Codex web search failure reason nested under action.error', () => {
+    const output = webSearchToolTest.makeOutputFromCodexWebSearchResponse(
+      {
+        output: [
+          {
+            type: 'web_search_call',
+            status: 'failed',
+            action: { error: { message: 'query blocked' } },
+          },
+        ],
+      },
+      'OpenClaude GitHub 2026',
+      0.05,
+    )
+
+    expect(output.results).toEqual(['Web search failed: query blocked'])
+  })
+
+  test('handles Codex web search failure with no reason attached', () => {
+    const output = webSearchToolTest.makeOutputFromCodexWebSearchResponse(
+      {
+        output: [
+          {
+            type: 'web_search_call',
+            status: 'failed',
+          },
+        ],
+      },
+      'OpenClaude GitHub 2026',
+      0.05,
+    )
+
+    expect(output.results).toEqual(['Web search failed.'])
+  })
+
+  test('a failure item does not suppress sources from a later message item', () => {
+    const output = webSearchToolTest.makeOutputFromCodexWebSearchResponse(
+      {
+        output: [
+          {
+            type: 'web_search_call',
+            status: 'failed',
+            error: { message: 'partial outage' },
+          },
+          {
+            type: 'message',
+            role: 'assistant',
+            content: [
+              {
+                type: 'output_text',
+                text: 'Partial results below.',
+                sources: [
+                  { title: 'Docs', url: 'https://docs.example.com/openclaude' },
+                ],
+              },
+            ],
+          },
+        ],
+      },
+      'OpenClaude GitHub 2026',
+      0.05,
+    )
+
+    expect(output.results).toEqual([
+      'Web search failed: partial outage',
+      'Partial results below.',
+      {
+        tool_use_id: 'codex-web-search',
+        content: [
+          { title: 'Docs', url: 'https://docs.example.com/openclaude' },
+        ],
+      },
+    ])
+  })
+
   test('translates Codex SSE text stream into Anthropic events', async () => {
     const responseText = [
       'event: response.output_item.added',
@@ -733,6 +733,9 @@ export const CYBER_RISK_MITIGATION_REMINDER =
 const MITIGATION_EXEMPT_MODELS = new Set(['claude-opus-4-6'])
 
 function shouldIncludeFileReadMitigation(): boolean {
+  if (isEnvTruthy(process.env.OPENCLAUDE_DISABLE_TOOL_REMINDERS)) {
+    return false
+  }
   const shortName = getCanonicalName(getMainLoopModel())
   return !MITIGATION_EXEMPT_MODELS.has(shortName)
 }
src/tools/WebFetchTool/applyPromptFallback.test.ts (new file, 87 lines)
@@ -0,0 +1,87 @@
import { afterEach, beforeEach, expect, mock, test } from 'bun:test'

// Mock the Anthropic-API-side before importing the module under test, so
// queryHaiku resolves into whatever the individual test wants (slow, failing,
// or successful). We preserve every other export from claude.js so unrelated
// transitive imports still work.
const haikuMock = mock()

beforeEach(async () => {
  haikuMock.mockReset()
  const actual = await import('../../services/api/claude.js')
  mock.module('../../services/api/claude.js', () => ({
    ...actual,
    queryHaiku: haikuMock,
  }))
})

afterEach(() => {
  mock.restore()
})

async function runApply(markdown = 'Hello world.', signal?: AbortSignal): Promise<string> {
  const nonce = `${Date.now()}-${Math.random()}`
  const { applyPromptToMarkdown } =
    await import(`./utils.js?ts=${nonce}`)
  const ctrl = new AbortController()
  return applyPromptToMarkdown(
    'summarize',
    markdown,
    signal ?? ctrl.signal,
    false,
    false,
  )
}

test('returns raw truncated markdown when queryHaiku throws', async () => {
  haikuMock.mockImplementation(async () => {
    throw new Error('MiniMax rejected the model name')
  })

  const output = await runApply('Gitlawb homepage content.')
  expect(output).toContain('[Secondary-model summarization unavailable')
  expect(output).toContain('Gitlawb homepage content.')
})

test('returns raw truncated markdown when queryHaiku simulates a timeout', async () => {
  // Simulating raceWithTimeout's rejection path directly — we can't actually
  // wait 45s in a test. The error shape matches what raceWithTimeout produces.
  haikuMock.mockImplementation(async () => {
    const err = new Error('Secondary-model summarization timed out after 45000ms')
    ;(err as NodeJS.ErrnoException).code = 'SECONDARY_MODEL_TIMEOUT'
    throw err
  })

  const output = await runApply('Slow provider content.')
  expect(output).toContain('[Secondary-model summarization unavailable')
  expect(output).toContain('Slow provider content.')
})

test('returns the model response when queryHaiku succeeds', async () => {
  haikuMock.mockImplementation(async () => ({
    message: {
      content: [{ type: 'text', text: 'This page is about GitLawb, an AI legal platform.' }],
    },
  }))

  const output = await runApply('some page content')
  expect(output).toBe('This page is about GitLawb, an AI legal platform.')
})

test('returns fallback when queryHaiku resolves with empty content', async () => {
  haikuMock.mockImplementation(async () => ({ message: { content: [] } }))

  const output = await runApply('some page content')
  expect(output).toContain('[Secondary-model summarization unavailable')
  expect(output).toContain('some page content')
})

test('propagates AbortError from the caller signal', async () => {
  const ctrl = new AbortController()
  haikuMock.mockImplementation(async () => {
    ctrl.abort()
    return new Promise(() => {})
  })

  await expect(runApply('content', ctrl.signal)).rejects.toThrow()
})
@@ -20,8 +20,11 @@ afterEach(() => {
 describe('checkDomainBlocklist', () => {
   test('returns allowed without API call in OpenAI mode', async () => {
     process.env.CLAUDE_CODE_USE_OPENAI = '1'
+    const actual = await import('../../utils/model/providers.js')
     mock.module('../../utils/model/providers.js', () => ({
+      ...actual,
       getAPIProvider: () => 'openai',
+      isFirstPartyAnthropicBaseUrl: () => false,
     }))
     const getSpy = mock(() =>
       Promise.resolve({ status: 200, data: { can_fetch: true } }),
@@ -37,8 +40,11 @@ describe('checkDomainBlocklist', () => {
 
   test('returns allowed without API call in Gemini mode', async () => {
     process.env.CLAUDE_CODE_USE_GEMINI = '1'
+    const actual = await import('../../utils/model/providers.js')
     mock.module('../../utils/model/providers.js', () => ({
+      ...actual,
       getAPIProvider: () => 'gemini',
+      isFirstPartyAnthropicBaseUrl: () => false,
     }))
     const getSpy = mock(() =>
       Promise.resolve({ status: 200, data: { can_fetch: true } }),
@@ -57,8 +63,11 @@ describe('checkDomainBlocklist', () => {
     delete process.env.CLAUDE_CODE_USE_GEMINI
     delete process.env.CLAUDE_CODE_USE_GITHUB
 
+    const actual = await import('../../utils/model/providers.js')
     mock.module('../../utils/model/providers.js', () => ({
+      ...actual,
       getAPIProvider: () => 'firstParty',
+      isFirstPartyAnthropicBaseUrl: () => true,
     }))
     const getSpy = mock(() =>
       Promise.resolve({ status: 200, data: { can_fetch: true } }),
@@ -275,20 +275,76 @@ export async function getWithPermittedRedirects(
   if (depth > MAX_REDIRECTS) {
     throw new Error(`Too many redirects (exceeded ${MAX_REDIRECTS})`)
   }
 
+  const axiosConfig = {
+    signal,
+    timeout: FETCH_TIMEOUT_MS,
+    maxRedirects: 0,
+    responseType: 'arraybuffer' as const,
+    maxContentLength: MAX_HTTP_CONTENT_LENGTH,
+    lookup: ssrfGuardedLookup,
+    headers: {
+      Accept: 'text/markdown, text/html, */*',
+      'User-Agent': getWebFetchUserAgent(),
+    },
+  }
+
   try {
-    return await axios.get(url, {
-      signal,
-      timeout: FETCH_TIMEOUT_MS,
-      maxRedirects: 0,
-      responseType: 'arraybuffer',
-      maxContentLength: MAX_HTTP_CONTENT_LENGTH,
-      lookup: ssrfGuardedLookup,
-      headers: {
-        Accept: 'text/markdown, text/html, */*',
-        'User-Agent': getWebFetchUserAgent(),
-      },
-    })
+    return await axios.get(url, axiosConfig)
   } catch (error) {
+    // Try native fetch as a fallback for timeout / network errors
+    // (Bun/Node bundled contexts occasionally hang with axios + custom lookup.)
+    const isTimeoutLike =
+      axios.isAxiosError(error) &&
+      (!error.response &&
+        (error.code === 'ECONNABORTED' ||
+          error.code === 'ETIMEDOUT' ||
+          error.message?.toLowerCase().includes('timeout')))
+    if (isTimeoutLike && !signal.aborted) {
+      try {
+        const fetchResponse = await fetch(url, {
+          signal,
+          redirect: 'manual',
+          headers: axiosConfig.headers,
+        })
+        // Handle redirects manually
+        if ([301, 302, 307, 308].includes(fetchResponse.status)) {
+          const redirectLocation = fetchResponse.headers.get('location')
+          if (!redirectLocation) {
+            throw new Error('Redirect missing Location header')
+          }
+          const redirectUrl = new URL(redirectLocation, url).toString()
+          if (redirectChecker(url, redirectUrl)) {
+            return getWithPermittedRedirects(
+              redirectUrl,
+              signal,
+              redirectChecker,
+              depth + 1,
+            )
+          } else {
+            return {
+              type: 'redirect' as const,
+              originalUrl: url,
+              redirectUrl,
+              statusCode: fetchResponse.status,
+            }
+          }
+        }
+        const arrayBuffer = await fetchResponse.arrayBuffer()
+        // Build an AxiosResponse-like shape so downstream code stays happy
+        return {
+          data: new Uint8Array(arrayBuffer),
+          status: fetchResponse.status,
+          statusText: fetchResponse.statusText,
+          headers: Object.fromEntries(fetchResponse.headers.entries()),
+          config: axiosConfig,
+          request: undefined,
+        } as unknown as AxiosResponse<ArrayBuffer>
+      } catch {
+        // Fall through to original error handling
+      }
+    }
+
     if (
       axios.isAxiosError(error) &&
       error.response &&
@@ -489,6 +545,58 @@ export async function getURLMarkdownContent(
     return entry
   }
 
+// Budget for the secondary-model summarization after fetch. If the small-
+// fast model is slow (e.g. a 200k-context third-party running a reasoning
+// pass over ~100KB of markdown), we'd rather fall back to raw truncated
+// markdown than hang the tool. Also keeps the worst-case WebFetch bounded
+// to FETCH_TIMEOUT_MS + SECONDARY_MODEL_TIMEOUT_MS regardless of provider.
+const SECONDARY_MODEL_TIMEOUT_MS = 45_000
+
+function raceWithTimeout<T>(
+  promise: Promise<T>,
+  timeoutMs: number,
+  signal: AbortSignal,
+): Promise<T> {
+  return new Promise<T>((resolve, reject) => {
+    const timer = setTimeout(() => {
+      const err = new Error(`Secondary-model summarization timed out after ${timeoutMs}ms`)
+      ;(err as NodeJS.ErrnoException).code = 'SECONDARY_MODEL_TIMEOUT'
+      reject(err)
+    }, timeoutMs)
+    const onAbort = () => {
+      clearTimeout(timer)
+      reject(new AbortError())
+    }
+    if (signal.aborted) {
+      clearTimeout(timer)
+      reject(new AbortError())
+      return
+    }
+    signal.addEventListener('abort', onAbort, { once: true })
+    promise.then(
+      value => {
+        clearTimeout(timer)
+        signal.removeEventListener('abort', onAbort)
+        resolve(value)
+      },
+      err => {
+        clearTimeout(timer)
+        signal.removeEventListener('abort', onAbort)
+        reject(err)
+      },
+    )
+  })
+}
+
+function buildFallbackMarkdownSummary(truncatedContent: string): string {
+  return [
+    '[Secondary-model summarization unavailable — returning raw fetched content.',
+    'This typically means the configured small-fast model took too long or errored.]',
+    '',
+    truncatedContent,
+  ].join('\n')
+}
+
 export async function applyPromptToMarkdown(
   prompt: string,
   markdownContent: string,
@@ -508,18 +616,35 @@ export async function applyPromptToMarkdown(
     prompt,
     isPreapprovedDomain,
   )
-  const assistantMessage = await queryHaiku({
-    systemPrompt: asSystemPrompt([]),
-    userPrompt: modelPrompt,
-    signal,
-    options: {
-      querySource: 'web_fetch_apply',
-      agents: [],
-      isNonInteractiveSession,
-      hasAppendSystemPrompt: false,
-      mcpTools: [],
-    },
-  })
+  let assistantMessage
+  try {
+    assistantMessage = await raceWithTimeout(
+      queryHaiku({
+        systemPrompt: asSystemPrompt([]),
+        userPrompt: modelPrompt,
+        signal,
+        options: {
+          querySource: 'web_fetch_apply',
+          agents: [],
+          isNonInteractiveSession,
+          hasAppendSystemPrompt: false,
+          mcpTools: [],
+        },
+      }),
+      SECONDARY_MODEL_TIMEOUT_MS,
+      signal,
+    )
+  } catch (err) {
+    // User interrupts and SIGINTs still propagate. Everything else (timeout,
+    // provider-side error, unsupported model on third-party endpoint) falls
+    // back to raw markdown so the user still gets usable content rather than
+    // a hang. Log so it's visible in debug traces.
+    if (err instanceof AbortError || (err as Error)?.name === 'AbortError') {
+      throw err
+    }
+    logError(err)
+    return buildFallbackMarkdownSummary(truncatedContent)
+  }
 
   // We need to bubble this up, so that the tool call throws, causing us to return
   // an is_error tool_use block to the server, and render a red dot in the UI.
@@ -534,5 +659,5 @@ export async function applyPromptToMarkdown(
       return contentBlock.text
     }
   }
-  return 'No response from model'
+  return buildFallbackMarkdownSummary(truncatedContent)
 }
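A sketch of the failure path this change introduces, assuming the call shape used in applyPromptFallback.test.ts above:

```ts
// Hypothetical caller; `markdown` is assumed to hold the fetched page content.
const ctrl = new AbortController()
const summary = await applyPromptToMarkdown(
  'summarize',
  markdown,
  ctrl.signal,
  false, // isNonInteractiveSession (assumed)
  false,
)
// If queryHaiku throws or exceeds SECONDARY_MODEL_TIMEOUT_MS (45s), `summary`
// begins with '[Secondary-model summarization unavailable' and carries the
// raw truncated markdown; aborting via ctrl.abort() still rejects instead.
```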
@@ -203,6 +203,61 @@ function buildCodexWebSearchInstructions(): string {
   ].join(' ')
 }
 
+function pushCodexTextResult(
+  results: (SearchResult | string)[],
+  value: unknown,
+): void {
+  if (typeof value !== 'string') return
+  const trimmed = value.trim()
+  if (trimmed) {
+    results.push(trimmed)
+  }
+}
+
+function addCodexSource(
+  sourceMap: Map<string, { title: string; url: string }>,
+  source: unknown,
+): void {
+  if (typeof source?.url !== 'string' || !source.url) return
+  sourceMap.set(source.url, {
+    title:
+      typeof source.title === 'string' && source.title
+        ? source.title
+        : source.url,
+    url: source.url,
+  })
+}
+
+function getCodexSources(item: Record<string, any>): unknown[] {
+  if (Array.isArray(item.action?.sources)) {
+    return item.action.sources
+  }
+  if (Array.isArray(item.sources)) {
+    return item.sources
+  }
+  if (Array.isArray(item.result?.sources)) {
+    return item.result.sources
+  }
+  return []
+}
+
+function extractCodexWebSearchFailure(item: Record<string, any>): string | undefined {
+  // Codex web_search_call items can carry a status field. When the tool
+  // call fails (rate limit, upstream error, model-side guardrail), the
+  // parser should surface a meaningful error rather than the generic
+  // "No results found." fallback. Shape observed across recent payloads:
+  //   { type: 'web_search_call', status: 'failed', error: { message?: string } }
+  //   { type: 'web_search_call', status: 'failed', action: { error?: { message?: string } } }
+  if (item?.status !== 'failed') return undefined
+  const reason =
+    (typeof item.error?.message === 'string' && item.error.message) ||
+    (typeof item.action?.error?.message === 'string' &&
+      item.action.error.message) ||
+    (typeof item.error === 'string' && item.error) ||
+    undefined
+  return reason ? `Web search failed: ${reason}` : 'Web search failed.'
+}
+
 function makeOutputFromCodexWebSearchResponse(
   response: Record<string, unknown>,
   query: string,
@@ -214,18 +269,12 @@ function makeOutputFromCodexWebSearchResponse(
 
   for (const item of output) {
     if (item?.type === 'web_search_call') {
-      const sources = Array.isArray(item.action?.sources)
-        ? item.action.sources
-        : []
-      for (const source of sources) {
-        if (typeof source?.url !== 'string' || !source.url) continue
-        sourceMap.set(source.url, {
-          title:
-            typeof source.title === 'string' && source.title
-              ? source.title
-              : source.url,
-          url: source.url,
-        })
+      const failure = extractCodexWebSearchFailure(item)
+      if (failure) {
+        results.push(failure)
+      }
+      for (const source of getCodexSources(item)) {
+        addCodexSource(sourceMap, source)
       }
       continue
     }
@@ -235,11 +284,12 @@ function makeOutputFromCodexWebSearchResponse(
     }
 
     for (const part of item.content) {
-      if (part?.type === 'output_text' && typeof part.text === 'string') {
-        const trimmed = part.text.trim()
-        if (trimmed) {
-          results.push(trimmed)
-        }
+      if (part?.type === 'output_text' || part?.type === 'text') {
+        pushCodexTextResult(results, part.text)
       }
 
+      for (const source of getCodexSources(part)) {
+        addCodexSource(sourceMap, source)
+      }
+
       const annotations = Array.isArray(part?.annotations)
@@ -247,23 +297,13 @@ function makeOutputFromCodexWebSearchResponse(
         : []
       for (const annotation of annotations) {
         if (annotation?.type !== 'url_citation') continue
-        if (typeof annotation.url !== 'string' || !annotation.url) continue
-        sourceMap.set(annotation.url, {
-          title:
-            typeof annotation.title === 'string' && annotation.title
-              ? annotation.title
-              : annotation.url,
-          url: annotation.url,
-        })
+        addCodexSource(sourceMap, annotation)
       }
     }
   }
 
-  if (results.length === 0 && typeof response.output_text === 'string') {
-    const trimmed = response.output_text.trim()
-    if (trimmed) {
-      results.push(trimmed)
-    }
+  if (results.length === 0) {
+    pushCodexTextResult(results, response.output_text)
   }
 
   if (sourceMap.size > 0) {
@@ -273,6 +313,10 @@ function makeOutputFromCodexWebSearchResponse(
     })
   }
 
+  if (results.length === 0) {
+    results.push('No results found.')
+  }
+
   return {
     query,
     results,
@@ -280,6 +324,10 @@ function makeOutputFromCodexWebSearchResponse(
   }
 }
 
+export const __test = {
+  makeOutputFromCodexWebSearchResponse,
+}
+
 async function runCodexWebSearch(
   input: Input,
   signal: AbortSignal,
@@ -457,6 +505,19 @@ function shouldUseAdapterProvider(): boolean {
   return getAvailableProviders().length > 0
 }
 
+/**
+ * Returns true when the current provider has a working native or Codex
+ * web-search fallback after an adapter failure. OpenAI shim providers
+ * (moonshot, minimax, nvidia-nim, openai, github, etc.) do NOT support
+ * Anthropic's web_search_20250305 tool, so falling through to the native
+ * path silently produces "Did 0 searches".
+ */
+function hasNativeSearchFallback(): boolean {
+  if (isCodexResponsesWebSearchEnabled()) return true
+  const provider = getAPIProvider()
+  return provider === 'firstParty' || provider === 'vertex' || provider === 'foundry'
+}
+
 // ---------------------------------------------------------------------------
 // Tool export
 // ---------------------------------------------------------------------------
@@ -609,6 +670,17 @@ export const WebSearchTool = buildTool({
       // Auto mode: only fall through on transient errors (network, timeout, 5xx).
       // Config / guardrail errors (SSRF, HTTPS, bad URL, etc.) must surface.
       if (!isTransientError(err)) throw err
+      // No viable fallback for this provider — surface the adapter error
+      // instead of falling through to a broken native path.
+      if (!hasNativeSearchFallback()) {
+        const provider = getAPIProvider()
+        const errMsg = err instanceof Error ? err.message : String(err)
+        throw new Error(
+          `Web search is unavailable for provider "${provider}". ` +
+            `The search adapter failed (${errMsg}). ` +
+            `Try switching to a provider with built-in web search (e.g. Anthropic, Codex) or try again later.`,
+        )
+      }
       console.error(
         `[web-search] Adapter failed, falling through to native: ${err}`,
       )
@@ -1,6 +1,44 @@
 import type { SearchInput, SearchProvider } from './types.js'
 import { applyDomainFilters, type ProviderOutput } from './types.js'
 
+// DuckDuckGo's HTML scraper aggressively blocks datacenter / repeat IPs with
+// an "anomaly in the request" response. When that happens we surface an
+// actionable error instead of the opaque scraper message so users know how
+// to configure a working backend.
+const DDG_ANOMALY_HINT =
+  'DuckDuckGo scraping is rate-limited from this network. ' +
+  'Configure a search backend with one of: ' +
+  'FIRECRAWL_API_KEY, TAVILY_API_KEY, EXA_API_KEY, YOU_API_KEY, ' +
+  'JINA_API_KEY, BING_API_KEY, MOJEEK_API_KEY, LINKUP_API_KEY — ' +
+  'or use an Anthropic / Vertex / Foundry provider for native web search.'
+
+const MAX_RETRIES = 3
+const INITIAL_BACKOFF_MS = 1000
+
+function isAnomalyError(message: string): boolean {
+  return /anomaly in the request|likely making requests too quickly/i.test(
+    message,
+  )
+}
+
+function isRetryableDDGError(err: unknown): boolean {
+  if (!(err instanceof Error)) return false
+  const msg = err.message.toLowerCase()
+  return (
+    msg.includes('anomaly') ||
+    msg.includes('too quickly') ||
+    msg.includes('rate limit') ||
+    msg.includes('timeout') ||
+    msg.includes('econnreset') ||
+    msg.includes('etimedout') ||
+    msg.includes('econnaborted')
+  )
+}
+
+function sleep(ms: number): Promise<void> {
+  return new Promise(r => setTimeout(r, ms))
+}
+
 export const duckduckgoProvider: SearchProvider = {
   name: 'duckduckgo',
 
@@ -19,22 +57,44 @@ export const duckduckgoProvider: SearchProvider = {
       throw new Error('duck-duck-scrape package not installed. Run: npm install duck-duck-scrape')
     }
-    if (signal?.aborted) throw new DOMException('Aborted', 'AbortError')
-    // TODO: duck-duck-scrape doesn't accept AbortSignal — can't cancel in-flight searches
-    const response = await search(input.query, { safeSearch: SafeSearchType.STRICT })
-
-    const hits = applyDomainFilters(
-      response.results.map(r => ({
-        title: r.title || r.url,
-        url: r.url,
-        description: r.description ?? undefined,
-      })),
-      input,
-    )
-
-    return {
-      hits,
-      providerName: 'duckduckgo',
-      durationSeconds: (performance.now() - start) / 1000,
+    let lastErr: unknown
+    for (let attempt = 0; attempt < MAX_RETRIES; attempt++) {
+      if (signal?.aborted) throw new DOMException('Aborted', 'AbortError')
+      try {
+        // TODO: duck-duck-scrape doesn't accept AbortSignal — can't cancel in-flight searches
+        const response = await search(input.query, { safeSearch: SafeSearchType.STRICT })
+
+        const hits = applyDomainFilters(
+          response.results.map(r => ({
+            title: r.title || r.url,
+            url: r.url,
+            description: r.description ?? undefined,
+          })),
+          input,
+        )
+
+        return {
+          hits,
+          providerName: 'duckduckgo',
+          durationSeconds: (performance.now() - start) / 1000,
+        }
+      } catch (err) {
+        lastErr = err
+        const msg = err instanceof Error ? err.message : String(err)
+        if (isAnomalyError(msg)) {
+          throw new Error(DDG_ANOMALY_HINT)
+        }
+        if (!isRetryableDDGError(err) || attempt === MAX_RETRIES - 1) {
+          throw err
+        }
+        // Exponential backoff with jitter: 1s, 2s, 4s +/- 20%
+        const baseDelay = INITIAL_BACKOFF_MS * Math.pow(2, attempt)
+        const jitter = baseDelay * 0.2 * (Math.random() * 2 - 1)
+        await sleep(baseDelay + jitter)
+      }
     }
+
+    throw lastErr
   },
 }
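The retry schedule implied by MAX_RETRIES = 3, INITIAL_BACKOFF_MS = 1000, and the 20% jitter works out as follows (a quick check, not part of the diff):

```ts
// At most two sleeps can occur: the third failed attempt rethrows.
for (let attempt = 0; attempt < 2; attempt++) {
  const base = 1000 * 2 ** attempt
  console.log(`retry ${attempt + 1}: ${base * 0.8}-${base * 1.2} ms`)
}
// retry 1: 800-1200 ms
// retry 2: 1600-2400 ms
```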
@@ -12,7 +12,12 @@ export const MODEL_CONTEXT_WINDOW_DEFAULT = 200_000
 // Fallback context window for unknown 3P models. Must be large enough that
 // the effective context (this minus output token reservation) stays positive,
 // otherwise auto-compact fires on every message (issue #635).
-export const OPENAI_FALLBACK_CONTEXT_WINDOW = 128_000
+// Override via CLAUDE_CODE_OPENAI_FALLBACK_CONTEXT_WINDOW env var to avoid
+// hardcoding when deploying models not yet in openaiContextWindows.ts.
+export const OPENAI_FALLBACK_CONTEXT_WINDOW = (() => {
+  const v = parseInt(process.env.CLAUDE_CODE_OPENAI_FALLBACK_CONTEXT_WINDOW ?? '', 10)
+  return !isNaN(v) && v > 0 ? v : 128_000
+})()
 
 // Maximum output tokens for compact operations
 export const COMPACT_MAX_OUTPUT_TOKENS = 20_000
@@ -75,6 +75,13 @@ async function importHookChainsHarness(
     getAgentName: () => senderName,
     getTeamName: () => teamName,
     getTeammateColor: () => 'blue',
+    // Keep parity with the real module's surface so later tests that
+    // run after this file (mock.module is process-global and mock.restore
+    // does not undo module mocks in Bun) do not see undefined members.
+    isTeammate: () => false,
+    isPlanModeRequired: () => false,
+    getAgentId: () => undefined,
+    getParentSessionId: () => undefined,
   }))
 
   mock.module('../bridge/replBridgeHandle.js', () => ({
@@ -75,6 +75,7 @@ import type {
 import { isAdvisorBlock } from './advisor.js'
 import { isAgentSwarmsEnabled } from './agentSwarmsEnabled.js'
 import { count } from './array.js'
+import { isEnvTruthy } from './envUtils.js'
 import {
   type Attachment,
   type HookAttachment,
@@ -3666,6 +3667,9 @@ Read the team config to discover your teammates' names. Check the task list peri
     ])
   }
   case 'todo_reminder': {
+    if (isEnvTruthy(process.env.OPENCLAUDE_DISABLE_TOOL_REMINDERS)) {
+      return []
+    }
     const todoItems = attachment.content
       .map((todo, index) => `${index + 1}. [${todo.status}] ${todo.content}`)
       .join('\n')
@@ -3686,6 +3690,9 @@ Read the team config to discover your teammates' names. Check the task list peri
     if (!isTodoV2Enabled()) {
       return []
     }
+    if (isEnvTruthy(process.env.OPENCLAUDE_DISABLE_TOOL_REMINDERS)) {
+      return []
+    }
     const taskItems = attachment.content
       .map(task => `#${task.id}. [${task.status}] ${task.subject}`)
       .join('\n')
@@ -1,7 +1,13 @@
-import { afterEach, beforeEach, expect, test } from 'bun:test'
+import { afterEach, beforeEach, expect, mock, test } from 'bun:test'
 
 import { saveGlobalConfig } from '../config.js'
-import { getUserSpecifiedModelSetting } from './model.js'
+import {
+  getDefaultHaikuModel,
+  getDefaultOpusModel,
+  getDefaultSonnetModel,
+  getSmallFastModel,
+  getUserSpecifiedModelSetting,
+} from './model.js'
 
 const SAVED_ENV = {
   CLAUDE_CODE_USE_OPENAI: process.env.CLAUDE_CODE_USE_OPENAI,
@@ -28,6 +34,11 @@ function restoreEnv(key: keyof typeof SAVED_ENV): void {
 }
 
 beforeEach(() => {
+  // Other test files (notably modelOptions.github.test.ts) install a
+  // persistent mock.module for './providers.js' that overrides getAPIProvider
+  // globally. Without mock.restore() here, those overrides bleed into this
+  // suite and the provider-kind branches we're testing become unreachable.
+  mock.restore()
   delete process.env.CLAUDE_CODE_USE_OPENAI
   delete process.env.CLAUDE_CODE_USE_GEMINI
   delete process.env.CLAUDE_CODE_USE_GITHUB
@@ -113,3 +124,76 @@ test('github provider still reads OPENAI_MODEL (regression guard)', () => {
   expect(model).toBe('github:copilot')
 })
 
+// ---------------------------------------------------------------------------
+// Default model helpers — must not fall through to claude-haiku-4-5 etc. for
+// OpenAI-shim providers whose endpoints don't speak Anthropic model names.
+// Hitting that fallthrough caused WebFetch to hang for 60s on MiniMax/Codex
+// because queryHaiku() shipped an unknown model id to the shim endpoint.
+// ---------------------------------------------------------------------------
+
+test('getSmallFastModel returns OPENAI_MODEL for MiniMax (regression: WebFetch hang)', () => {
+  process.env.MINIMAX_API_KEY = 'minimax-test'
+  process.env.OPENAI_MODEL = 'MiniMax-M2.5-highspeed'
+
+  expect(getSmallFastModel()).toBe('MiniMax-M2.5-highspeed')
+})
+
+test('getSmallFastModel returns OPENAI_MODEL for Codex (regression)', () => {
+  process.env.CLAUDE_CODE_USE_OPENAI = '1'
+  process.env.OPENAI_BASE_URL = 'https://chatgpt.com/backend-api/codex'
+  process.env.OPENAI_MODEL = 'codexspark'
+  process.env.CODEX_API_KEY = 'codex-test'
+  process.env.CHATGPT_ACCOUNT_ID = 'acct_test'
+
+  expect(getSmallFastModel()).toBe('codexspark')
+})
+
+test('getSmallFastModel returns OPENAI_MODEL for NVIDIA NIM (regression)', () => {
+  process.env.NVIDIA_NIM = '1'
+  process.env.CLAUDE_CODE_USE_OPENAI = '1'
+  process.env.OPENAI_MODEL = 'nvidia/llama-3.1-nemotron-70b-instruct'
+
+  expect(getSmallFastModel()).toBe('nvidia/llama-3.1-nemotron-70b-instruct')
+})
+
+test('getDefaultOpusModel returns OPENAI_MODEL for MiniMax', () => {
+  process.env.MINIMAX_API_KEY = 'minimax-test'
+  process.env.OPENAI_MODEL = 'MiniMax-M2.7'
+
+  expect(getDefaultOpusModel()).toBe('MiniMax-M2.7')
+})
+
+test('getDefaultSonnetModel returns OPENAI_MODEL for NVIDIA NIM', () => {
+  process.env.NVIDIA_NIM = '1'
+  process.env.CLAUDE_CODE_USE_OPENAI = '1'
+  process.env.OPENAI_MODEL = 'nvidia/llama-3.1-nemotron-70b-instruct'
+
+  expect(getDefaultSonnetModel()).toBe('nvidia/llama-3.1-nemotron-70b-instruct')
+})
+
+test('getDefaultHaikuModel returns OPENAI_MODEL for MiniMax', () => {
+  process.env.MINIMAX_API_KEY = 'minimax-test'
+  process.env.OPENAI_MODEL = 'MiniMax-M2.5-highspeed'
+
+  expect(getDefaultHaikuModel()).toBe('MiniMax-M2.5-highspeed')
+})
+
+test('default helpers do not leak claude-* names to shim providers', () => {
+  // Umbrella guard: for each OpenAI-shim provider, none of the default-model
+  // helpers may return an Anthropic-branded model name. That was the source
+  // of the WebFetch 60s hang — MiniMax received "claude-haiku-4-5" and sat
+  // on the connection.
+  process.env.MINIMAX_API_KEY = 'minimax-test'
+  process.env.OPENAI_MODEL = 'MiniMax-M2.7'
+
+  for (const fn of [
+    getSmallFastModel,
+    getDefaultOpusModel,
+    getDefaultSonnetModel,
+    getDefaultHaikuModel,
+  ]) {
+    const model = fn()
+    expect(model.toLowerCase()).not.toContain('claude')
+  }
+})
@@ -52,10 +52,25 @@ export function getSmallFastModel(): ModelName {
   if (getAPIProvider() === 'openai') {
     return process.env.OPENAI_MODEL || 'gpt-4o-mini'
   }
+  // Codex provider — OPENAI_MODEL is always set for Codex profiles; only fall
+  // back to a codex-spark alias when an override env strips it.
+  if (getAPIProvider() === 'codex') {
+    return process.env.OPENAI_MODEL || 'codexspark'
+  }
   // For GitHub Copilot provider
   if (getAPIProvider() === 'github') {
     return process.env.OPENAI_MODEL || 'github:copilot'
   }
+  // NVIDIA NIM — OPENAI_MODEL carries the user's active NIM model; use a
+  // small Meta Llama variant as the conservative fallback.
+  if (getAPIProvider() === 'nvidia-nim') {
+    return process.env.OPENAI_MODEL || 'meta/llama-3.1-8b-instruct'
+  }
+  // MiniMax — OPENAI_MODEL carries the active MiniMax model; fall back to
+  // the fastest tier (M2.5-highspeed) when missing.
+  if (getAPIProvider() === 'minimax') {
+    return process.env.OPENAI_MODEL || 'MiniMax-M2.5-highspeed'
+  }
   return getDefaultHaikuModel()
 }
@@ -171,6 +186,14 @@ export function getDefaultOpusModel(): ModelName {
   if (getAPIProvider() === 'github') {
     return process.env.OPENAI_MODEL || 'github:copilot'
   }
+  // NVIDIA NIM
+  if (getAPIProvider() === 'nvidia-nim') {
+    return process.env.OPENAI_MODEL || 'nvidia/llama-3.1-nemotron-70b-instruct'
+  }
+  // MiniMax — flagship tier for "opus"-equivalent.
+  if (getAPIProvider() === 'minimax') {
+    return process.env.OPENAI_MODEL || 'MiniMax-M2.7'
+  }
   // 3P providers (Bedrock, Vertex, Foundry) — kept as a separate branch
   // even when values match, since 3P availability lags firstParty and
   // these will diverge again at the next model launch.
@@ -205,6 +228,14 @@ export function getDefaultSonnetModel(): ModelName {
   if (getAPIProvider() === 'github') {
     return process.env.OPENAI_MODEL || 'github:copilot'
   }
+  // NVIDIA NIM
+  if (getAPIProvider() === 'nvidia-nim') {
+    return process.env.OPENAI_MODEL || 'nvidia/llama-3.1-nemotron-70b-instruct'
+  }
+  // MiniMax — mid tier for "sonnet"-equivalent.
+  if (getAPIProvider() === 'minimax') {
+    return process.env.OPENAI_MODEL || 'MiniMax-M2.5'
+  }
   // Default to Sonnet 4.5 for 3P since they may not have 4.6 yet
   if (getAPIProvider() !== 'firstParty') {
     return getModelStrings().sonnet45
@@ -237,6 +268,14 @@ export function getDefaultHaikuModel(): ModelName {
   if (getAPIProvider() === 'gemini') {
     return process.env.GEMINI_MODEL || 'gemini-2.0-flash-lite'
   }
+  // NVIDIA NIM
+  if (getAPIProvider() === 'nvidia-nim') {
+    return process.env.OPENAI_MODEL || 'meta/llama-3.1-8b-instruct'
+  }
+  // MiniMax — fastest tier for "haiku"-equivalent.
+  if (getAPIProvider() === 'minimax') {
+    return process.env.OPENAI_MODEL || 'MiniMax-M2.5-highspeed'
+  }
 
   // Haiku 4.5 is available on all platforms (first-party, Foundry, Bedrock, Vertex)
   return getModelStrings().haiku45
@@ -413,16 +413,51 @@ const OPENAI_MAX_OUTPUT_TOKENS: Record<string, number> = {
   'moonshot-v1-128k': 32_768,
 }
 
-function lookupByModel<T>(table: Record<string, T>, model: string): T | undefined {
+// External context-window overrides loaded once at startup.
+// Set CLAUDE_CODE_OPENAI_CONTEXT_WINDOWS to a JSON object mapping model name
+// → context-window token count to add or override entries without editing
+// this file. Example:
+//   CLAUDE_CODE_OPENAI_CONTEXT_WINDOWS='{"my-corp/llm-v2":200000}'
+const OPENAI_EXTERNAL_CONTEXT_WINDOWS: Record<string, number> = (() => {
+  try {
+    const raw = process.env.CLAUDE_CODE_OPENAI_CONTEXT_WINDOWS
+    if (raw) {
+      const parsed = JSON.parse(raw)
+      if (typeof parsed === 'object' && parsed !== null) return parsed as Record<string, number>
+    }
+  } catch { /* ignore malformed JSON */ }
+  return {}
+})()
+
+// External max-output-token overrides.
+// Set CLAUDE_CODE_OPENAI_MAX_OUTPUT_TOKENS to a JSON object mapping model name
+// → max output token count.
+const OPENAI_EXTERNAL_MAX_OUTPUT_TOKENS: Record<string, number> = (() => {
+  try {
+    const raw = process.env.CLAUDE_CODE_OPENAI_MAX_OUTPUT_TOKENS
+    if (raw) {
+      const parsed = JSON.parse(raw)
+      if (typeof parsed === 'object' && parsed !== null) return parsed as Record<string, number>
+    }
+  } catch { /* ignore malformed JSON */ }
+  return {}
+})()
+
+function lookupByModel<T>(table: Record<string, T>, externalTable: Record<string, T>, model: string): T | undefined {
   // Try provider-qualified key first: "{OPENAI_MODEL}:{model}" so that
   // e.g. "github:copilot:claude-haiku-4.5" can have different limits than
   // a bare "claude-haiku-4.5" served by another provider.
   const providerModel = process.env.OPENAI_MODEL?.trim()
   if (providerModel && providerModel !== model) {
     const qualified = `${providerModel}:${model}`
+    // External table takes precedence over the built-in table.
+    const externalQualified = lookupByKey(externalTable, qualified)
+    if (externalQualified !== undefined) return externalQualified
     const qualifiedResult = lookupByKey(table, qualified)
     if (qualifiedResult !== undefined) return qualifiedResult
   }
+  const externalResult = lookupByKey(externalTable, model)
+  if (externalResult !== undefined) return externalResult
   return lookupByKey(table, model)
 }
@@ -446,7 +481,7 @@ function lookupByKey<T>(table: Record<string, T>, model: string): T | undefined
  * "gpt-4o-2024-11-20" resolve to the base "gpt-4o" entry.
  */
 export function getOpenAIContextWindow(model: string): number | undefined {
-  return lookupByModel(OPENAI_CONTEXT_WINDOWS, model)
+  return lookupByModel(OPENAI_CONTEXT_WINDOWS, OPENAI_EXTERNAL_CONTEXT_WINDOWS, model)
 }
 
 /**
@@ -454,5 +489,5 @@ export function getOpenAIContextWindow(model: string): number | undefined {
  * Returns undefined if the model is not in the table.
  */
 export function getOpenAIMaxOutputTokens(model: string): number | undefined {
-  return lookupByModel(OPENAI_MAX_OUTPUT_TOKENS, model)
+  return lookupByModel(OPENAI_MAX_OUTPUT_TOKENS, OPENAI_EXTERNAL_MAX_OUTPUT_TOKENS, model)
 }
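A sketch of the resulting lookup precedence (env value hypothetical; the date-suffix resolution is handled by lookupByKey, per the doc comment above):

```ts
// Assuming the process started with:
//   CLAUDE_CODE_OPENAI_CONTEXT_WINDOWS='{"gpt-4o":262144}'
getOpenAIContextWindow('gpt-4o')            // 262144: external table wins
getOpenAIContextWindow('gpt-4o-2024-11-20') // also 262144 via base-name resolution
```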
@@ -19,7 +19,12 @@ export function getAPIProvider(): APIProvider {
   if (isEnvTruthy(process.env.NVIDIA_NIM)) {
     return 'nvidia-nim'
   }
-  if (isEnvTruthy(process.env.MINIMAX_API_KEY)) {
+  // MiniMax is signalled by a real API key, not a '1'/'true' flag. Using
+  // isEnvTruthy() here silently treated every MiniMax user as 'firstParty'
+  // (or 'openai' once they set CLAUDE_CODE_USE_OPENAI via the profile),
+  // making every provider-kind-specific branch for 'minimax' elsewhere in
+  // the codebase unreachable. Presence check is the correct signal.
+  if (typeof process.env.MINIMAX_API_KEY === 'string' && process.env.MINIMAX_API_KEY.trim() !== '') {
     return 'minimax'
   }
   return isEnvTruthy(process.env.CLAUDE_CODE_USE_GEMINI)
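Why the presence check matters, as a small illustration (isEnvTruthy is assumed, per the comment in the diff, to accept only flag-style values such as '1' or 'true'):

```ts
isEnvTruthy('1')                 // true: fine for CLAUDE_CODE_USE_* flags
isEnvTruthy('sk-minimax-abc123') // false: a real API key never looks "truthy",
                                 // so MiniMax users fell through to another
                                 // provider kind before this fix
```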