From 25c5987276baf5049339a3193d04be76cd65b015 Mon Sep 17 00:00:00 2001 From: Rithul Kamesh Date: Thu, 2 Apr 2026 11:25:28 +0530 Subject: [PATCH 01/25] feat: add support for GitHub Models provider - Introduced environment variable CLAUDE_CODE_USE_GITHUB to enable GitHub Models. - Added checks for GITHUB_TOKEN or GH_TOKEN for authentication. - Updated base URL handling to include GitHub Models default. - Enhanced provider detection and error handling for GitHub Models. - Updated relevant functions and components to accommodate the new provider. --- docs/github-models-pr-draft.md | 24 ++ scripts/system-check.ts | 79 +++++- src/commands.ts | 2 + src/commands/onboard-github/index.ts | 11 + .../onboard-github/onboard-github.tsx | 228 ++++++++++++++++++ src/components/StartupScreen.ts | 8 + src/entrypoints/cli.tsx | 37 ++- src/main.tsx | 6 +- src/services/api/client.ts | 5 +- src/services/api/openaiShim.ts | 117 +++++++-- .../api/providerConfig.github.test.ts | 41 ++++ src/services/api/providerConfig.ts | 26 +- src/services/github/deviceFlow.test.ts | 94 ++++++++ src/services/github/deviceFlow.ts | 174 +++++++++++++ src/utils/auth.ts | 8 +- src/utils/context.ts | 8 +- .../githubModelsCredentials.hydrate.test.ts | 66 +++++ src/utils/githubModelsCredentials.test.ts | 47 ++++ src/utils/githubModelsCredentials.ts | 73 ++++++ src/utils/managedEnvConstants.ts | 2 + src/utils/model/providers.test.ts | 12 + src/utils/model/providers.ts | 29 ++- src/utils/providerProfile.ts | 2 + src/utils/swarm/spawnUtils.ts | 12 + 24 files changed, 1069 insertions(+), 42 deletions(-) create mode 100644 docs/github-models-pr-draft.md create mode 100644 src/commands/onboard-github/index.ts create mode 100644 src/commands/onboard-github/onboard-github.tsx create mode 100644 src/services/api/providerConfig.github.test.ts create mode 100644 src/services/github/deviceFlow.test.ts create mode 100644 src/services/github/deviceFlow.ts create mode 100644 src/utils/githubModelsCredentials.hydrate.test.ts create mode 100644 src/utils/githubModelsCredentials.test.ts create mode 100644 src/utils/githubModelsCredentials.ts diff --git a/docs/github-models-pr-draft.md b/docs/github-models-pr-draft.md new file mode 100644 index 00000000..21fa7fa8 --- /dev/null +++ b/docs/github-models-pr-draft.md @@ -0,0 +1,24 @@ +# GitHub Models + onboard — PR draft (paste into GitHub) + +**Title:** `feat: GitHub Models provider + interactive onboard (keychain-backed)` + +**Body:** + +## Summary + +- Adds GitHub Models (`models.github.ai`) as an OpenAI-compatible backend via `CLAUDE_CODE_USE_GITHUB` (see existing shim changes). +- Adds `/onboard-github`: interactive Ink flow for GitHub Device Login or PAT, stores token in OS-backed secure storage (macOS Keychain when available, else `~/.claude/.credentials.json`), and writes user settings `env` so no `export GITHUB_TOKEN` is required. +- Applies user settings before provider env validation and hydrates `GITHUB_TOKEN` from secure storage when the GitHub provider flag is on. + +## How to test + +1. Run `openclaude` and execute `/onboard-github` (or launch via command registration). +2. Complete device flow or paste a PAT with Models access. +3. Restart CLI; confirm `CLAUDE_CODE_USE_GITHUB=1` in `~/.claude/settings.json` (or merged file) and that inference works without exporting `GITHUB_TOKEN`. +4. `bun test` (new suites) + `bun run build`. 
+ +## Notes / follow-ups + +- Device flow OAuth app client ID is configurable via `GITHUB_DEVICE_FLOW_CLIENT_ID`; verify scope list against current GitHub Models documentation. +- `/logout` currently deletes all secure storage; GitHub token is cleared too — document or narrow in a follow-up. +- Linux: secure storage is plaintext with chmod 600 today; libsecret is still TODO in `secureStorage`. diff --git a/scripts/system-check.ts b/scripts/system-check.ts index e129685a..6626149a 100644 --- a/scripts/system-check.ts +++ b/scripts/system-check.ts @@ -93,11 +93,15 @@ function isLocalBaseUrl(baseUrl: string): boolean { } const GEMINI_DEFAULT_BASE_URL = 'https://generativelanguage.googleapis.com/v1beta/openai' +const GITHUB_MODELS_DEFAULT_BASE = 'https://models.github.ai/inference' function currentBaseUrl(): string { if (isTruthy(process.env.CLAUDE_CODE_USE_GEMINI)) { return process.env.GEMINI_BASE_URL ?? GEMINI_DEFAULT_BASE_URL } + if (isTruthy(process.env.CLAUDE_CODE_USE_GITHUB)) { + return process.env.OPENAI_BASE_URL ?? GITHUB_MODELS_DEFAULT_BASE + } return process.env.OPENAI_BASE_URL ?? 'https://api.openai.com/v1' } @@ -126,15 +130,47 @@ function checkGeminiEnv(): CheckResult[] { return results } +function checkGithubEnv(): CheckResult[] { + const results: CheckResult[] = [] + const baseUrl = process.env.OPENAI_BASE_URL ?? GITHUB_MODELS_DEFAULT_BASE + results.push(pass('Provider mode', 'GitHub Models provider enabled.')) + + const token = process.env.GITHUB_TOKEN ?? process.env.GH_TOKEN + if (!token?.trim()) { + results.push(fail('GITHUB_TOKEN', 'Missing. Set GITHUB_TOKEN or GH_TOKEN.')) + } else { + results.push(pass('GITHUB_TOKEN', 'Configured.')) + } + + if (!process.env.OPENAI_MODEL) { + results.push( + pass( + 'OPENAI_MODEL', + 'Not set. Default github:copilot → openai/gpt-4.1 at runtime.', + ), + ) + } else { + results.push(pass('OPENAI_MODEL', process.env.OPENAI_MODEL)) + } + + results.push(pass('OPENAI_BASE_URL', baseUrl)) + return results +} + function checkOpenAIEnv(): CheckResult[] { const results: CheckResult[] = [] const useGemini = isTruthy(process.env.CLAUDE_CODE_USE_GEMINI) + const useGithub = isTruthy(process.env.CLAUDE_CODE_USE_GITHUB) const useOpenAI = isTruthy(process.env.CLAUDE_CODE_USE_OPENAI) if (useGemini) { return checkGeminiEnv() } + if (useGithub && !useOpenAI) { + return checkGithubEnv() + } + if (!useOpenAI) { results.push(pass('Provider mode', 'Anthropic login flow enabled (CLAUDE_CODE_USE_OPENAI is off).')) return results @@ -181,10 +217,19 @@ function checkOpenAIEnv(): CheckResult[] { } const key = process.env.OPENAI_API_KEY + const githubToken = process.env.GITHUB_TOKEN ?? 
process.env.GH_TOKEN if (key === 'SUA_CHAVE') { results.push(fail('OPENAI_API_KEY', 'Placeholder value detected: SUA_CHAVE.')) - } else if (!key && !isLocalBaseUrl(request.baseUrl)) { + } else if ( + !key && + !isLocalBaseUrl(request.baseUrl) && + !(useGithub && githubToken?.trim()) + ) { results.push(fail('OPENAI_API_KEY', 'Missing key for non-local provider URL.')) + } else if (!key && useGithub && githubToken?.trim()) { + results.push( + pass('OPENAI_API_KEY', 'Not set; GITHUB_TOKEN/GH_TOKEN will be used for GitHub Models.'), + ) } else if (!key) { results.push(pass('OPENAI_API_KEY', 'Not set (allowed for local providers like Ollama/LM Studio).')) } else { @@ -197,11 +242,19 @@ function checkOpenAIEnv(): CheckResult[] { async function checkBaseUrlReachability(): Promise { const useGemini = isTruthy(process.env.CLAUDE_CODE_USE_GEMINI) const useOpenAI = isTruthy(process.env.CLAUDE_CODE_USE_OPENAI) + const useGithub = isTruthy(process.env.CLAUDE_CODE_USE_GITHUB) - if (!useGemini && !useOpenAI) { + if (!useGemini && !useOpenAI && !useGithub) { return pass('Provider reachability', 'Skipped (OpenAI-compatible mode disabled).') } + if (useGithub) { + return pass( + 'Provider reachability', + 'Skipped for GitHub Models (inference endpoint differs from OpenAI /models probe).', + ) + } + const geminiBaseUrl = 'https://generativelanguage.googleapis.com/v1beta/openai' const resolvedBaseUrl = useGemini ? (process.env.GEMINI_BASE_URL ?? geminiBaseUrl) @@ -272,7 +325,11 @@ async function checkBaseUrlReachability(): Promise { } function checkOllamaProcessorMode(): CheckResult { - if (!isTruthy(process.env.CLAUDE_CODE_USE_OPENAI) || isTruthy(process.env.CLAUDE_CODE_USE_GEMINI)) { + if ( + !isTruthy(process.env.CLAUDE_CODE_USE_OPENAI) || + isTruthy(process.env.CLAUDE_CODE_USE_GEMINI) || + isTruthy(process.env.CLAUDE_CODE_USE_GITHUB) + ) { return pass('Ollama processor mode', 'Skipped (OpenAI-compatible mode disabled).') } @@ -319,6 +376,22 @@ function serializeSafeEnvSummary(): Record { GEMINI_API_KEY_SET: Boolean(process.env.GEMINI_API_KEY ?? process.env.GOOGLE_API_KEY), } } + if ( + isTruthy(process.env.CLAUDE_CODE_USE_GITHUB) && + !isTruthy(process.env.CLAUDE_CODE_USE_OPENAI) + ) { + return { + CLAUDE_CODE_USE_GITHUB: true, + OPENAI_MODEL: + process.env.OPENAI_MODEL ?? + '(unset, default: github:copilot → openai/gpt-4.1)', + OPENAI_BASE_URL: + process.env.OPENAI_BASE_URL ?? GITHUB_MODELS_DEFAULT_BASE, + GITHUB_TOKEN_SET: Boolean( + process.env.GITHUB_TOKEN ?? 
process.env.GH_TOKEN,
+      ),
+    }
+  }
   const request = resolveProviderRequest({
     model: process.env.OPENAI_MODEL,
     baseUrl: process.env.OPENAI_BASE_URL,
diff --git a/src/commands.ts b/src/commands.ts
index 10f03b22..fe15aa2b 100644
--- a/src/commands.ts
+++ b/src/commands.ts
@@ -19,6 +19,7 @@ import cost from './commands/cost/index.js'
 import diff from './commands/diff/index.js'
 import ctx_viz from './commands/ctx_viz/index.js'
 import doctor from './commands/doctor/index.js'
+import onboardGithub from './commands/onboard-github/index.js'
 import memory from './commands/memory/index.js'
 import help from './commands/help/index.js'
 import ide from './commands/ide/index.js'
@@ -288,6 +289,7 @@ const COMMANDS = memoize((): Command[] => [
   memory,
   mobile,
   model,
+  onboardGithub,
   outputStyle,
   remoteEnv,
   plugin,
diff --git a/src/commands/onboard-github/index.ts b/src/commands/onboard-github/index.ts
new file mode 100644
index 00000000..91d67247
--- /dev/null
+++ b/src/commands/onboard-github/index.ts
@@ -0,0 +1,11 @@
+import type { Command } from '../../commands.js'
+
+const onboardGithub: Command = {
+  name: 'onboard-github',
+  description:
+    'Interactive setup for GitHub Models: device login or PAT, saved to secure storage',
+  type: 'local-jsx',
+  load: () => import('./onboard-github.js'),
+}
+
+export default onboardGithub
diff --git a/src/commands/onboard-github/onboard-github.tsx b/src/commands/onboard-github/onboard-github.tsx
new file mode 100644
index 00000000..26088392
--- /dev/null
+++ b/src/commands/onboard-github/onboard-github.tsx
@@ -0,0 +1,228 @@
+import * as React from 'react'
+import { useCallback, useState } from 'react'
+import { Select } from '../../components/CustomSelect/select.js'
+import { Spinner } from '../../components/Spinner.js'
+import TextInput from '../../components/TextInput.js'
+import { Box, Text } from '../../ink.js'
+import {
+  openVerificationUri,
+  pollAccessToken,
+  requestDeviceCode,
+} from '../../services/github/deviceFlow.js'
+import type { LocalJSXCommandCall } from '../../types/command.js'
+import {
+  hydrateGithubModelsTokenFromSecureStorage,
+  saveGithubModelsToken,
+} from '../../utils/githubModelsCredentials.js'
+import { updateSettingsForSource } from '../../utils/settings/settings.js'
+
+const DEFAULT_MODEL = 'github:copilot'
+
+type Step =
+  | 'menu'
+  | 'device-busy'
+  | 'pat'
+  | 'error'
+
+function mergeUserSettingsEnv(model: string): { ok: boolean; detail?: string } {
+  const { error } = updateSettingsForSource('userSettings', {
+    env: {
+      CLAUDE_CODE_USE_GITHUB: '1',
+      OPENAI_MODEL: model,
+    },
+  })
+  if (error) {
+    return { ok: false, detail: error.message }
+  }
+  return { ok: true }
+}
+
+function OnboardGithub(props: {
+  onDone: Parameters<LocalJSXCommandCall>[0]
+  onChangeAPIKey: () => void
+}): React.ReactNode {
+  const { onDone, onChangeAPIKey } = props
+  const [step, setStep] = useState<Step>('menu')
+  const [errorMsg, setErrorMsg] = useState<string | null>(null)
+  const [deviceHint, setDeviceHint] = useState<{
+    user_code: string
+    verification_uri: string
+  } | null>(null)
+  const [patDraft, setPatDraft] = useState('')
+
+  const finalize = useCallback(
+    async (token: string, model: string = DEFAULT_MODEL) => {
+      const saved = saveGithubModelsToken(token)
+      if (!saved.success) {
+        setErrorMsg(saved.warning ?? 'Could not save token to secure storage.')
+        setStep('error')
+        return
+      }
+      const merged = mergeUserSettingsEnv(model.trim() || DEFAULT_MODEL)
+      if (!merged.ok) {
+        setErrorMsg(
+          `Token saved, but settings were not updated: ${merged.detail ?? 'unknown error'}. ` +
+            `Add env CLAUDE_CODE_USE_GITHUB=1 and OPENAI_MODEL to ~/.claude/settings.json manually.`,
+        )
+        setStep('error')
+        return
+      }
+      process.env.CLAUDE_CODE_USE_GITHUB = '1'
+      process.env.OPENAI_MODEL = model.trim() || DEFAULT_MODEL
+      hydrateGithubModelsTokenFromSecureStorage()
+      onChangeAPIKey()
+      onDone(
+        'GitHub Models onboard complete. Token stored in secure storage; user settings updated. Restart if the model does not switch.',
+        { display: 'user' },
+      )
+    },
+    [onChangeAPIKey, onDone],
+  )
+
+  const runDeviceFlow = useCallback(async () => {
+    setStep('device-busy')
+    setErrorMsg(null)
+    setDeviceHint(null)
+    try {
+      const device = await requestDeviceCode()
+      setDeviceHint({
+        user_code: device.user_code,
+        verification_uri: device.verification_uri,
+      })
+      await openVerificationUri(device.verification_uri)
+      const token = await pollAccessToken(device.device_code, {
+        initialInterval: device.interval,
+        timeoutSeconds: device.expires_in,
+      })
+      await finalize(token, DEFAULT_MODEL)
+    } catch (e) {
+      setErrorMsg(e instanceof Error ? e.message : String(e))
+      setStep('error')
+    }
+  }, [finalize])
+
+  if (step === 'error' && errorMsg) {
+    const options = [
+      {
+        label: 'Back to menu',
+        value: 'back' as const,
+      },
+      {
+        label: 'Exit',
+        value: 'exit' as const,
+      },
+    ]
+    return (
+      <Box flexDirection="column">
+        <Text>{errorMsg}</Text>
+        <Select
+          options={options}
+          onChange={v => {
+            if (v === 'cancel') {
+              onDone('GitHub onboard cancelled', { display: 'system' })
+              return
+            }
+            if (v === 'pat') {
+              setStep('pat')
+              return
+            }
+            void runDeviceFlow()
+          }}
+        />
+      </Box>
+    )
+  }
+}
+
+export const call: LocalJSXCommandCall = async (onDone, context) => {
+  return (
+    <OnboardGithub
+      onDone={onDone}
+      onChangeAPIKey={context.onChangeAPIKey}
+    />
+  )
+}
diff --git a/src/components/StartupScreen.ts b/src/components/StartupScreen.ts
index ded4f457..b20d26c1 100644
--- a/src/components/StartupScreen.ts
+++ b/src/components/StartupScreen.ts
@@ -80,6 +80,7 @@ const LOGO_CLAUDE = [
 function detectProvider(): { name: string; model: string; baseUrl: string; isLocal: boolean } {
   const useGemini = process.env.CLAUDE_CODE_USE_GEMINI === '1' || process.env.CLAUDE_CODE_USE_GEMINI === 'true'
+  const useGithub = process.env.CLAUDE_CODE_USE_GITHUB === '1' || process.env.CLAUDE_CODE_USE_GITHUB === 'true'
   const useOpenAI = process.env.CLAUDE_CODE_USE_OPENAI === '1' || process.env.CLAUDE_CODE_USE_OPENAI === 'true'
 
   if (useGemini) {
@@ -88,6 +89,13 @@ function detectProvider(): { name: string; model: string; baseUrl: string; isLoc
     return { name: 'Google Gemini', model, baseUrl, isLocal: false }
   }
 
+  if (useGithub) {
+    const model = process.env.OPENAI_MODEL || 'github:copilot'
+    const baseUrl =
+      process.env.OPENAI_BASE_URL || 'https://models.github.ai/inference'
+    return { name: 'GitHub Models', model, baseUrl, isLocal: false }
+  }
+
   if (useOpenAI) {
     const model = process.env.OPENAI_MODEL || 'gpt-4o'
     const baseUrl = process.env.OPENAI_BASE_URL || 'https://api.openai.com/v1'
diff --git a/src/entrypoints/cli.tsx b/src/entrypoints/cli.tsx
index 71adb260..629dd7db 100644
--- a/src/entrypoints/cli.tsx
+++ b/src/entrypoints/cli.tsx
@@ -46,7 +46,22 @@ function isLocalProviderUrl(baseUrl: string | undefined): boolean {
 }
 
 function validateProviderEnvOrExit(): void {
-  if (!isEnvTruthy(process.env.CLAUDE_CODE_USE_OPENAI)) {
+  const useOpenAI = isEnvTruthy(process.env.CLAUDE_CODE_USE_OPENAI)
+  const useGithub = isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB)
+
+  if (useGithub && !useOpenAI) {
+    const token =
+      (process.env.GITHUB_TOKEN?.trim() || process.env.GH_TOKEN?.trim()) ?? 
'' + if (!token) { + console.error( + 'GITHUB_TOKEN or GH_TOKEN is required when CLAUDE_CODE_USE_GITHUB=1.', + ) + process.exit(1) + } + return + } + + if (!useOpenAI) { return } @@ -77,8 +92,15 @@ function validateProviderEnvOrExit(): void { } if (!process.env.OPENAI_API_KEY && !isLocalProviderUrl(request.baseUrl)) { - console.error('OPENAI_API_KEY is required when CLAUDE_CODE_USE_OPENAI=1 and OPENAI_BASE_URL is not local.') - process.exit(1) + const hasGithubToken = !!( + process.env.GITHUB_TOKEN?.trim() || process.env.GH_TOKEN?.trim() + ) + if (!(useGithub && hasGithubToken)) { + console.error( + 'OPENAI_API_KEY is required when CLAUDE_CODE_USE_OPENAI=1 and OPENAI_BASE_URL is not local. When CLAUDE_CODE_USE_GITHUB=1, GITHUB_TOKEN or GH_TOKEN may be used instead.', + ) + process.exit(1) + } } } @@ -98,6 +120,15 @@ async function main(): Promise { return; } + { + const { enableConfigs } = await import('../utils/config.js') + enableConfigs() + const { applySafeConfigEnvironmentVariables } = await import('../utils/managedEnv.js') + applySafeConfigEnvironmentVariables() + const { hydrateGithubModelsTokenFromSecureStorage } = await import('../utils/githubModelsCredentials.js') + hydrateGithubModelsTokenFromSecureStorage() + } + validateProviderEnvOrExit() // Print the gradient startup screen before the Ink UI loads diff --git a/src/main.tsx b/src/main.tsx index 07a3a3d2..a08f5899 100644 --- a/src/main.tsx +++ b/src/main.tsx @@ -2313,7 +2313,11 @@ async function run(): Promise { errors } = getSettingsWithErrors(); const nonMcpErrors = errors.filter(e => !e.mcpErrorMetadata); - if (nonMcpErrors.length > 0 && !isEnvTruthy(process.env.CLAUDE_CODE_USE_OPENAI)) { + if ( + nonMcpErrors.length > 0 && + !isEnvTruthy(process.env.CLAUDE_CODE_USE_OPENAI) && + !isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB) + ) { await launchInvalidSettingsDialog(root, { settingsErrors: nonMcpErrors, onExit: () => gracefulShutdownSync(1) diff --git a/src/services/api/client.ts b/src/services/api/client.ts index 493f4d73..ee50e35c 100644 --- a/src/services/api/client.ts +++ b/src/services/api/client.ts @@ -154,7 +154,10 @@ export async function getAnthropicClient({ fetch: resolvedFetch, }), } - if (isEnvTruthy(process.env.CLAUDE_CODE_USE_OPENAI)) { + if ( + isEnvTruthy(process.env.CLAUDE_CODE_USE_OPENAI) || + isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB) + ) { const { createOpenAIShimClient } = await import('./openaiShim.js') return createOpenAIShimClient({ defaultHeaders, diff --git a/src/services/api/openaiShim.ts b/src/services/api/openaiShim.ts index 9b77d07e..f13d2f15 100644 --- a/src/services/api/openaiShim.ts +++ b/src/services/api/openaiShim.ts @@ -14,8 +14,15 @@ * OPENAI_BASE_URL=http://... 
— base URL (default: https://api.openai.com/v1)
 * OPENAI_MODEL=gpt-4o — default model override
 * CODEX_API_KEY / ~/.codex/auth.json — Codex auth for codexplan/codexspark
+ *
+ * GitHub Models (models.github.ai), OpenAI-compatible:
+ *   CLAUDE_CODE_USE_GITHUB=1 — enable GitHub inference (no need for USE_OPENAI)
+ *   GITHUB_TOKEN or GH_TOKEN — PAT with models access (mapped to Bearer auth)
+ *   OPENAI_MODEL — optional; use github:copilot or openai/gpt-4.1 style IDs
  */
 
+import { isEnvTruthy } from '../../utils/envUtils.js'
+import { hydrateGithubModelsTokenFromSecureStorage } from '../../utils/githubModelsCredentials.js'
 import {
   codexStreamToAnthropic,
   collectCodexCompletedResponse,
@@ -30,6 +37,25 @@ import {
   resolveProviderRequest,
 } from './providerConfig.js'
 
+const GITHUB_MODELS_DEFAULT_BASE = 'https://models.github.ai/inference'
+const GITHUB_API_VERSION = '2022-11-28'
+const GITHUB_429_MAX_RETRIES = 3
+const GITHUB_429_BASE_DELAY_SEC = 1
+const GITHUB_429_MAX_DELAY_SEC = 32
+
+function isGithubModelsMode(): boolean {
+  return isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB)
+}
+
+function formatRetryAfterHint(response: Response): string {
+  const ra = response.headers.get('retry-after')
+  return ra ? ` (Retry-After: ${ra})` : ''
+}
+
+function sleepMs(ms: number): Promise<void> {
+  return new Promise(resolve => setTimeout(resolve, ms))
+}
+
 // ---------------------------------------------------------------------------
 // Types — minimal subset of Anthropic SDK types we need to produce
 // ---------------------------------------------------------------------------
@@ -254,9 +280,7 @@ function normalizeSchemaForOpenAI(
 function convertTools(
   tools: Array<{ name: string; description?: string; input_schema?: Record<string, unknown> }>,
 ): OpenAITool[] {
-  const isGemini =
-    process.env.CLAUDE_CODE_USE_GEMINI === '1' ||
-    process.env.CLAUDE_CODE_USE_GEMINI === 'true'
+  const isGemini = isEnvTruthy(process.env.CLAUDE_CODE_USE_GEMINI)
 
   return tools
     .filter(t => t.name !== 'ToolSearchTool') // Not relevant for OpenAI
@@ -666,6 +690,12 @@ class OpenAIShimMessages {
       body.stream_options = { include_usage: true }
     }
 
+    const isGithub = isGithubModelsMode()
+    if (isGithub && body.max_completion_tokens !== undefined) {
+      body.max_tokens = body.max_completion_tokens
+      delete body.max_completion_tokens
+    }
+
     if (params.temperature !== undefined) body.temperature = params.temperature
     if (params.top_p !== undefined) body.top_p = params.top_p
 
@@ -715,6 +745,11 @@ class OpenAIShimMessages {
       }
     }
 
+    if (isGithub) {
+      headers.Accept = 'application/vnd.github.v3+json'
+      headers['X-GitHub-Api-Version'] = GITHUB_API_VERSION
+    }
+
     // Build the chat completions URL
     // Azure Cognitive Services / Azure OpenAI require a deployment-specific path
     // and an api-version query parameter.
@@ -737,19 +772,42 @@ class OpenAIShimMessages {
       chatCompletionsUrl = `${request.baseUrl}/chat/completions`
     }
 
-    const response = await fetch(chatCompletionsUrl, {
-      method: 'POST',
+    const fetchInit = {
+      method: 'POST' as const,
       headers,
       body: JSON.stringify(body),
       signal: options?.signal,
-    })
-
-    if (!response.ok) {
-      const errorBody = await response.text().catch(() => 'unknown error')
-      throw new Error(`OpenAI API error ${response.status}: ${errorBody}`)
     }
 
-    return response
+    const maxAttempts = isGithub ? GITHUB_429_MAX_RETRIES : 1
+    let response: Response | undefined
+    for (let attempt = 0; attempt < maxAttempts; attempt++) {
+      response = await fetch(chatCompletionsUrl, fetchInit)
+      if (response.ok) {
+        return response
+      }
+      if (
+        isGithub &&
+        response.status === 429 &&
+        attempt < maxAttempts - 1
+      ) {
+        await response.text().catch(() => {})
+        const delaySec = Math.min(
+          GITHUB_429_BASE_DELAY_SEC * 2 ** attempt,
+          GITHUB_429_MAX_DELAY_SEC,
+        )
+        await sleepMs(delaySec * 1000)
+        continue
+      }
+      const errorBody = await response.text().catch(() => 'unknown error')
+      const rateHint =
+        isGithub && response.status === 429 ? formatRetryAfterHint(response) : ''
+      throw new Error(
+        `OpenAI API error ${response.status}: ${errorBody}${rateHint}`,
+      )
+    }
+
+    throw new Error('OpenAI shim: request loop exited unexpectedly')
   }
 
   private _convertNonStreamingResponse(
@@ -759,7 +817,10 @@ class OpenAIShimMessages {
       choices?: Array<{
         message?: {
           role?: string
-          content?: string | null
+          content?:
+            | string
+            | null
+            | Array<{ type?: string; text?: string }>
           tool_calls?: Array<{
             id: string
             function: { name: string; arguments: string }
@@ -778,8 +839,25 @@ class OpenAIShimMessages {
     const choice = data.choices?.[0]
     const content: Array<Record<string, unknown>> = []
 
-    if (choice?.message?.content) {
-      content.push({ type: 'text', text: choice.message.content })
+    const rawContent = choice?.message?.content
+    if (typeof rawContent === 'string' && rawContent) {
+      content.push({ type: 'text', text: rawContent })
+    } else if (Array.isArray(rawContent) && rawContent.length > 0) {
+      const parts: string[] = []
+      for (const part of rawContent) {
+        if (
+          part &&
+          typeof part === 'object' &&
+          part.type === 'text' &&
+          typeof part.text === 'string'
+        ) {
+          parts.push(part.text)
+        }
+      }
+      const joined = parts.join('\n')
+      if (joined) {
+        content.push({ type: 'text', text: joined })
+      }
     }
 
     if (choice?.message?.tool_calls) {
@@ -838,12 +916,11 @@ export function createOpenAIShimClient(options: {
   maxRetries?: number
   timeout?: number
 }): unknown {
+  hydrateGithubModelsTokenFromSecureStorage()
+
  // When Gemini provider is active, map Gemini env vars to OpenAI-compatible ones
  // so the existing providerConfig.ts infrastructure picks them up correctly.
-  if (
-    process.env.CLAUDE_CODE_USE_GEMINI === '1' ||
-    process.env.CLAUDE_CODE_USE_GEMINI === 'true'
-  ) {
+  if (isEnvTruthy(process.env.CLAUDE_CODE_USE_GEMINI)) {
     process.env.OPENAI_BASE_URL ??=
       process.env.GEMINI_BASE_URL ??
       'https://generativelanguage.googleapis.com/v1beta/openai'
@@ -852,6 +929,10 @@ export function createOpenAIShimClient(options: {
     if (process.env.GEMINI_MODEL && !process.env.OPENAI_MODEL) {
       process.env.OPENAI_MODEL = process.env.GEMINI_MODEL
     }
+  } else if (isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB)) {
+    process.env.OPENAI_BASE_URL ??= GITHUB_MODELS_DEFAULT_BASE
+    process.env.OPENAI_API_KEY ??=
+      process.env.GITHUB_TOKEN ?? process.env.GH_TOKEN ?? 
'' } const beta = new OpenAIShimBeta({ diff --git a/src/services/api/providerConfig.github.test.ts b/src/services/api/providerConfig.github.test.ts new file mode 100644 index 00000000..6177a9c6 --- /dev/null +++ b/src/services/api/providerConfig.github.test.ts @@ -0,0 +1,41 @@ +import { afterEach, expect, test } from 'bun:test' + +import { + DEFAULT_GITHUB_MODELS_API_MODEL, + normalizeGithubModelsApiModel, + resolveProviderRequest, +} from './providerConfig.js' + +const originalUseGithub = process.env.CLAUDE_CODE_USE_GITHUB + +afterEach(() => { + if (originalUseGithub === undefined) { + delete process.env.CLAUDE_CODE_USE_GITHUB + } else { + process.env.CLAUDE_CODE_USE_GITHUB = originalUseGithub + } +}) + +test.each([ + ['copilot', DEFAULT_GITHUB_MODELS_API_MODEL], + ['github:copilot', DEFAULT_GITHUB_MODELS_API_MODEL], + ['', DEFAULT_GITHUB_MODELS_API_MODEL], + ['github:gpt-4o', 'gpt-4o'], + ['gpt-4o', 'gpt-4o'], + ['github:copilot?reasoning=high', DEFAULT_GITHUB_MODELS_API_MODEL], +] as const)('normalizeGithubModelsApiModel(%s) -> %s', (input, expected) => { + expect(normalizeGithubModelsApiModel(input)).toBe(expected) +}) + +test('resolveProviderRequest applies GitHub normalization when CLAUDE_CODE_USE_GITHUB=1', () => { + process.env.CLAUDE_CODE_USE_GITHUB = '1' + const r = resolveProviderRequest({ model: 'github:gpt-4o' }) + expect(r.resolvedModel).toBe('gpt-4o') + expect(r.transport).toBe('chat_completions') +}) + +test('resolveProviderRequest leaves model unchanged without GitHub flag', () => { + delete process.env.CLAUDE_CODE_USE_GITHUB + const r = resolveProviderRequest({ model: 'github:gpt-4o' }) + expect(r.resolvedModel).toBe('github:gpt-4o') +}) diff --git a/src/services/api/providerConfig.ts b/src/services/api/providerConfig.ts index b197d785..bbbc2cb9 100644 --- a/src/services/api/providerConfig.ts +++ b/src/services/api/providerConfig.ts @@ -2,8 +2,12 @@ import { existsSync, readFileSync } from 'node:fs' import { homedir } from 'node:os' import { join } from 'node:path' +import { isEnvTruthy } from '../../utils/envUtils.js' + export const DEFAULT_OPENAI_BASE_URL = 'https://api.openai.com/v1' export const DEFAULT_CODEX_BASE_URL = 'https://chatgpt.com/backend-api/codex' +/** Default GitHub Models API model when user selects copilot / github:copilot */ +export const DEFAULT_GITHUB_MODELS_API_MODEL = 'openai/gpt-4.1' const CODEX_ALIAS_MODELS: Record< string, @@ -171,6 +175,20 @@ export function isCodexBaseUrl(baseUrl: string | undefined): boolean { } } +/** + * Normalize user model string for GitHub Models inference (models.github.ai). + * Mirrors runtime devsper `github._normalize_model_id`. + */ +export function normalizeGithubModelsApiModel(requestedModel: string): string { + const noQuery = requestedModel.split('?', 1)[0] ?? requestedModel + const segment = + noQuery.includes(':') ? noQuery.split(':', 2)[1]!.trim() : noQuery.trim() + if (!segment || segment.toLowerCase() === 'copilot') { + return DEFAULT_GITHUB_MODELS_API_MODEL + } + return segment +} + export function resolveProviderRequest(options?: { model?: string baseUrl?: string @@ -192,10 +210,16 @@ export function resolveProviderRequest(options?: { ? 'codex_responses' : 'chat_completions' + const resolvedModel = + transport === 'chat_completions' && + isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB) + ? normalizeGithubModelsApiModel(requestedModel) + : descriptor.baseModel + return { transport, requestedModel, - resolvedModel: descriptor.baseModel, + resolvedModel, baseUrl: (rawBaseUrl ?? 
(transport === 'codex_responses' diff --git a/src/services/github/deviceFlow.test.ts b/src/services/github/deviceFlow.test.ts new file mode 100644 index 00000000..4b7ce584 --- /dev/null +++ b/src/services/github/deviceFlow.test.ts @@ -0,0 +1,94 @@ +import { afterEach, describe, expect, mock, test } from 'bun:test' + +import { + GitHubDeviceFlowError, + pollAccessToken, + requestDeviceCode, +} from './deviceFlow.js' + +describe('requestDeviceCode', () => { + const originalFetch = globalThis.fetch + + afterEach(() => { + globalThis.fetch = originalFetch + }) + + test('parses successful device code response', async () => { + globalThis.fetch = mock(() => + Promise.resolve( + new Response( + JSON.stringify({ + device_code: 'abc', + user_code: 'ABCD-1234', + verification_uri: 'https://github.com/login/device', + expires_in: 600, + interval: 5, + }), + { status: 200 }, + ), + ), + ) + + const r = await requestDeviceCode({ + clientId: 'test-client', + fetchImpl: globalThis.fetch, + }) + expect(r.device_code).toBe('abc') + expect(r.user_code).toBe('ABCD-1234') + expect(r.verification_uri).toBe('https://github.com/login/device') + expect(r.expires_in).toBe(600) + expect(r.interval).toBe(5) + }) + + test('throws on HTTP error', async () => { + globalThis.fetch = mock(() => + Promise.resolve(new Response('bad', { status: 500 })), + ) + await expect( + requestDeviceCode({ clientId: 'x', fetchImpl: globalThis.fetch }), + ).rejects.toThrow(GitHubDeviceFlowError) + }) +}) + +describe('pollAccessToken', () => { + const originalFetch = globalThis.fetch + + afterEach(() => { + globalThis.fetch = originalFetch + }) + + test('returns token when GitHub responds with access_token immediately', async () => { + let calls = 0 + globalThis.fetch = mock(() => { + calls++ + return Promise.resolve( + new Response(JSON.stringify({ access_token: 'tok-xyz' }), { + status: 200, + }), + ) + }) + + const token = await pollAccessToken('dev-code', { + clientId: 'cid', + fetchImpl: globalThis.fetch, + }) + expect(token).toBe('tok-xyz') + expect(calls).toBe(1) + }) + + test('throws on access_denied', async () => { + globalThis.fetch = mock(() => + Promise.resolve( + new Response(JSON.stringify({ error: 'access_denied' }), { + status: 200, + }), + ), + ) + await expect( + pollAccessToken('dc', { + clientId: 'c', + fetchImpl: globalThis.fetch, + }), + ).rejects.toThrow(/denied/) + }) +}) diff --git a/src/services/github/deviceFlow.ts b/src/services/github/deviceFlow.ts new file mode 100644 index 00000000..0e207b7f --- /dev/null +++ b/src/services/github/deviceFlow.ts @@ -0,0 +1,174 @@ +/** + * GitHub OAuth device flow for CLI login (https://docs.github.com/en/apps/oauth-apps/building-oauth-apps/authorizing-oauth-apps#device-flow). 
+ */
+
+import { execFileNoThrow } from '../../utils/execFileNoThrow.js'
+
+export const DEFAULT_GITHUB_DEVICE_FLOW_CLIENT_ID = 'Ov23liXjWSSui6QIahPl'
+
+export const GITHUB_DEVICE_CODE_URL = 'https://github.com/login/device/code'
+export const GITHUB_DEVICE_ACCESS_TOKEN_URL =
+  'https://github.com/login/oauth/access_token'
+
+/** Match runtime devsper github_oauth DEFAULT_SCOPE */
+export const DEFAULT_GITHUB_DEVICE_SCOPE = 'read:user'
+
+export class GitHubDeviceFlowError extends Error {
+  constructor(message: string) {
+    super(message)
+    this.name = 'GitHubDeviceFlowError'
+  }
+}
+
+export type DeviceCodeResult = {
+  device_code: string
+  user_code: string
+  verification_uri: string
+  expires_in: number
+  interval: number
+}
+
+export function getGithubDeviceFlowClientId(): string {
+  return (
+    process.env.GITHUB_DEVICE_FLOW_CLIENT_ID?.trim() ||
+    DEFAULT_GITHUB_DEVICE_FLOW_CLIENT_ID
+  )
+}
+
+function sleep(ms: number): Promise<void> {
+  return new Promise(resolve => setTimeout(resolve, ms))
+}
+
+export async function requestDeviceCode(options?: {
+  clientId?: string
+  scope?: string
+  fetchImpl?: typeof fetch
+}): Promise<DeviceCodeResult> {
+  const clientId = options?.clientId ?? getGithubDeviceFlowClientId()
+  if (!clientId) {
+    throw new GitHubDeviceFlowError(
+      'No OAuth client ID: set GITHUB_DEVICE_FLOW_CLIENT_ID or paste a PAT instead.',
+    )
+  }
+  const fetchFn = options?.fetchImpl ?? fetch
+  const res = await fetchFn(GITHUB_DEVICE_CODE_URL, {
+    method: 'POST',
+    headers: { Accept: 'application/json' },
+    body: new URLSearchParams({
+      client_id: clientId,
+      scope: options?.scope ?? DEFAULT_GITHUB_DEVICE_SCOPE,
+    }),
+  })
+  if (!res.ok) {
+    const text = await res.text().catch(() => '')
+    throw new GitHubDeviceFlowError(
+      `Device code request failed: ${res.status} ${text}`,
+    )
+  }
+  const data = (await res.json()) as Record<string, unknown>
+  const device_code = data.device_code
+  const user_code = data.user_code
+  const verification_uri = data.verification_uri
+  if (
+    typeof device_code !== 'string' ||
+    typeof user_code !== 'string' ||
+    typeof verification_uri !== 'string'
+  ) {
+    throw new GitHubDeviceFlowError('Malformed device code response from GitHub')
+  }
+  return {
+    device_code,
+    user_code,
+    verification_uri,
+    expires_in: typeof data.expires_in === 'number' ? data.expires_in : 900,
+    interval: typeof data.interval === 'number' ? data.interval : 5,
+  }
+}
+
+export type PollOptions = {
+  clientId?: string
+  initialInterval?: number
+  timeoutSeconds?: number
+  fetchImpl?: typeof fetch
+}
+
+export async function pollAccessToken(
+  deviceCode: string,
+  options?: PollOptions,
+): Promise<string> {
+  const clientId = options?.clientId ?? getGithubDeviceFlowClientId()
+  if (!clientId) {
+    throw new GitHubDeviceFlowError('client_id required for polling')
+  }
+  let interval = Math.max(1, options?.initialInterval ?? 5)
+  const timeoutSeconds = options?.timeoutSeconds ?? 900
+  const fetchFn = options?.fetchImpl ?? fetch
+  const start = Date.now()
+
+  while ((Date.now() - start) / 1000 < timeoutSeconds) {
+    const res = await fetchFn(GITHUB_DEVICE_ACCESS_TOKEN_URL, {
+      method: 'POST',
+      headers: { Accept: 'application/json' },
+      body: new URLSearchParams({
+        client_id: clientId,
+        device_code: deviceCode,
+        grant_type: 'urn:ietf:params:oauth:grant-type:device_code',
+      }),
+    })
+    if (!res.ok) {
+      const text = await res.text().catch(() => '')
+      throw new GitHubDeviceFlowError(
+        `Token request failed: ${res.status} ${text}`,
+      )
+    }
+    const data = (await res.json()) as Record<string, unknown>
+    const err = data.error as string | undefined
+    if (err == null) {
+      const token = data.access_token
+      if (typeof token === 'string' && token) {
+        return token
+      }
+      throw new GitHubDeviceFlowError('No access_token in response')
+    }
+    if (err === 'authorization_pending') {
+      await sleep(interval * 1000)
+      continue
+    }
+    if (err === 'slow_down') {
+      interval =
+        typeof data.interval === 'number' ? data.interval : interval + 5
+      await sleep(interval * 1000)
+      continue
+    }
+    if (err === 'expired_token') {
+      throw new GitHubDeviceFlowError(
+        'Device code expired. Start the login flow again.',
+      )
+    }
+    if (err === 'access_denied') {
+      throw new GitHubDeviceFlowError('Authorization was denied or cancelled.')
+    }
+    throw new GitHubDeviceFlowError(`GitHub OAuth error: ${err}`)
+  }
+  throw new GitHubDeviceFlowError('Timed out waiting for authorization.')
+}
+
+/**
+ * Best-effort open browser / OS handler for the verification URL.
+ */
+export async function openVerificationUri(uri: string): Promise<void> {
+  try {
+    if (process.platform === 'darwin') {
+      await execFileNoThrow('open', [uri], { useCwd: false, timeout: 5000 })
+    } else if (process.platform === 'win32') {
+      await execFileNoThrow('cmd', ['/c', 'start', '', uri], {
+        useCwd: false,
+        timeout: 5000,
+      })
+    } else {
+      await execFileNoThrow('xdg-open', [uri], { useCwd: false, timeout: 5000 })
+    }
+  } catch {
+    // User can open the URL manually
+  }
+}
diff --git a/src/utils/auth.ts b/src/utils/auth.ts
index b1cd024e..37d1ca1f 100644
--- a/src/utils/auth.ts
+++ b/src/utils/auth.ts
@@ -117,7 +117,8 @@ export function isAnthropicAuthEnabled(): boolean {
     isEnvTruthy(process.env.CLAUDE_CODE_USE_VERTEX) ||
     isEnvTruthy(process.env.CLAUDE_CODE_USE_FOUNDRY) ||
     isEnvTruthy(process.env.CLAUDE_CODE_USE_OPENAI) ||
-    isEnvTruthy(process.env.CLAUDE_CODE_USE_GEMINI)
+    isEnvTruthy(process.env.CLAUDE_CODE_USE_GEMINI) ||
+    isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB)
 
   // Check if user has configured an external API key source
   // This allows externally-provided API keys to work (without requiring proxy configuration)
@@ -1731,14 +1732,15 @@ export function getSubscriptionName(): string {
   }
 }
 
-/** Check if using third-party services (Bedrock or Vertex or Foundry or OpenAI-compatible or Gemini) */
+/** Check if using third-party services (Bedrock or Vertex or Foundry or OpenAI-compatible or Gemini or GitHub Models) */
 export function isUsing3PServices(): boolean {
   return !!(
     isEnvTruthy(process.env.CLAUDE_CODE_USE_BEDROCK) ||
     isEnvTruthy(process.env.CLAUDE_CODE_USE_VERTEX) ||
     isEnvTruthy(process.env.CLAUDE_CODE_USE_FOUNDRY) ||
     isEnvTruthy(process.env.CLAUDE_CODE_USE_OPENAI) ||
-    isEnvTruthy(process.env.CLAUDE_CODE_USE_GEMINI)
+    isEnvTruthy(process.env.CLAUDE_CODE_USE_GEMINI) ||
+    isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB)
   )
 }
 
diff --git a/src/utils/context.ts b/src/utils/context.ts
index f13b2b0a..7dba02b7 100644
--- a/src/utils/context.ts
+++ b/src/utils/context.ts
@@ -77,7 +77,9 @@ export function 
getContextWindowForModel( process.env.CLAUDE_CODE_USE_OPENAI === '1' || process.env.CLAUDE_CODE_USE_OPENAI === 'true' || process.env.CLAUDE_CODE_USE_GEMINI === '1' || - process.env.CLAUDE_CODE_USE_GEMINI === 'true' + process.env.CLAUDE_CODE_USE_GEMINI === 'true' || + process.env.CLAUDE_CODE_USE_GITHUB === '1' || + process.env.CLAUDE_CODE_USE_GITHUB === 'true' ) { const openaiWindow = getOpenAIContextWindow(model) if (openaiWindow !== undefined) { @@ -181,7 +183,9 @@ export function getModelMaxOutputTokens(model: string): { process.env.CLAUDE_CODE_USE_OPENAI === '1' || process.env.CLAUDE_CODE_USE_OPENAI === 'true' || process.env.CLAUDE_CODE_USE_GEMINI === '1' || - process.env.CLAUDE_CODE_USE_GEMINI === 'true' + process.env.CLAUDE_CODE_USE_GEMINI === 'true' || + process.env.CLAUDE_CODE_USE_GITHUB === '1' || + process.env.CLAUDE_CODE_USE_GITHUB === 'true' ) { const openaiMax = getOpenAIMaxOutputTokens(model) if (openaiMax !== undefined) { diff --git a/src/utils/githubModelsCredentials.hydrate.test.ts b/src/utils/githubModelsCredentials.hydrate.test.ts new file mode 100644 index 00000000..23b0a5ee --- /dev/null +++ b/src/utils/githubModelsCredentials.hydrate.test.ts @@ -0,0 +1,66 @@ +/** + * Hydrate tests live in a separate file with no static import of + * githubModelsCredentials so Bun's mock.module can replace secureStorage + * before that module is first loaded. + */ +import { afterEach, describe, expect, mock, test } from 'bun:test' + +describe('hydrateGithubModelsTokenFromSecureStorage', () => { + const orig = { + CLAUDE_CODE_USE_GITHUB: process.env.CLAUDE_CODE_USE_GITHUB, + GITHUB_TOKEN: process.env.GITHUB_TOKEN, + GH_TOKEN: process.env.GH_TOKEN, + CLAUDE_CODE_SIMPLE: process.env.CLAUDE_CODE_SIMPLE, + } + + afterEach(() => { + mock.restore() + for (const [k, v] of Object.entries(orig)) { + if (v === undefined) { + delete process.env[k as keyof typeof orig] + } else { + process.env[k as keyof typeof orig] = v + } + } + }) + + test('sets GITHUB_TOKEN from secure storage when USE_GITHUB and env token empty', async () => { + process.env.CLAUDE_CODE_USE_GITHUB = '1' + delete process.env.GITHUB_TOKEN + delete process.env.GH_TOKEN + delete process.env.CLAUDE_CODE_SIMPLE + + mock.module('./secureStorage/index.js', () => ({ + getSecureStorage: () => ({ + read: () => ({ + githubModels: { accessToken: 'stored-secret' }, + }), + }), + })) + + const { hydrateGithubModelsTokenFromSecureStorage } = await import( + './githubModelsCredentials.js' + ) + hydrateGithubModelsTokenFromSecureStorage() + expect(process.env.GITHUB_TOKEN).toBe('stored-secret') + }) + + test('does not override existing GITHUB_TOKEN', async () => { + process.env.CLAUDE_CODE_USE_GITHUB = '1' + process.env.GITHUB_TOKEN = 'already' + + mock.module('./secureStorage/index.js', () => ({ + getSecureStorage: () => ({ + read: () => ({ + githubModels: { accessToken: 'stored-secret' }, + }), + }), + })) + + const { hydrateGithubModelsTokenFromSecureStorage } = await import( + './githubModelsCredentials.js' + ) + hydrateGithubModelsTokenFromSecureStorage() + expect(process.env.GITHUB_TOKEN).toBe('already') + }) +}) diff --git a/src/utils/githubModelsCredentials.test.ts b/src/utils/githubModelsCredentials.test.ts new file mode 100644 index 00000000..81c3cdcc --- /dev/null +++ b/src/utils/githubModelsCredentials.test.ts @@ -0,0 +1,47 @@ +import { describe, expect, test } from 'bun:test' + +import { + clearGithubModelsToken, + readGithubModelsToken, + saveGithubModelsToken, +} from './githubModelsCredentials.js' + 
+describe('readGithubModelsToken', () => {
+  test('returns undefined in bare mode', () => {
+    const prev = process.env.CLAUDE_CODE_SIMPLE
+    process.env.CLAUDE_CODE_SIMPLE = '1'
+    expect(readGithubModelsToken()).toBeUndefined()
+    if (prev === undefined) {
+      delete process.env.CLAUDE_CODE_SIMPLE
+    } else {
+      process.env.CLAUDE_CODE_SIMPLE = prev
+    }
+  })
+})
+
+describe('saveGithubModelsToken / clearGithubModelsToken', () => {
+  test('save returns failure in bare mode', () => {
+    const prev = process.env.CLAUDE_CODE_SIMPLE
+    process.env.CLAUDE_CODE_SIMPLE = '1'
+    const r = saveGithubModelsToken('abc')
+    expect(r.success).toBe(false)
+    expect(r.warning).toContain('Bare mode')
+    if (prev === undefined) {
+      delete process.env.CLAUDE_CODE_SIMPLE
+    } else {
+      process.env.CLAUDE_CODE_SIMPLE = prev
+    }
+  })
+
+  test('clear succeeds in bare mode', () => {
+    const prev = process.env.CLAUDE_CODE_SIMPLE
+    process.env.CLAUDE_CODE_SIMPLE = '1'
+    expect(clearGithubModelsToken().success).toBe(true)
+    if (prev === undefined) {
+      delete process.env.CLAUDE_CODE_SIMPLE
+    } else {
+      process.env.CLAUDE_CODE_SIMPLE = prev
+    }
+  })
+})
+
diff --git a/src/utils/githubModelsCredentials.ts b/src/utils/githubModelsCredentials.ts
new file mode 100644
index 00000000..83d5934c
--- /dev/null
+++ b/src/utils/githubModelsCredentials.ts
@@ -0,0 +1,73 @@
+import { isBareMode, isEnvTruthy } from './envUtils.js'
+import { getSecureStorage } from './secureStorage/index.js'
+
+/** JSON key in the shared OpenClaude secure storage blob. */
+export const GITHUB_MODELS_STORAGE_KEY = 'githubModels' as const
+
+export type GithubModelsCredentialBlob = {
+  accessToken: string
+}
+
+export function readGithubModelsToken(): string | undefined {
+  if (isBareMode()) return undefined
+  try {
+    const data = getSecureStorage().read() as
+      | ({ githubModels?: GithubModelsCredentialBlob } & Record<string, unknown>)
+      | null
+    const t = data?.githubModels?.accessToken?.trim()
+    return t || undefined
+  } catch {
+    return undefined
+  }
+}
+
+/**
+ * If GitHub Models mode is on and no token is in the environment, copy the
+ * stored token into process.env so the OpenAI shim and validation see it.
+ */
+export function hydrateGithubModelsTokenFromSecureStorage(): void {
+  if (!isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB)) {
+    return
+  }
+  if (process.env.GITHUB_TOKEN?.trim() || process.env.GH_TOKEN?.trim()) {
+    return
+  }
+  if (isBareMode()) {
+    return
+  }
+  const t = readGithubModelsToken()
+  if (t) {
+    process.env.GITHUB_TOKEN = t
+  }
+}
+
+export function saveGithubModelsToken(token: string): {
+  success: boolean
+  warning?: string
+} {
+  if (isBareMode()) {
+    return { success: false, warning: 'Bare mode: secure storage is disabled.' }
+  }
+  const trimmed = token.trim()
+  if (!trimmed) {
+    return { success: false, warning: 'Token is empty.' }
+  }
+  const secureStorage = getSecureStorage()
+  const prev = secureStorage.read() || {}
+  const merged = {
+    ...(prev as Record<string, unknown>),
+    [GITHUB_MODELS_STORAGE_KEY]: { accessToken: trimmed },
+  }
+  return secureStorage.update(merged as typeof prev)
+}
+
+export function clearGithubModelsToken(): { success: boolean; warning?: string } {
+  if (isBareMode()) {
+    return { success: true }
+  }
+  const secureStorage = getSecureStorage()
+  const prev = secureStorage.read() || {}
+  const next = { ...(prev as Record<string, unknown>) }
+  delete next[GITHUB_MODELS_STORAGE_KEY]
+  return secureStorage.update(next as typeof prev)
+}
diff --git a/src/utils/managedEnvConstants.ts b/src/utils/managedEnvConstants.ts
index 12c56565..86b2da29 100644
--- a/src/utils/managedEnvConstants.ts
+++ b/src/utils/managedEnvConstants.ts
@@ -18,6 +18,7 @@ const PROVIDER_MANAGED_ENV_VARS = new Set([
   'CLAUDE_CODE_USE_BEDROCK',
   'CLAUDE_CODE_USE_VERTEX',
   'CLAUDE_CODE_USE_FOUNDRY',
+  'CLAUDE_CODE_USE_GITHUB',
   // Endpoint config (base URLs, project/resource identifiers)
   'ANTHROPIC_BASE_URL',
   'ANTHROPIC_BEDROCK_BASE_URL',
@@ -147,6 +148,7 @@ export const SAFE_ENV_VARS = new Set([
   'CLAUDE_CODE_SUBAGENT_MODEL',
   'CLAUDE_CODE_USE_BEDROCK',
   'CLAUDE_CODE_USE_FOUNDRY',
+  'CLAUDE_CODE_USE_GITHUB',
   'CLAUDE_CODE_USE_VERTEX',
   'DISABLE_AUTOUPDATER',
   'DISABLE_BUG_COMMAND',
diff --git a/src/utils/model/providers.test.ts b/src/utils/model/providers.test.ts
index 1da3d596..ea03454f 100644
--- a/src/utils/model/providers.test.ts
+++ b/src/utils/model/providers.test.ts
@@ -7,6 +7,7 @@ import {
 
 const originalEnv = {
   CLAUDE_CODE_USE_GEMINI: process.env.CLAUDE_CODE_USE_GEMINI,
+  CLAUDE_CODE_USE_GITHUB: process.env.CLAUDE_CODE_USE_GITHUB,
   CLAUDE_CODE_USE_OPENAI: process.env.CLAUDE_CODE_USE_OPENAI,
   CLAUDE_CODE_USE_BEDROCK: process.env.CLAUDE_CODE_USE_BEDROCK,
   CLAUDE_CODE_USE_VERTEX: process.env.CLAUDE_CODE_USE_VERTEX,
@@ -15,6 +16,7 @@ const originalEnv = {
 
 afterEach(() => {
   process.env.CLAUDE_CODE_USE_GEMINI = originalEnv.CLAUDE_CODE_USE_GEMINI
+  process.env.CLAUDE_CODE_USE_GITHUB = originalEnv.CLAUDE_CODE_USE_GITHUB
   process.env.CLAUDE_CODE_USE_OPENAI = originalEnv.CLAUDE_CODE_USE_OPENAI
   process.env.CLAUDE_CODE_USE_BEDROCK = originalEnv.CLAUDE_CODE_USE_BEDROCK
   process.env.CLAUDE_CODE_USE_VERTEX = originalEnv.CLAUDE_CODE_USE_VERTEX
@@ -23,6 +25,7 @@ afterEach(() => {
 
 function clearProviderEnv(): void {
   delete process.env.CLAUDE_CODE_USE_GEMINI
+  delete process.env.CLAUDE_CODE_USE_GITHUB
   delete process.env.CLAUDE_CODE_USE_OPENAI
   delete process.env.CLAUDE_CODE_USE_BEDROCK
   delete process.env.CLAUDE_CODE_USE_VERTEX
@@ -38,6 +41,7 @@ test('first-party provider keeps Anthropic account setup flow enabled', () => {
 
 test.each([
   ['CLAUDE_CODE_USE_OPENAI', 'openai'],
+  ['CLAUDE_CODE_USE_GITHUB', 'github'],
   ['CLAUDE_CODE_USE_GEMINI', 'gemini'],
   ['CLAUDE_CODE_USE_BEDROCK', 'bedrock'],
   ['CLAUDE_CODE_USE_VERTEX', 'vertex'],
@@ -52,3 +56,11 @@ test.each([
     expect(usesAnthropicAccountFlow()).toBe(false)
   },
 )
+
+test('GEMINI takes precedence over GitHub when both are set', () => {
+  clearProviderEnv()
+  process.env.CLAUDE_CODE_USE_GEMINI = '1'
+  process.env.CLAUDE_CODE_USE_GITHUB = '1'
+
+  expect(getAPIProvider()).toBe('gemini')
+})
diff --git a/src/utils/model/providers.ts b/src/utils/model/providers.ts
index 847b5fc3..30a1f1c9 100644
--- a/src/utils/model/providers.ts
+++ b/src/utils/model/providers.ts
@@ -1,20 +1,29 @@
 import type { AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS } from '../../services/analytics/index.js'
 import { isEnvTruthy } from '../envUtils.js'
 
-export 
type APIProvider = 'firstParty' | 'bedrock' | 'vertex' | 'foundry' | 'openai' | 'gemini' +export type APIProvider = + | 'firstParty' + | 'bedrock' + | 'vertex' + | 'foundry' + | 'openai' + | 'gemini' + | 'github' export function getAPIProvider(): APIProvider { return isEnvTruthy(process.env.CLAUDE_CODE_USE_GEMINI) ? 'gemini' - : isEnvTruthy(process.env.CLAUDE_CODE_USE_OPENAI) - ? 'openai' - : isEnvTruthy(process.env.CLAUDE_CODE_USE_BEDROCK) - ? 'bedrock' - : isEnvTruthy(process.env.CLAUDE_CODE_USE_VERTEX) - ? 'vertex' - : isEnvTruthy(process.env.CLAUDE_CODE_USE_FOUNDRY) - ? 'foundry' - : 'firstParty' + : isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB) + ? 'github' + : isEnvTruthy(process.env.CLAUDE_CODE_USE_OPENAI) + ? 'openai' + : isEnvTruthy(process.env.CLAUDE_CODE_USE_BEDROCK) + ? 'bedrock' + : isEnvTruthy(process.env.CLAUDE_CODE_USE_VERTEX) + ? 'vertex' + : isEnvTruthy(process.env.CLAUDE_CODE_USE_FOUNDRY) + ? 'foundry' + : 'firstParty' } export function usesAnthropicAccountFlow(): boolean { diff --git a/src/utils/providerProfile.ts b/src/utils/providerProfile.ts index 866c19c5..e88662ac 100644 --- a/src/utils/providerProfile.ts +++ b/src/utils/providerProfile.ts @@ -190,6 +190,7 @@ export async function buildLaunchEnv(options: { } delete env.CLAUDE_CODE_USE_OPENAI + delete env.CLAUDE_CODE_USE_GITHUB env.GEMINI_MODEL = processEnv.GEMINI_MODEL || @@ -224,6 +225,7 @@ export async function buildLaunchEnv(options: { } delete env.CLAUDE_CODE_USE_GEMINI + delete env.CLAUDE_CODE_USE_GITHUB delete env.GEMINI_API_KEY delete env.GEMINI_MODEL delete env.GEMINI_BASE_URL diff --git a/src/utils/swarm/spawnUtils.ts b/src/utils/swarm/spawnUtils.ts index cfccdf5a..037d273d 100644 --- a/src/utils/swarm/spawnUtils.ts +++ b/src/utils/swarm/spawnUtils.ts @@ -99,6 +99,18 @@ const TEAMMATE_ENV_VARS = [ 'CLAUDE_CODE_USE_BEDROCK', 'CLAUDE_CODE_USE_VERTEX', 'CLAUDE_CODE_USE_FOUNDRY', + 'CLAUDE_CODE_USE_GITHUB', + 'CLAUDE_CODE_USE_GEMINI', + 'CLAUDE_CODE_USE_OPENAI', + 'GITHUB_TOKEN', + 'GH_TOKEN', + 'OPENAI_API_KEY', + 'OPENAI_BASE_URL', + 'OPENAI_MODEL', + 'GEMINI_API_KEY', + 'GEMINI_BASE_URL', + 'GEMINI_MODEL', + 'GOOGLE_API_KEY', // Custom API endpoint 'ANTHROPIC_BASE_URL', // Config directory override From 2619401d34a6547ddfc7d59d3f245b72d75a10c4 Mon Sep 17 00:00:00 2001 From: Rithul Kamesh Date: Thu, 2 Apr 2026 11:26:27 +0530 Subject: [PATCH 02/25] Remove github-models-pr-draft.md --- docs/github-models-pr-draft.md | 24 ------------------------ 1 file changed, 24 deletions(-) delete mode 100644 docs/github-models-pr-draft.md diff --git a/docs/github-models-pr-draft.md b/docs/github-models-pr-draft.md deleted file mode 100644 index 21fa7fa8..00000000 --- a/docs/github-models-pr-draft.md +++ /dev/null @@ -1,24 +0,0 @@ -# GitHub Models + onboard — PR draft (paste into GitHub) - -**Title:** `feat: GitHub Models provider + interactive onboard (keychain-backed)` - -**Body:** - -## Summary - -- Adds GitHub Models (`models.github.ai`) as an OpenAI-compatible backend via `CLAUDE_CODE_USE_GITHUB` (see existing shim changes). -- Adds `/onboard-github`: interactive Ink flow for GitHub Device Login or PAT, stores token in OS-backed secure storage (macOS Keychain when available, else `~/.claude/.credentials.json`), and writes user settings `env` so no `export GITHUB_TOKEN` is required. -- Applies user settings before provider env validation and hydrates `GITHUB_TOKEN` from secure storage when the GitHub provider flag is on. - -## How to test - -1. 
Run `openclaude` and execute `/onboard-github` (or launch via command registration). -2. Complete device flow or paste a PAT with Models access. -3. Restart CLI; confirm `CLAUDE_CODE_USE_GITHUB=1` in `~/.claude/settings.json` (or merged file) and that inference works without exporting `GITHUB_TOKEN`. -4. `bun test` (new suites) + `bun run build`. - -## Notes / follow-ups - -- Device flow OAuth app client ID is configurable via `GITHUB_DEVICE_FLOW_CLIENT_ID`; verify scope list against current GitHub Models documentation. -- `/logout` currently deletes all secure storage; GitHub token is cleared too — document or narrow in a follow-up. -- Linux: secure storage is plaintext with chmod 600 today; libsecret is still TODO in `secureStorage`. From f07f11b7b6134f07b74024afb299f1f2a258623a Mon Sep 17 00:00:00 2001 From: Rithul Kamesh Date: Thu, 2 Apr 2026 12:53:56 +0530 Subject: [PATCH 03/25] fix: use bun test for provider-recommendation script to resolve module errors --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index 5f5351b8..0bfa42c6 100644 --- a/package.json +++ b/package.json @@ -30,7 +30,7 @@ "dev:fast": "bun run profile:fast && bun run dev:ollama:fast", "dev:code": "bun run profile:code && bun run dev:profile", "start": "node dist/cli.mjs", - "test:provider-recommendation": "node --test --experimental-strip-types src/utils/providerRecommendation.test.ts src/utils/providerProfile.test.ts", + "test:provider-recommendation": "bun test src/utils/providerRecommendation.test.ts src/utils/providerProfile.test.ts", "typecheck": "tsc --noEmit", "smoke": "bun run build && node dist/cli.mjs --version", "test:provider": "bun test src/services/api/*.test.ts src/utils/context.test.ts", From 577e654ae74e868fb7095d5a0804e6d9b42b4e32 Mon Sep 17 00:00:00 2001 From: Misha Skvortsov Date: Wed, 1 Apr 2026 21:42:43 +0300 Subject: [PATCH 04/25] feat: add support for Atomic Chat provider - Introduced a new provider profile for Atomic Chat, allowing it to be used alongside existing providers. - Updated `package.json` to include a new development script for launching Atomic Chat. - Modified `smart_router.py` to recognize Atomic Chat as a local provider that does not require an API key. - Enhanced provider discovery and launch scripts to handle Atomic Chat, including model listing and connection checks. - Added tests to ensure proper environment setup and behavior for Atomic Chat profiles. This update expands the functionality of the application to support local LLMs via Atomic Chat, improving versatility for users. --- atomic_chat_provider.py | 146 ++++++++++++++++++++++++++++++ package.json | 1 + scripts/provider-bootstrap.ts | 21 ++++- scripts/provider-discovery.ts | 56 ++++++++++++ scripts/provider-launch.ts | 36 +++++++- smart_router.py | 13 ++- src/utils/providerProfile.test.ts | 70 ++++++++++++++ src/utils/providerProfile.ts | 37 +++++++- test_atomic_chat_provider.py | 130 ++++++++++++++++++++++++++ 9 files changed, 503 insertions(+), 7 deletions(-) create mode 100644 atomic_chat_provider.py create mode 100644 test_atomic_chat_provider.py diff --git a/atomic_chat_provider.py b/atomic_chat_provider.py new file mode 100644 index 00000000..bf55155f --- /dev/null +++ b/atomic_chat_provider.py @@ -0,0 +1,146 @@ +""" +atomic_chat_provider.py +----------------------- +Adds native Atomic Chat support to openclaude. +Lets Claude Code route requests to any locally-running model via +Atomic Chat (Apple Silicon only) at 127.0.0.1:1337. 
+ +Atomic Chat exposes an OpenAI-compatible API, so messages are forwarded +directly without translation. + +Usage (.env): + PREFERRED_PROVIDER=atomic-chat + ATOMIC_CHAT_BASE_URL=http://127.0.0.1:1337 +""" + +import httpx +import json +import logging +import os +from typing import AsyncIterator + +logger = logging.getLogger(__name__) +ATOMIC_CHAT_BASE_URL = os.getenv("ATOMIC_CHAT_BASE_URL", "http://127.0.0.1:1337") + + +def _api_url(path: str) -> str: + return f"{ATOMIC_CHAT_BASE_URL}/v1{path}" + + +async def check_atomic_chat_running() -> bool: + try: + async with httpx.AsyncClient(timeout=3.0) as client: + resp = await client.get(_api_url("/models")) + return resp.status_code == 200 + except Exception: + return False + + +async def list_atomic_chat_models() -> list[str]: + try: + async with httpx.AsyncClient(timeout=5.0) as client: + resp = await client.get(_api_url("/models")) + resp.raise_for_status() + data = resp.json() + return [m["id"] for m in data.get("data", [])] + except Exception as e: + logger.warning(f"Could not list Atomic Chat models: {e}") + return [] + + +async def atomic_chat( + model: str, + messages: list[dict], + system: str | None = None, + max_tokens: int = 4096, + temperature: float = 1.0, +) -> dict: + chat_messages = list(messages) + if system: + chat_messages.insert(0, {"role": "system", "content": system}) + + payload = { + "model": model, + "messages": chat_messages, + "max_tokens": max_tokens, + "temperature": temperature, + "stream": False, + } + + async with httpx.AsyncClient(timeout=120.0) as client: + resp = await client.post(_api_url("/chat/completions"), json=payload) + resp.raise_for_status() + data = resp.json() + + choice = data.get("choices", [{}])[0] + assistant_text = choice.get("message", {}).get("content", "") + usage = data.get("usage", {}) + + return { + "id": data.get("id", "msg_atomic_chat"), + "type": "message", + "role": "assistant", + "content": [{"type": "text", "text": assistant_text}], + "model": model, + "stop_reason": "end_turn", + "stop_sequence": None, + "usage": { + "input_tokens": usage.get("prompt_tokens", 0), + "output_tokens": usage.get("completion_tokens", 0), + }, + } + + +async def atomic_chat_stream( + model: str, + messages: list[dict], + system: str | None = None, + max_tokens: int = 4096, + temperature: float = 1.0, +) -> AsyncIterator[str]: + chat_messages = list(messages) + if system: + chat_messages.insert(0, {"role": "system", "content": system}) + + payload = { + "model": model, + "messages": chat_messages, + "max_tokens": max_tokens, + "temperature": temperature, + "stream": True, + } + + yield "event: message_start\n" + yield f'data: {json.dumps({"type": "message_start", "message": {"id": "msg_atomic_chat_stream", "type": "message", "role": "assistant", "content": [], "model": model, "stop_reason": None, "usage": {"input_tokens": 0, "output_tokens": 0}}})}\n\n' + yield "event: content_block_start\n" + yield f'data: {json.dumps({"type": "content_block_start", "index": 0, "content_block": {"type": "text", "text": ""}})}\n\n' + + async with httpx.AsyncClient(timeout=120.0) as client: + async with client.stream("POST", _api_url("/chat/completions"), json=payload) as resp: + resp.raise_for_status() + async for line in resp.aiter_lines(): + if not line or not line.startswith("data: "): + continue + raw = line[len("data: "):] + if raw.strip() == "[DONE]": + break + try: + chunk = json.loads(raw) + delta = chunk.get("choices", [{}])[0].get("delta", {}) + delta_text = delta.get("content", "") + if delta_text: + yield 
"event: content_block_delta\n" + yield f'data: {json.dumps({"type": "content_block_delta", "index": 0, "delta": {"type": "text_delta", "text": delta_text}})}\n\n' + + finish_reason = chunk.get("choices", [{}])[0].get("finish_reason") + if finish_reason: + usage = chunk.get("usage", {}) + yield "event: content_block_stop\n" + yield f'data: {json.dumps({"type": "content_block_stop", "index": 0})}\n\n' + yield "event: message_delta\n" + yield f'data: {json.dumps({"type": "message_delta", "delta": {"stop_reason": "end_turn", "stop_sequence": None}, "usage": {"output_tokens": usage.get("completion_tokens", 0)}})}\n\n' + yield "event: message_stop\n" + yield f'data: {json.dumps({"type": "message_stop"})}\n\n' + break + except json.JSONDecodeError: + continue diff --git a/package.json b/package.json index 47052352..03abde72 100644 --- a/package.json +++ b/package.json @@ -21,6 +21,7 @@ "dev:gemini": "bun run scripts/provider-launch.ts gemini", "dev:ollama": "bun run scripts/provider-launch.ts ollama", "dev:ollama:fast": "bun run scripts/provider-launch.ts ollama --fast --bare", + "dev:atomic-chat": "bun run scripts/provider-launch.ts atomic-chat", "profile:init": "bun run scripts/provider-bootstrap.ts", "profile:recommend": "bun run scripts/provider-recommend.ts", "profile:auto": "bun run scripts/provider-recommend.ts --apply", diff --git a/scripts/provider-bootstrap.ts b/scripts/provider-bootstrap.ts index 82ebbbb6..f39e3e50 100644 --- a/scripts/provider-bootstrap.ts +++ b/scripts/provider-bootstrap.ts @@ -10,6 +10,7 @@ import { recommendOllamaModel, } from '../src/utils/providerRecommendation.ts' import { + buildAtomicChatProfileEnv, buildCodexProfileEnv, buildGeminiProfileEnv, buildOllamaProfileEnv, @@ -20,8 +21,11 @@ import { type ProviderProfile, } from '../src/utils/providerProfile.ts' import { + getAtomicChatChatBaseUrl, getOllamaChatBaseUrl, + hasLocalAtomicChat, hasLocalOllama, + listAtomicChatModels, listOllamaModels, } from './provider-discovery.ts' @@ -34,7 +38,7 @@ function parseArg(name: string): string | null { function parseProviderArg(): ProviderProfile | 'auto' { const p = parseArg('--provider')?.toLowerCase() - if (p === 'openai' || p === 'ollama' || p === 'codex' || p === 'gemini') return p + if (p === 'openai' || p === 'ollama' || p === 'codex' || p === 'gemini' || p === 'atomic-chat') return p return 'auto' } @@ -102,6 +106,21 @@ async function main(): Promise { getOllamaChatBaseUrl, }, ) + } else if (selected === 'atomic-chat') { + const model = argModel || (await listAtomicChatModels(argBaseUrl || undefined))[0] + if (!model) { + if (!(await hasLocalAtomicChat(argBaseUrl || undefined))) { + console.error('Atomic Chat is not running (could not connect to 127.0.0.1:1337).\n Download from https://atomic.chat/ and launch the application.') + } else { + console.error('Atomic Chat is running but no model is loaded. 
Open Atomic Chat and download or start a model first.')
+      }
+      process.exit(1)
+    }
+
+    env = buildAtomicChatProfileEnv(model, {
+      baseUrl: argBaseUrl,
+      getAtomicChatChatBaseUrl,
+    })
   } else if (selected === 'codex') {
     const builtEnv = buildCodexProfileEnv({
       model: argModel,
diff --git a/scripts/provider-discovery.ts b/scripts/provider-discovery.ts
index 9e3aacda..9c463f2f 100644
--- a/scripts/provider-discovery.ts
+++ b/scripts/provider-discovery.ts
@@ -1,6 +1,7 @@
 import type { OllamaModelDescriptor } from '../src/utils/providerRecommendation.ts'
 
 export const DEFAULT_OLLAMA_BASE_URL = 'http://localhost:11434'
+export const DEFAULT_ATOMIC_CHAT_BASE_URL = 'http://127.0.0.1:1337'
 
 function withTimeoutSignal(timeoutMs: number): {
   signal: AbortSignal
@@ -93,6 +94,61 @@
   }
 }
 
+// ── Atomic Chat discovery (Apple Silicon local LLMs at 127.0.0.1:1337) ──────
+
+export function getAtomicChatApiBaseUrl(baseUrl?: string): string {
+  const raw = baseUrl || process.env.ATOMIC_CHAT_BASE_URL || DEFAULT_ATOMIC_CHAT_BASE_URL
+  return trimTrailingSlash(raw)
+}
+
+export function getAtomicChatChatBaseUrl(baseUrl?: string): string {
+  return `${getAtomicChatApiBaseUrl(baseUrl)}/v1`
+}
+
+export async function hasLocalAtomicChat(baseUrl?: string): Promise<boolean> {
+  const { signal, clear } = withTimeoutSignal(1200)
+  try {
+    const response = await fetch(`${getAtomicChatChatBaseUrl(baseUrl)}/models`, {
+      method: 'GET',
+      signal,
+    })
+    return response.ok
+  } catch {
+    return false
+  } finally {
+    clear()
+  }
+}
+
+export async function listAtomicChatModels(
+  baseUrl?: string,
+): Promise<string[]> {
+  const { signal, clear } = withTimeoutSignal(5000)
+  try {
+    const response = await fetch(`${getAtomicChatChatBaseUrl(baseUrl)}/models`, {
+      method: 'GET',
+      signal,
+    })
+    if (!response.ok) {
+      return []
+    }
+
+    const data = await response.json() as {
+      data?: Array<{ id?: string }>
+    }
+
+    return (data.data ?? [])
+      .filter(model => Boolean(model.id))
+      .map(model => model.id!)
+  } catch {
+    return []
+  } finally {
+    clear()
+  }
+}
+
+// ── Ollama benchmarking ─────────────────────────────────────────────────────
+
 export async function benchmarkOllamaModel(
   modelName: string,
   baseUrl?: string,
diff --git a/scripts/provider-launch.ts b/scripts/provider-launch.ts
index 2859e9e8..17f11fb8 100644
--- a/scripts/provider-launch.ts
+++ b/scripts/provider-launch.ts
@@ -16,8 +16,11 @@ import {
   type ProviderProfile,
 } from '../src/utils/providerProfile.ts'
 import {
+  getAtomicChatChatBaseUrl,
   getOllamaChatBaseUrl,
+  hasLocalAtomicChat,
   hasLocalOllama,
+  listAtomicChatModels,
   listOllamaModels,
 } from './provider-discovery.ts'
@@ -48,7 +51,7 @@ function parseLaunchOptions(argv: string[]): LaunchOptions {
       continue
     }
 
-    if ((lower === 'auto' || lower === 'openai' || lower === 'ollama' || lower === 'codex' || lower === 'gemini') && requestedProfile === 'auto') {
+    if ((lower === 'auto' || lower === 'openai' || lower === 'ollama' || lower === 'codex' || lower === 'gemini' || lower === 'atomic-chat') && requestedProfile === 'auto') {
       requestedProfile = lower as ProviderProfile | 'auto'
       continue
     }
@@ -79,7 +82,7 @@ function loadPersistedProfile(): ProfileFile | null {
   if (!existsSync(path)) return null
   try {
     const parsed = JSON.parse(readFileSync(path, 'utf8')) as ProfileFile
-    if (parsed.profile === 'openai' || parsed.profile === 'ollama' || parsed.profile === 'codex' || parsed.profile === 'gemini') {
+    if (parsed.profile === 'openai' || parsed.profile === 'ollama' || parsed.profile === 'codex' || parsed.profile === 'gemini' || parsed.profile === 'atomic-chat') {
       return parsed
     }
     return null
@@ -96,6 +99,11 @@ async function resolveOllamaDefaultModel(
   return recommended?.name ?? null
 }
 
+async function resolveAtomicChatDefaultModel(): Promise<string | null> {
+  const models = await listAtomicChatModels()
+  return models[0] ??
null +} + function runCommand(command: string, env: NodeJS.ProcessEnv): Promise { return runProcess(command, [], env) } @@ -132,6 +140,10 @@ function printSummary(profile: ProviderProfile, env: NodeJS.ProcessEnv): void { console.log(`OPENAI_BASE_URL=${env.OPENAI_BASE_URL}`) console.log(`OPENAI_MODEL=${env.OPENAI_MODEL}`) console.log(`CODEX_API_KEY_SET=${Boolean(resolveCodexApiCredentials(env).apiKey)}`) + } else if (profile === 'atomic-chat') { + console.log(`OPENAI_BASE_URL=${env.OPENAI_BASE_URL}`) + console.log(`OPENAI_MODEL=${env.OPENAI_MODEL}`) + console.log('OPENAI_API_KEY_SET=false (local provider, no key required)') } else { console.log(`OPENAI_BASE_URL=${env.OPENAI_BASE_URL}`) console.log(`OPENAI_MODEL=${env.OPENAI_MODEL}`) @@ -143,7 +155,7 @@ async function main(): Promise { const options = parseLaunchOptions(process.argv.slice(2)) const requestedProfile = options.requestedProfile if (!requestedProfile) { - console.error('Usage: bun run scripts/provider-launch.ts [openai|ollama|codex|gemini|auto] [--fast] [--goal ] [-- ]') + console.error('Usage: bun run scripts/provider-launch.ts [openai|ollama|codex|gemini|atomic-chat|auto] [--fast] [--goal ] [-- ]') process.exit(1) } @@ -175,12 +187,30 @@ async function main(): Promise { } } + let resolvedAtomicChatModel: string | null = null + if ( + profile === 'atomic-chat' && + (persisted?.profile !== 'atomic-chat' || !persisted?.env?.OPENAI_MODEL) + ) { + if (!(await hasLocalAtomicChat())) { + console.error('Atomic Chat is not running (could not connect to 127.0.0.1:1337).\n Download from https://atomic.chat/ and launch the application.') + process.exit(1) + } + resolvedAtomicChatModel = await resolveAtomicChatDefaultModel() + if (!resolvedAtomicChatModel) { + console.error('Atomic Chat is running but no model is loaded. 
Open Atomic Chat and download or start a model first.') + process.exit(1) + } + } + const env = await buildLaunchEnv({ profile, persisted, goal: options.goal, getOllamaChatBaseUrl, resolveOllamaDefaultModel: async () => resolvedOllamaModel || 'llama3.1:8b', + getAtomicChatChatBaseUrl, + resolveAtomicChatDefaultModel: async () => resolvedAtomicChatModel, }) if (options.fast) { applyFastFlags(env) diff --git a/smart_router.py b/smart_router.py index 0a54a791..14b90c03 100644 --- a/smart_router.py +++ b/smart_router.py @@ -57,8 +57,8 @@ class Provider: @property def is_configured(self) -> bool: """True if the provider has an API key set.""" - if self.name == "ollama": - return True # Ollama needs no API key + if self.name in ("ollama", "atomic-chat"): + return True # Local providers need no API key return bool(self.api_key) @property @@ -93,6 +93,7 @@ def build_default_providers() -> list[Provider]: big = os.getenv("BIG_MODEL", "gpt-4.1") small = os.getenv("SMALL_MODEL", "gpt-4.1-mini") ollama_url = os.getenv("OLLAMA_BASE_URL", "http://localhost:11434") + atomic_chat_url = os.getenv("ATOMIC_CHAT_BASE_URL", "http://127.0.0.1:1337") return [ Provider( @@ -119,6 +120,14 @@ def build_default_providers() -> list[Provider]: big_model=big if "gemini" not in big and "gpt" not in big else "llama3:8b", small_model=small if "gemini" not in small and "gpt" not in small else "llama3:8b", ), + Provider( + name="atomic-chat", + ping_url=f"{atomic_chat_url}/v1/models", + api_key_env="", + cost_per_1k_tokens=0.0, # free — local (Apple Silicon) + big_model=big if "gemini" not in big and "gpt" not in big else "llama3:8b", + small_model=small if "gemini" not in small and "gpt" not in small else "llama3:8b", + ), ] diff --git a/src/utils/providerProfile.test.ts b/src/utils/providerProfile.test.ts index e90746c6..b953e1b6 100644 --- a/src/utils/providerProfile.test.ts +++ b/src/utils/providerProfile.test.ts @@ -5,6 +5,7 @@ import { join } from 'node:path' import test from 'node:test' import { + buildAtomicChatProfileEnv, buildCodexProfileEnv, buildGeminiProfileEnv, buildLaunchEnv, @@ -381,3 +382,72 @@ test('auto profile falls back to openai when no viable ollama model exists', () assert.equal(selectAutoProfile(null), 'openai') assert.equal(selectAutoProfile('qwen2.5-coder:7b'), 'ollama') }) + +// ── Atomic Chat profile tests ──────────────────────────────────────────────── + +test('atomic-chat profiles never persist openai api keys', () => { + const env = buildAtomicChatProfileEnv('some-local-model', { + getAtomicChatChatBaseUrl: () => 'http://127.0.0.1:1337/v1', + }) + + assert.deepEqual(env, { + OPENAI_BASE_URL: 'http://127.0.0.1:1337/v1', + OPENAI_MODEL: 'some-local-model', + }) + assert.equal('OPENAI_API_KEY' in env, false) +}) + +test('atomic-chat profiles respect custom base url', () => { + const env = buildAtomicChatProfileEnv('my-model', { + baseUrl: 'http://192.168.1.100:1337', + getAtomicChatChatBaseUrl: (baseUrl?: string) => + baseUrl ? 
`${baseUrl}/v1` : 'http://127.0.0.1:1337/v1', + }) + + assert.equal(env.OPENAI_BASE_URL, 'http://192.168.1.100:1337/v1') + assert.equal(env.OPENAI_MODEL, 'my-model') +}) + +test('matching persisted atomic-chat env is reused for atomic-chat launch', async () => { + const env = await buildLaunchEnv({ + profile: 'atomic-chat', + persisted: profile('atomic-chat', { + OPENAI_BASE_URL: 'http://127.0.0.1:1337/v1', + OPENAI_MODEL: 'llama-3.1-8b', + }), + goal: 'balanced', + processEnv: {}, + getAtomicChatChatBaseUrl: () => 'http://127.0.0.1:1337/v1', + resolveAtomicChatDefaultModel: async () => 'other-model', + }) + + assert.equal(env.OPENAI_BASE_URL, 'http://127.0.0.1:1337/v1') + assert.equal(env.OPENAI_MODEL, 'llama-3.1-8b') + assert.equal(env.OPENAI_API_KEY, undefined) + assert.equal(env.CODEX_API_KEY, undefined) +}) + +test('atomic-chat launch ignores mismatched persisted openai env', async () => { + const env = await buildLaunchEnv({ + profile: 'atomic-chat', + persisted: profile('openai', { + OPENAI_BASE_URL: 'https://api.openai.com/v1', + OPENAI_MODEL: 'gpt-4o', + OPENAI_API_KEY: 'sk-persisted', + }), + goal: 'balanced', + processEnv: { + OPENAI_API_KEY: 'sk-live', + CODEX_API_KEY: 'codex-live', + CHATGPT_ACCOUNT_ID: 'acct_live', + }, + getAtomicChatChatBaseUrl: () => 'http://127.0.0.1:1337/v1', + resolveAtomicChatDefaultModel: async () => 'local-model', + }) + + assert.equal(env.OPENAI_BASE_URL, 'http://127.0.0.1:1337/v1') + assert.equal(env.OPENAI_MODEL, 'local-model') + assert.equal(env.OPENAI_API_KEY, undefined) + assert.equal(env.CODEX_API_KEY, undefined) + assert.equal(env.CHATGPT_ACCOUNT_ID, undefined) +}) diff --git a/src/utils/providerProfile.ts b/src/utils/providerProfile.ts index 866c19c5..d85af0c6 100644 --- a/src/utils/providerProfile.ts +++ b/src/utils/providerProfile.ts @@ -13,7 +13,7 @@ import { const DEFAULT_GEMINI_BASE_URL = 'https://generativelanguage.googleapis.com/v1beta/openai' const DEFAULT_GEMINI_MODEL = 'gemini-2.0-flash' -export type ProviderProfile = 'openai' | 'ollama' | 'codex' | 'gemini' +export type ProviderProfile = 'openai' | 'ollama' | 'codex' | 'gemini' | 'atomic-chat' export type ProfileEnv = { OPENAI_BASE_URL?: string @@ -53,6 +53,19 @@ export function buildOllamaProfileEnv( } } +export function buildAtomicChatProfileEnv( + model: string, + options: { + baseUrl?: string | null + getAtomicChatChatBaseUrl: (baseUrl?: string) => string + }, +): ProfileEnv { + return { + OPENAI_BASE_URL: options.getAtomicChatChatBaseUrl(options.baseUrl ?? undefined), + OPENAI_MODEL: model, + } +} + export function buildGeminiProfileEnv(options: { model?: string | null baseUrl?: string | null @@ -171,6 +184,8 @@ export async function buildLaunchEnv(options: { processEnv?: NodeJS.ProcessEnv getOllamaChatBaseUrl?: (baseUrl?: string) => string resolveOllamaDefaultModel?: (goal: RecommendationGoal) => Promise + getAtomicChatChatBaseUrl?: (baseUrl?: string) => string + resolveAtomicChatDefaultModel?: () => Promise }): Promise { const processEnv = options.processEnv ?? process.env const persistedEnv = @@ -248,6 +263,26 @@ export async function buildLaunchEnv(options: { return env } + if (options.profile === 'atomic-chat') { + const getAtomicChatBaseUrl = + options.getAtomicChatChatBaseUrl ?? (() => 'http://127.0.0.1:1337/v1') + const resolveModel = + options.resolveAtomicChatDefaultModel ?? 
(async () => null as string | null) + + env.OPENAI_BASE_URL = persistedEnv.OPENAI_BASE_URL || getAtomicChatBaseUrl() + env.OPENAI_MODEL = + persistedEnv.OPENAI_MODEL || + (await resolveModel()) || + '' + + delete env.OPENAI_API_KEY + delete env.CODEX_API_KEY + delete env.CHATGPT_ACCOUNT_ID + delete env.CODEX_ACCOUNT_ID + + return env + } + if (options.profile === 'codex') { env.OPENAI_BASE_URL = persistedEnv.OPENAI_BASE_URL && isCodexBaseUrl(persistedEnv.OPENAI_BASE_URL) diff --git a/test_atomic_chat_provider.py b/test_atomic_chat_provider.py new file mode 100644 index 00000000..819c610c --- /dev/null +++ b/test_atomic_chat_provider.py @@ -0,0 +1,130 @@ +""" +test_atomic_chat_provider.py +Run: pytest test_atomic_chat_provider.py -v +""" + +import pytest +from unittest.mock import AsyncMock, MagicMock, patch +from atomic_chat_provider import ( + atomic_chat, + list_atomic_chat_models, + check_atomic_chat_running, +) + + +@pytest.mark.asyncio +async def test_atomic_chat_running_true(): + mock_response = MagicMock() + mock_response.status_code = 200 + with patch("atomic_chat_provider.httpx.AsyncClient") as MockClient: + MockClient.return_value.__aenter__.return_value.get = AsyncMock(return_value=mock_response) + result = await check_atomic_chat_running() + assert result is True + + +@pytest.mark.asyncio +async def test_atomic_chat_running_false_on_exception(): + with patch("atomic_chat_provider.httpx.AsyncClient") as MockClient: + MockClient.return_value.__aenter__.return_value.get = AsyncMock(side_effect=Exception("refused")) + result = await check_atomic_chat_running() + assert result is False + + +@pytest.mark.asyncio +async def test_list_models_returns_ids(): + mock_response = MagicMock() + mock_response.status_code = 200 + mock_response.json.return_value = { + "data": [{"id": "llama-3.1-8b"}, {"id": "mistral-7b"}], + } + mock_response.raise_for_status = MagicMock() + with patch("atomic_chat_provider.httpx.AsyncClient") as MockClient: + MockClient.return_value.__aenter__.return_value.get = AsyncMock(return_value=mock_response) + models = await list_atomic_chat_models() + assert "llama-3.1-8b" in models + assert "mistral-7b" in models + + +@pytest.mark.asyncio +async def test_list_models_empty_on_failure(): + with patch("atomic_chat_provider.httpx.AsyncClient") as MockClient: + MockClient.return_value.__aenter__.return_value.get = AsyncMock(side_effect=Exception("down")) + models = await list_atomic_chat_models() + assert models == [] + + +@pytest.mark.asyncio +async def test_atomic_chat_returns_anthropic_format(): + mock_response = MagicMock() + mock_response.raise_for_status = MagicMock() + mock_response.json.return_value = { + "id": "chatcmpl-abc123", + "choices": [{"message": {"content": "42 is the answer."}}], + "usage": {"prompt_tokens": 10, "completion_tokens": 8}, + } + with patch("atomic_chat_provider.httpx.AsyncClient") as MockClient: + MockClient.return_value.__aenter__.return_value.post = AsyncMock(return_value=mock_response) + result = await atomic_chat( + model="llama-3.1-8b", + messages=[{"role": "user", "content": "What is 6*7?"}], + ) + assert result["type"] == "message" + assert result["role"] == "assistant" + assert "42" in result["content"][0]["text"] + assert result["usage"]["input_tokens"] == 10 + assert result["usage"]["output_tokens"] == 8 + + +@pytest.mark.asyncio +async def test_atomic_chat_prepends_system(): + captured = {} + + async def mock_post(url, json=None, **kwargs): + captured.update(json or {}) + m = MagicMock() + m.raise_for_status = MagicMock() + 
m.json.return_value = { + "id": "chatcmpl-xyz", + "choices": [{"message": {"content": "ok"}}], + "usage": {"prompt_tokens": 1, "completion_tokens": 1}, + } + return m + + with patch("atomic_chat_provider.httpx.AsyncClient") as MockClient: + MockClient.return_value.__aenter__.return_value.post = mock_post + await atomic_chat( + model="llama-3.1-8b", + messages=[{"role": "user", "content": "Hi"}], + system="Be helpful.", + ) + assert captured["messages"][0]["role"] == "system" + assert "helpful" in captured["messages"][0]["content"] + + +@pytest.mark.asyncio +async def test_atomic_chat_sends_correct_payload(): + captured = {} + + async def mock_post(url, json=None, **kwargs): + captured.update(json or {}) + m = MagicMock() + m.raise_for_status = MagicMock() + m.json.return_value = { + "id": "chatcmpl-xyz", + "choices": [{"message": {"content": "ok"}}], + "usage": {"prompt_tokens": 1, "completion_tokens": 1}, + } + return m + + with patch("atomic_chat_provider.httpx.AsyncClient") as MockClient: + MockClient.return_value.__aenter__.return_value.post = mock_post + await atomic_chat( + model="test-model", + messages=[{"role": "user", "content": "Test"}], + max_tokens=2048, + temperature=0.5, + ) + assert captured["model"] == "test-model" + assert captured["max_tokens"] == 2048 + assert captured["temperature"] == 0.5 + assert captured["stream"] is False From 3b7b9740f2dd3493a887e1277a7570c129bae3d9 Mon Sep 17 00:00:00 2001 From: Misha Skvortsov Date: Wed, 1 Apr 2026 23:06:25 +0300 Subject: [PATCH 05/25] fix: update OPENAI_API_KEY message and add Atomic Chat URL check - Updated the message for the OPENAI_API_KEY check to include Atomic Chat as an allowed local provider. - Introduced a new function to check if the base URL corresponds to Atomic Chat, enhancing the system's ability to identify local providers. - Adjusted the Ollama processor mode check to skip processing when an Atomic Chat local provider is detected. --- hello/world | 1 + scripts/system-check.ts | 15 ++++++++++++++- 2 files changed, 15 insertions(+), 1 deletion(-) create mode 100644 hello/world diff --git a/hello/world b/hello/world new file mode 100644 index 00000000..8ab686ea --- /dev/null +++ b/hello/world @@ -0,0 +1 @@ +Hello, World! 
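The system-check change below treats any local URL on port 1337 as Atomic Chat, so the
Ollama `ps` probe is skipped for it. A minimal standalone sketch of that heuristic
(the function name here is hypothetical; the patch's real helper is `isAtomicChatUrl`,
and the local-host check approximates the script's existing `isLocalBaseUrl`):

    function looksLikeAtomicChat(baseUrl: string): boolean {
      try {
        const parsed = new URL(baseUrl)
        // Atomic Chat serves on port 1337 by default; only local hosts qualify.
        const localHosts = ['localhost', '127.0.0.1', '0.0.0.0']
        return parsed.port === '1337' && localHosts.includes(parsed.hostname)
      } catch {
        return false // unparseable URLs are never treated as Atomic Chat
      }
    }

    // looksLikeAtomicChat('http://127.0.0.1:1337/v1')  -> true
    // looksLikeAtomicChat('https://api.openai.com/v1') -> false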
diff --git a/scripts/system-check.ts b/scripts/system-check.ts index e129685a..dfb9db21 100644 --- a/scripts/system-check.ts +++ b/scripts/system-check.ts @@ -186,7 +186,7 @@ function checkOpenAIEnv(): CheckResult[] { } else if (!key && !isLocalBaseUrl(request.baseUrl)) { results.push(fail('OPENAI_API_KEY', 'Missing key for non-local provider URL.')) } else if (!key) { - results.push(pass('OPENAI_API_KEY', 'Not set (allowed for local providers like Ollama/LM Studio).')) + results.push(pass('OPENAI_API_KEY', 'Not set (allowed for local providers like Atomic Chat/Ollama/LM Studio).')) } else { results.push(pass('OPENAI_API_KEY', 'Configured.')) } @@ -271,6 +271,15 @@ async function checkBaseUrlReachability(): Promise { } } +function isAtomicChatUrl(baseUrl: string): boolean { + try { + const parsed = new URL(baseUrl) + return parsed.port === '1337' && isLocalBaseUrl(baseUrl) + } catch { + return false + } +} + function checkOllamaProcessorMode(): CheckResult { if (!isTruthy(process.env.CLAUDE_CODE_USE_OPENAI) || isTruthy(process.env.CLAUDE_CODE_USE_GEMINI)) { return pass('Ollama processor mode', 'Skipped (OpenAI-compatible mode disabled).') @@ -281,6 +290,10 @@ function checkOllamaProcessorMode(): CheckResult { return pass('Ollama processor mode', 'Skipped (provider URL is not local).') } + if (isAtomicChatUrl(baseUrl)) { + return pass('Ollama processor mode', 'Skipped (Atomic Chat local provider detected, not Ollama).') + } + const result = spawnSync('ollama', ['ps'], { cwd: process.cwd(), encoding: 'utf8', From 4f78bde08581fda7e636d000ec08558c68d0cf6a Mon Sep 17 00:00:00 2001 From: Mike <71440932+Vect0rM@users.noreply.github.com> Date: Wed, 1 Apr 2026 23:15:10 +0300 Subject: [PATCH 06/25] Delete hello/world --- hello/world | 1 - 1 file changed, 1 deletion(-) delete mode 100644 hello/world diff --git a/hello/world b/hello/world deleted file mode 100644 index 8ab686ea..00000000 --- a/hello/world +++ /dev/null @@ -1 +0,0 @@ -Hello, World! 
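The next patch replaces the inline `.openclaude-profile.json` reads and writes in the
scripts with shared `loadProfileFile`/`saveProfileFile` helpers. A rough sketch of the
pattern, assuming the same file location and the 0o600 mode the bootstrap script already
used (the exact `ProfileFile` shape beyond `profile` and `env` is an assumption):

    import { existsSync, readFileSync, writeFileSync } from 'node:fs'
    import { resolve } from 'node:path'

    type ProfileFile = {
      profile: string
      env: Record<string, string | undefined>
    }

    const PROFILE_PATH = resolve(process.cwd(), '.openclaude-profile.json')

    export function loadProfileFile(): ProfileFile | null {
      if (!existsSync(PROFILE_PATH)) return null
      try {
        // Malformed or unreadable profiles are treated as absent.
        return JSON.parse(readFileSync(PROFILE_PATH, 'utf8')) as ProfileFile
      } catch {
        return null
      }
    }

    export function saveProfileFile(profile: ProfileFile): string {
      // mode 0o600 keeps any persisted API keys private to the current user.
      writeFileSync(PROFILE_PATH, JSON.stringify(profile, null, 2), {
        encoding: 'utf8',
        mode: 0o600,
      })
      return PROFILE_PATH
    }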
From 08f0b6030e75bc5ed6e3d1643b84c4b5f5f85e1c Mon Sep 17 00:00:00 2001 From: Vasanthdev2004 Date: Thu, 2 Apr 2026 13:13:50 +0530 Subject: [PATCH 07/25] feat: add guided /provider setup --- scripts/provider-bootstrap.ts | 6 +- scripts/provider-discovery.ts | 137 +-- scripts/provider-launch.ts | 15 +- scripts/provider-recommend.ts | 9 +- src/commands.ts | 2 + src/commands/provider/index.ts | 12 + src/commands/provider/provider.test.tsx | 228 +++++ src/commands/provider/provider.tsx | 1148 +++++++++++++++++++++++ src/components/ConsoleOAuthFlow.tsx | 11 +- src/entrypoints/cli.tsx | 69 +- src/services/api/openaiShim.ts | 6 +- src/utils/providerDiscovery.ts | 129 +++ src/utils/providerProfile.test.ts | 150 ++- src/utils/providerProfile.ts | 363 ++++++- src/utils/status.tsx | 59 +- 15 files changed, 2111 insertions(+), 233 deletions(-) create mode 100644 src/commands/provider/index.ts create mode 100644 src/commands/provider/provider.test.tsx create mode 100644 src/commands/provider/provider.tsx create mode 100644 src/utils/providerDiscovery.ts diff --git a/scripts/provider-bootstrap.ts b/scripts/provider-bootstrap.ts index ad3f9bd3..cef21aef 100644 --- a/scripts/provider-bootstrap.ts +++ b/scripts/provider-bootstrap.ts @@ -1,6 +1,4 @@ // @ts-nocheck -import { writeFileSync } from 'node:fs' -import { resolve } from 'node:path' import { resolveCodexApiCredentials, } from '../src/services/api/providerConfig.js' @@ -15,6 +13,7 @@ import { buildOllamaProfileEnv, buildOpenAIProfileEnv, createProfileFile, + saveProfileFile, selectAutoProfile, type ProfileFile, type ProviderProfile, @@ -147,8 +146,7 @@ async function main(): Promise { const profile = createProfileFile(selected, env) - const outputPath = resolve(process.cwd(), '.openclaude-profile.json') - writeFileSync(outputPath, JSON.stringify(profile, null, 2), { encoding: 'utf8', mode: 0o600 }) + const outputPath = saveProfileFile(profile) console.log(`Saved profile: ${selected}`) console.log(`Goal: ${goal}`) diff --git a/scripts/provider-discovery.ts b/scripts/provider-discovery.ts index 9e3aacda..126e0d22 100644 --- a/scripts/provider-discovery.ts +++ b/scripts/provider-discovery.ts @@ -1,129 +1,8 @@ -import type { OllamaModelDescriptor } from '../src/utils/providerRecommendation.ts' - -export const DEFAULT_OLLAMA_BASE_URL = 'http://localhost:11434' - -function withTimeoutSignal(timeoutMs: number): { - signal: AbortSignal - clear: () => void -} { - const controller = new AbortController() - const timeout = setTimeout(() => controller.abort(), timeoutMs) - return { - signal: controller.signal, - clear: () => clearTimeout(timeout), - } -} - -function trimTrailingSlash(value: string): string { - return value.replace(/\/+$/, '') -} - -export function getOllamaApiBaseUrl(baseUrl?: string): string { - const parsed = new URL( - baseUrl || process.env.OLLAMA_BASE_URL || DEFAULT_OLLAMA_BASE_URL, - ) - const pathname = trimTrailingSlash(parsed.pathname) - parsed.pathname = pathname.endsWith('/v1') - ? 
pathname.slice(0, -3) || '/' - : pathname || '/' - parsed.search = '' - parsed.hash = '' - return trimTrailingSlash(parsed.toString()) -} - -export function getOllamaChatBaseUrl(baseUrl?: string): string { - return `${getOllamaApiBaseUrl(baseUrl)}/v1` -} - -export async function hasLocalOllama(baseUrl?: string): Promise { - const { signal, clear } = withTimeoutSignal(1200) - try { - const response = await fetch(`${getOllamaApiBaseUrl(baseUrl)}/api/tags`, { - method: 'GET', - signal, - }) - return response.ok - } catch { - return false - } finally { - clear() - } -} - -export async function listOllamaModels( - baseUrl?: string, -): Promise { - const { signal, clear } = withTimeoutSignal(5000) - try { - const response = await fetch(`${getOllamaApiBaseUrl(baseUrl)}/api/tags`, { - method: 'GET', - signal, - }) - if (!response.ok) { - return [] - } - - const data = await response.json() as { - models?: Array<{ - name?: string - size?: number - details?: { - family?: string - families?: string[] - parameter_size?: string - quantization_level?: string - } - }> - } - - return (data.models ?? []) - .filter(model => Boolean(model.name)) - .map(model => ({ - name: model.name!, - sizeBytes: typeof model.size === 'number' ? model.size : null, - family: model.details?.family ?? null, - families: model.details?.families ?? [], - parameterSize: model.details?.parameter_size ?? null, - quantizationLevel: model.details?.quantization_level ?? null, - })) - } catch { - return [] - } finally { - clear() - } -} - -export async function benchmarkOllamaModel( - modelName: string, - baseUrl?: string, -): Promise { - const start = Date.now() - const { signal, clear } = withTimeoutSignal(20000) - try { - const response = await fetch(`${getOllamaApiBaseUrl(baseUrl)}/api/chat`, { - method: 'POST', - headers: { - 'Content-Type': 'application/json', - }, - signal, - body: JSON.stringify({ - model: modelName, - stream: false, - messages: [{ role: 'user', content: 'Reply with OK.' 
}], - options: { - temperature: 0, - num_predict: 8, - }, - }), - }) - if (!response.ok) { - return null - } - await response.json() - return Date.now() - start - } catch { - return null - } finally { - clear() - } -} +export { + benchmarkOllamaModel, + DEFAULT_OLLAMA_BASE_URL, + getOllamaApiBaseUrl, + getOllamaChatBaseUrl, + hasLocalOllama, + listOllamaModels, +} from '../src/utils/providerDiscovery.ts' diff --git a/scripts/provider-launch.ts b/scripts/provider-launch.ts index 2859e9e8..1c79d795 100644 --- a/scripts/provider-launch.ts +++ b/scripts/provider-launch.ts @@ -1,7 +1,5 @@ // @ts-nocheck import { spawn } from 'node:child_process' -import { existsSync, readFileSync } from 'node:fs' -import { resolve } from 'node:path' import { resolveCodexApiCredentials, } from '../src/services/api/providerConfig.js' @@ -11,6 +9,7 @@ import { } from '../src/utils/providerRecommendation.ts' import { buildLaunchEnv, + loadProfileFile, selectAutoProfile, type ProfileFile, type ProviderProfile, @@ -75,17 +74,7 @@ function parseLaunchOptions(argv: string[]): LaunchOptions { } function loadPersistedProfile(): ProfileFile | null { - const path = resolve(process.cwd(), '.openclaude-profile.json') - if (!existsSync(path)) return null - try { - const parsed = JSON.parse(readFileSync(path, 'utf8')) as ProfileFile - if (parsed.profile === 'openai' || parsed.profile === 'ollama' || parsed.profile === 'codex' || parsed.profile === 'gemini') { - return parsed - } - return null - } catch { - return null - } + return loadProfileFile() } async function resolveOllamaDefaultModel( diff --git a/scripts/provider-recommend.ts b/scripts/provider-recommend.ts index eca811e6..8dc23835 100644 --- a/scripts/provider-recommend.ts +++ b/scripts/provider-recommend.ts @@ -1,6 +1,4 @@ // @ts-nocheck -import { writeFileSync } from 'node:fs' -import { resolve } from 'node:path' import { applyBenchmarkLatency, @@ -16,6 +14,7 @@ import { buildOllamaProfileEnv, buildOpenAIProfileEnv, createProfileFile, + saveProfileFile, sanitizeApiKey, type ProfileFile, type ProviderProfile, @@ -153,11 +152,7 @@ async function maybeApplyProfile( const profileFile = createProfileFile(profile, env) - writeFileSync( - resolve(process.cwd(), '.openclaude-profile.json'), - JSON.stringify(profileFile, null, 2), - 'utf8', - ) + saveProfileFile(profileFile) return true } diff --git a/src/commands.ts b/src/commands.ts index 10f03b22..3858e62f 100644 --- a/src/commands.ts +++ b/src/commands.ts @@ -128,6 +128,7 @@ import plan from './commands/plan/index.js' import fast from './commands/fast/index.js' import passes from './commands/passes/index.js' import privacySettings from './commands/privacy-settings/index.js' +import provider from './commands/provider/index.js' import hooks from './commands/hooks/index.js' import files from './commands/files/index.js' import branch from './commands/branch/index.js' @@ -291,6 +292,7 @@ const COMMANDS = memoize((): Command[] => [ outputStyle, remoteEnv, plugin, + provider, pr_comments, releaseNotes, reloadPlugins, diff --git a/src/commands/provider/index.ts b/src/commands/provider/index.ts new file mode 100644 index 00000000..9cd14daa --- /dev/null +++ b/src/commands/provider/index.ts @@ -0,0 +1,12 @@ +import type { Command } from '../../commands.js' +import { shouldInferenceConfigCommandBeImmediate } from '../../utils/immediateCommand.js' + +export default { + type: 'local-jsx', + name: 'provider', + description: 'Set up and save a third-party provider profile for OpenClaude', + get immediate() { + return 
shouldInferenceConfigCommandBeImmediate() + }, + load: () => import('./provider.js'), +} satisfies Command diff --git a/src/commands/provider/provider.test.tsx b/src/commands/provider/provider.test.tsx new file mode 100644 index 00000000..7f5560dc --- /dev/null +++ b/src/commands/provider/provider.test.tsx @@ -0,0 +1,228 @@ +import { PassThrough } from 'node:stream' + +import { expect, test } from 'bun:test' +import React from 'react' +import stripAnsi from 'strip-ansi' + +import { createRoot, render, useApp } from '../../ink.js' +import { AppStateProvider } from '../../state/AppState.js' +import { + buildCurrentProviderSummary, + buildProfileSaveMessage, + getProviderWizardDefaults, + TextEntryDialog, +} from './provider.js' + +const SYNC_START = '\x1B[?2026h' +const SYNC_END = '\x1B[?2026l' + +function extractLastFrame(output: string): string { + let lastFrame: string | null = null + let cursor = 0 + + while (cursor < output.length) { + const start = output.indexOf(SYNC_START, cursor) + if (start === -1) { + break + } + + const contentStart = start + SYNC_START.length + const end = output.indexOf(SYNC_END, contentStart) + if (end === -1) { + break + } + + const frame = output.slice(contentStart, end) + if (frame.trim().length > 0) { + lastFrame = frame + } + cursor = end + SYNC_END.length + } + + return lastFrame ?? output +} + +async function renderFinalFrame(node: React.ReactNode): Promise { + let output = '' + const { stdout, stdin, getOutput } = createTestStreams() + + const instance = await render(node, { + stdout: stdout as unknown as NodeJS.WriteStream, + stdin: stdin as unknown as NodeJS.ReadStream, + patchConsole: false, + }) + + await instance.waitUntilExit() + return stripAnsi(extractLastFrame(getOutput())) +} + +function createTestStreams(): { + stdout: PassThrough + stdin: PassThrough & { + isTTY: boolean + setRawMode: (mode: boolean) => void + ref: () => void + unref: () => void + } + getOutput: () => string +} { + let output = '' + const stdout = new PassThrough() + const stdin = new PassThrough() as PassThrough & { + isTTY: boolean + setRawMode: (mode: boolean) => void + ref: () => void + unref: () => void + } + stdin.isTTY = true + stdin.setRawMode = () => {} + stdin.ref = () => {} + stdin.unref = () => {} + ;(stdout as unknown as { columns: number }).columns = 120 + stdout.on('data', chunk => { + output += chunk.toString() + }) + + return { + stdout, + stdin, + getOutput: () => output, + } +} + +function StepChangeHarness(): React.ReactNode { + const { exit } = useApp() + const [step, setStep] = React.useState<'api' | 'model'>('api') + + React.useLayoutEffect(() => { + if (step === 'api') { + setStep('model') + return + } + + const timer = setTimeout(exit, 0) + return () => clearTimeout(timer) + }, [exit, step]) + + return ( + + {}} + onCancel={() => {}} + /> + + ) +} + +test('TextEntryDialog resets its input state when initialValue changes', async () => { + const output = await renderFinalFrame() + + expect(output).toContain('Model step') + expect(output).toContain('fresh-model-name') + expect(output).not.toContain('stale-secret-key') +}) + +test('wizard step remount prevents a typed API key from leaking into the next field', async () => { + const { stdout, stdin, getOutput } = createTestStreams() + const root = await createRoot({ + stdout: stdout as unknown as NodeJS.WriteStream, + stdin: stdin as unknown as NodeJS.ReadStream, + patchConsole: false, + }) + + root.render( + + {}} + onCancel={() => {}} + /> + , + ) + + await Bun.sleep(25) + 
stdin.write('sk-secret-12345678') + await Bun.sleep(25) + + root.render( + + {}} + onCancel={() => {}} + /> + , + ) + + await Bun.sleep(25) + root.unmount() + stdin.end() + stdout.end() + await Bun.sleep(25) + + const output = stripAnsi(extractLastFrame(getOutput())) + expect(output).toContain('Model step') + expect(output).not.toContain('sk-secret-12345678') +}) + +test('buildProfileSaveMessage maps provider fields without echoing secrets', () => { + const message = buildProfileSaveMessage( + 'openai', + { + OPENAI_API_KEY: 'sk-secret-12345678', + OPENAI_MODEL: 'gpt-4o', + OPENAI_BASE_URL: 'https://api.openai.com/v1', + }, + 'D:/codings/Opensource/openclaude/.openclaude-profile.json', + ) + + expect(message).toContain('Saved OpenAI-compatible profile.') + expect(message).toContain('Model: gpt-4o') + expect(message).toContain('Endpoint: https://api.openai.com/v1') + expect(message).toContain('Credentials: configured') + expect(message).not.toContain('sk-secret-12345678') +}) + +test('buildCurrentProviderSummary redacts poisoned model and endpoint values', () => { + const summary = buildCurrentProviderSummary({ + processEnv: { + CLAUDE_CODE_USE_OPENAI: '1', + OPENAI_API_KEY: 'sk-secret-12345678', + OPENAI_MODEL: 'sk-secret-12345678', + OPENAI_BASE_URL: 'sk-secret-12345678', + }, + persisted: null, + }) + + expect(summary.providerLabel).toBe('OpenAI-compatible') + expect(summary.modelLabel).toBe('sk-...5678') + expect(summary.endpointLabel).toBe('sk-...5678') +}) + +test('getProviderWizardDefaults ignores poisoned current provider values', () => { + const defaults = getProviderWizardDefaults({ + OPENAI_API_KEY: 'sk-secret-12345678', + OPENAI_MODEL: 'sk-secret-12345678', + OPENAI_BASE_URL: 'sk-secret-12345678', + GEMINI_API_KEY: 'AIzaSecret12345678', + GEMINI_MODEL: 'AIzaSecret12345678', + }) + + expect(defaults.openAIModel).toBe('gpt-4o') + expect(defaults.openAIBaseUrl).toBe('https://api.openai.com/v1') + expect(defaults.geminiModel).toBe('gemini-2.0-flash') +}) diff --git a/src/commands/provider/provider.tsx b/src/commands/provider/provider.tsx new file mode 100644 index 00000000..95109e7d --- /dev/null +++ b/src/commands/provider/provider.tsx @@ -0,0 +1,1148 @@ +import * as React from 'react' + +import type { LocalJSXCommandCall, LocalJSXCommandOnDone } from '../../types/command.js' +import { COMMON_HELP_ARGS, COMMON_INFO_ARGS } from '../../constants/xml.js' +import TextInput from '../../components/TextInput.js' +import { + Select, + type OptionWithDescription, +} from '../../components/CustomSelect/index.js' +import { Dialog } from '../../components/design-system/Dialog.js' +import { LoadingState } from '../../components/design-system/LoadingState.js' +import { useTerminalSize } from '../../hooks/useTerminalSize.js' +import { Box, Text } from '../../ink.js' +import { + DEFAULT_CODEX_BASE_URL, + DEFAULT_OPENAI_BASE_URL, + resolveCodexApiCredentials, + resolveProviderRequest, +} from '../../services/api/providerConfig.js' +import { + buildCodexProfileEnv, + buildGeminiProfileEnv, + buildOllamaProfileEnv, + buildOpenAIProfileEnv, + createProfileFile, + DEFAULT_GEMINI_BASE_URL, + DEFAULT_GEMINI_MODEL, + deleteProfileFile, + loadProfileFile, + maskSecretForDisplay, + redactSecretValueForDisplay, + sanitizeApiKey, + sanitizeProviderConfigValue, + saveProfileFile, + type ProfileEnv, + type ProfileFile, + type ProviderProfile, +} from '../../utils/providerProfile.js' +import { + getGoalDefaultOpenAIModel, + normalizeRecommendationGoal, + rankOllamaModels, + recommendOllamaModel, + type 
RecommendationGoal, +} from '../../utils/providerRecommendation.js' +import { hasLocalOllama, listOllamaModels } from '../../utils/providerDiscovery.js' + +type ProviderChoice = 'auto' | ProviderProfile | 'clear' + +type Step = + | { name: 'choose' } + | { name: 'auto-goal' } + | { name: 'auto-detect'; goal: RecommendationGoal } + | { name: 'ollama-detect' } + | { name: 'openai-key'; defaultModel: string } + | { name: 'openai-base'; apiKey: string; defaultModel: string } + | { + name: 'openai-model' + apiKey: string + baseUrl: string | null + defaultModel: string + } + | { name: 'gemini-key' } + | { name: 'gemini-model'; apiKey: string } + | { name: 'codex-check' } + +type CurrentProviderSummary = { + providerLabel: string + modelLabel: string + endpointLabel: string + savedProfileLabel: string +} + +type SavedProfileSummary = { + providerLabel: string + modelLabel: string + endpointLabel: string + credentialLabel?: string +} + +type TextEntryDialogProps = { + title: string + subtitle?: string + resetStateKey?: string + description: React.ReactNode + initialValue: string + placeholder?: string + mask?: string + allowEmpty?: boolean + validate?: (value: string) => string | null + onSubmit: (value: string) => void + onCancel: () => void +} + +type ProviderWizardDefaults = { + openAIModel: string + openAIBaseUrl: string + geminiModel: string +} + +function isEnvTruthy(value: string | undefined): boolean { + if (!value) return false + const normalized = value.trim().toLowerCase() + return normalized !== '' && normalized !== '0' && normalized !== 'false' && normalized !== 'no' +} + +function getSafeDisplayValue( + value: string | undefined, + processEnv: NodeJS.ProcessEnv, + profileEnv?: ProfileEnv, + fallback = '(not set)', +): string { + return ( + redactSecretValueForDisplay(value, processEnv, profileEnv) ?? fallback + ) +} + +export function getProviderWizardDefaults( + processEnv: NodeJS.ProcessEnv = process.env, +): ProviderWizardDefaults { + const safeOpenAIModel = + sanitizeProviderConfigValue(processEnv.OPENAI_MODEL, processEnv) || + 'gpt-4o' + const safeOpenAIBaseUrl = + sanitizeProviderConfigValue(processEnv.OPENAI_BASE_URL, processEnv) || + DEFAULT_OPENAI_BASE_URL + const safeGeminiModel = + sanitizeProviderConfigValue(processEnv.GEMINI_MODEL, processEnv) || + DEFAULT_GEMINI_MODEL + + return { + openAIModel: safeOpenAIModel, + openAIBaseUrl: safeOpenAIBaseUrl, + geminiModel: safeGeminiModel, + } +} + +export function buildCurrentProviderSummary(options?: { + processEnv?: NodeJS.ProcessEnv + persisted?: ProfileFile | null +}): CurrentProviderSummary { + const processEnv = options?.processEnv ?? process.env + const persisted = options?.persisted ?? loadProfileFile() + const savedProfileLabel = persisted?.profile ?? 'none' + + if (isEnvTruthy(processEnv.CLAUDE_CODE_USE_GEMINI)) { + return { + providerLabel: 'Google Gemini', + modelLabel: getSafeDisplayValue( + processEnv.GEMINI_MODEL ?? DEFAULT_GEMINI_MODEL, + processEnv, + ), + endpointLabel: getSafeDisplayValue( + processEnv.GEMINI_BASE_URL ?? 
DEFAULT_GEMINI_BASE_URL, + processEnv, + ), + savedProfileLabel, + } + } + + if (isEnvTruthy(processEnv.CLAUDE_CODE_USE_OPENAI)) { + const request = resolveProviderRequest({ + model: processEnv.OPENAI_MODEL, + baseUrl: processEnv.OPENAI_BASE_URL, + }) + + let providerLabel = 'OpenAI-compatible' + if (request.transport === 'codex_responses') { + providerLabel = 'Codex' + } else if (request.baseUrl.includes('localhost:11434')) { + providerLabel = 'Ollama' + } else if (request.baseUrl.includes('localhost:1234')) { + providerLabel = 'LM Studio' + } + + return { + providerLabel, + modelLabel: getSafeDisplayValue(request.requestedModel, processEnv), + endpointLabel: getSafeDisplayValue(request.baseUrl, processEnv), + savedProfileLabel, + } + } + + return { + providerLabel: 'Anthropic', + modelLabel: getSafeDisplayValue( + processEnv.ANTHROPIC_MODEL ?? + processEnv.CLAUDE_MODEL ?? + 'claude-sonnet-4-6', + processEnv, + ), + endpointLabel: getSafeDisplayValue( + processEnv.ANTHROPIC_BASE_URL ?? 'https://api.anthropic.com', + processEnv, + ), + savedProfileLabel, + } +} + +function buildSavedProfileSummary( + profile: ProviderProfile, + env: ProfileEnv, +): SavedProfileSummary { + switch (profile) { + case 'gemini': + return { + providerLabel: 'Google Gemini', + modelLabel: getSafeDisplayValue( + env.GEMINI_MODEL ?? DEFAULT_GEMINI_MODEL, + process.env, + env, + ), + endpointLabel: getSafeDisplayValue( + env.GEMINI_BASE_URL ?? DEFAULT_GEMINI_BASE_URL, + process.env, + env, + ), + credentialLabel: + maskSecretForDisplay(env.GEMINI_API_KEY) !== undefined + ? 'configured' + : undefined, + } + case 'codex': + return { + providerLabel: 'Codex', + modelLabel: getSafeDisplayValue( + env.OPENAI_MODEL ?? 'codexplan', + process.env, + env, + ), + endpointLabel: getSafeDisplayValue( + env.OPENAI_BASE_URL ?? DEFAULT_CODEX_BASE_URL, + process.env, + env, + ), + credentialLabel: + maskSecretForDisplay(env.CODEX_API_KEY) !== undefined + ? 'configured' + : undefined, + } + case 'ollama': + return { + providerLabel: 'Ollama', + modelLabel: getSafeDisplayValue( + env.OPENAI_MODEL, + process.env, + env, + ), + endpointLabel: getSafeDisplayValue( + env.OPENAI_BASE_URL, + process.env, + env, + ), + } + case 'openai': + default: + return { + providerLabel: 'OpenAI-compatible', + modelLabel: getSafeDisplayValue( + env.OPENAI_MODEL ?? 'gpt-4o', + process.env, + env, + ), + endpointLabel: getSafeDisplayValue( + env.OPENAI_BASE_URL ?? DEFAULT_OPENAI_BASE_URL, + process.env, + env, + ), + credentialLabel: + maskSecretForDisplay(env.OPENAI_API_KEY) !== undefined + ? 
'configured' + : undefined, + } + } +} + +export function buildProfileSaveMessage( + profile: ProviderProfile, + env: ProfileEnv, + filePath: string, +): string { + const summary = buildSavedProfileSummary(profile, env) + const lines = [ + `Saved ${summary.providerLabel} profile.`, + `Model: ${summary.modelLabel}`, + `Endpoint: ${summary.endpointLabel}`, + ] + + if (summary.credentialLabel) { + lines.push(`Credentials: ${summary.credentialLabel}`) + } + + lines.push(`Profile: ${filePath}`) + lines.push('Restart OpenClaude to use it.') + + return lines.join('\n') +} + +function buildUsageText(): string { + const summary = buildCurrentProviderSummary() + return [ + 'Usage: /provider', + '', + 'Guided setup for saved provider profiles.', + '', + `Current provider: ${summary.providerLabel}`, + `Current model: ${summary.modelLabel}`, + `Current endpoint: ${summary.endpointLabel}`, + `Saved profile: ${summary.savedProfileLabel}`, + '', + 'Choose Auto, Ollama, OpenAI-compatible, Gemini, or Codex, then save a profile for the next OpenClaude restart.', + ].join('\n') +} + +function finishProfileSave( + onDone: LocalJSXCommandOnDone, + profile: ProviderProfile, + env: ProfileEnv, +): void { + try { + const profileFile = createProfileFile(profile, env) + const filePath = saveProfileFile(profileFile) + onDone(buildProfileSaveMessage(profile, env, filePath), { + display: 'system', + }) + } catch (error) { + const message = error instanceof Error ? error.message : String(error) + onDone(`Failed to save provider profile: ${message}`, { + display: 'system', + }) + } +} + +export function TextEntryDialog({ + title, + subtitle, + resetStateKey, + description, + initialValue, + placeholder, + mask, + allowEmpty = false, + validate, + onSubmit, + onCancel, +}: TextEntryDialogProps): React.ReactNode { + const { columns } = useTerminalSize() + const [value, setValue] = React.useState(initialValue) + const [cursorOffset, setCursorOffset] = React.useState(initialValue.length) + const [error, setError] = React.useState(null) + + React.useLayoutEffect(() => { + setValue(initialValue) + setCursorOffset(initialValue.length) + setError(null) + }, [initialValue, resetStateKey]) + + const inputColumns = Math.max(30, columns - 6) + + const handleSubmit = React.useCallback( + (nextValue: string) => { + if (!allowEmpty && nextValue.trim().length === 0) { + setError('A value is required for this step.') + return + } + + const validationError = validate?.(nextValue) + if (validationError) { + setError(validationError) + return + } + + setError(null) + onSubmit(nextValue) + }, + [allowEmpty, onSubmit, validate], + ) + + return ( + + + {description} + + {error ? 
{error} : null} + + + ) +} + +function ProviderChooser({ + onChoose, + onCancel, +}: { + onChoose: (value: ProviderChoice) => void + onCancel: () => void +}): React.ReactNode { + const summary = buildCurrentProviderSummary() + const options: OptionWithDescription[] = [ + { + label: 'Auto', + value: 'auto', + description: + 'Prefer local Ollama when available, otherwise guide you into OpenAI-compatible setup', + }, + { + label: 'Ollama', + value: 'ollama', + description: 'Use a local Ollama model with no API key', + }, + { + label: 'OpenAI-compatible', + value: 'openai', + description: + 'GPT-4o, DeepSeek, OpenRouter, Groq, LM Studio, and similar APIs', + }, + { + label: 'Gemini', + value: 'gemini', + description: 'Use a Google Gemini API key', + }, + { + label: 'Codex', + value: 'codex', + description: 'Use existing ChatGPT Codex CLI auth or env credentials', + }, + ] + + if (summary.savedProfileLabel !== 'none') { + options.push({ + label: 'Clear saved profile', + value: 'clear', + description: 'Remove .openclaude-profile.json and return to normal startup', + }) + } + + return ( + + + + Save a provider profile for the next OpenClaude restart without + editing environment variables first. + + + Current model: {summary.modelLabel} + Current endpoint: {summary.endpointLabel} + Saved profile: {summary.savedProfileLabel} + + + + + ) +} + +function AutoRecommendationStep({ + goal, + onBack, + onSave, + onNeedOpenAI, + onCancel, +}: { + goal: RecommendationGoal + onBack: () => void + onSave: (profile: ProviderProfile, env: ProfileEnv) => void + onNeedOpenAI: (defaultModel: string) => void + onCancel: () => void +}): React.ReactNode { + const [status, setStatus] = React.useState< + | { + state: 'loading' + } + | { + state: 'ollama' + model: string + summary: string + } + | { + state: 'openai' + defaultModel: string + } + | { + state: 'error' + message: string + } + >({ state: 'loading' }) + + React.useEffect(() => { + let cancelled = false + + void (async () => { + const defaultModel = getGoalDefaultOpenAIModel(goal) + try { + const ollamaAvailable = await hasLocalOllama() + if (!ollamaAvailable) { + if (!cancelled) { + setStatus({ state: 'openai', defaultModel }) + } + return + } + + const models = await listOllamaModels() + const recommended = recommendOllamaModel(models, goal) + if (!recommended) { + if (!cancelled) { + setStatus({ state: 'openai', defaultModel }) + } + return + } + + if (!cancelled) { + setStatus({ + state: 'ollama', + model: recommended.name, + summary: recommended.summary, + }) + } + } catch (error) { + if (!cancelled) { + setStatus({ + state: 'error', + message: error instanceof Error ? error.message : String(error), + }) + } + } + })() + + return () => { + cancelled = true + } + }, [goal]) + + if (status.state === 'loading') { + return + } + + if (status.state === 'error') { + return ( + + + {status.message} + { + if (value === 'continue') { + onNeedOpenAI(status.defaultModel) + } else if (value === 'back') { + onBack() + } else { + onCancel() + } + }} + onCancel={onCancel} + /> + + + ) + } + + return ( + + + + Auto setup recommends a local Ollama profile for {goal} based on the + models currently available on this machine. + + + Recommended model: {status.model} + {status.summary ? ` · ${status.summary}` : ''} + + (value === 'back' ? onBack() : onCancel())} + onCancel={onCancel} + /> + + + ) + } + + return ( + + + + Pick one of the installed Ollama models to save into a local provider + profile. + + (value === 'back' ? 
onBack() : onCancel())} + onCancel={onCancel} + /> + + + ) + } + + const options: OptionWithDescription[] = [ + { + label: 'codexplan', + value: 'codexplan', + description: 'GPT-5.4 with higher reasoning on the Codex backend', + }, + { + label: 'codexspark', + value: 'codexspark', + description: 'Faster Codex Spark tool loop profile', + }, + ] + + return ( + + + + Reuse your existing Codex credentials from{' '} + {credentials.sourceDescription} and save a model alias profile. + + { + onChange={(v: string) => { if (v === 'back') { setStep('menu') setErrorMsg(null) @@ -161,7 +167,7 @@ function OnboardGithub(props: { value={patDraft} mask="*" onChange={setPatDraft} - onSubmit={async value => { + onSubmit={async (value: string) => { const t = value.trim() if (!t) { return @@ -172,6 +178,9 @@ function OnboardGithub(props: { setStep('menu') setPatDraft('') }} + columns={80} + cursorOffset={cursorOffset} + onChangeCursorOffset={setCursorOffset} /> ) @@ -202,7 +211,7 @@ function OnboardGithub(props: { + + + + + + + + + + + + ) +} + +function EffortOptionLabel({ level, text, isCurrent }: { level: EffortLevel | 'auto', text: string, isCurrent: boolean }) { + const symbol = level === 'auto' ? '⊘' : effortLevelToSymbol(level as EffortLevel) + const color = isCurrent ? 'remember' : level === 'auto' ? 'subtle' : 'suggestion' + + return ( + <> + {symbol} + {text} + {isCurrent && (current)} + + ) +} diff --git a/src/components/StartupScreen.ts b/src/components/StartupScreen.ts index b20d26c1..e38a4111 100644 --- a/src/components/StartupScreen.ts +++ b/src/components/StartupScreen.ts @@ -97,21 +97,45 @@ function detectProvider(): { name: string; model: string; baseUrl: string; isLoc } if (useOpenAI) { - const model = process.env.OPENAI_MODEL || 'gpt-4o' + const rawModel = process.env.OPENAI_MODEL || 'gpt-4o' const baseUrl = process.env.OPENAI_BASE_URL || 'https://api.openai.com/v1' const isLocal = /localhost|127\.0\.0\.1|0\.0\.0\.0/.test(baseUrl) let name = 'OpenAI' - if (/deepseek/i.test(baseUrl) || /deepseek/i.test(model)) name = 'DeepSeek' + if (/deepseek/i.test(baseUrl) || /deepseek/i.test(rawModel)) name = 'DeepSeek' else if (/openrouter/i.test(baseUrl)) name = 'OpenRouter' else if (/together/i.test(baseUrl)) name = 'Together AI' else if (/groq/i.test(baseUrl)) name = 'Groq' - else if (/mistral/i.test(baseUrl) || /mistral/i.test(model)) name = 'Mistral' + else if (/mistral/i.test(baseUrl) || /mistral/i.test(rawModel)) name = 'Mistral' else if (/azure/i.test(baseUrl)) name = 'Azure OpenAI' else if (/localhost:11434/i.test(baseUrl)) name = 'Ollama' else if (/localhost:1234/i.test(baseUrl)) name = 'LM Studio' - else if (/llama/i.test(model)) name = 'Meta Llama' + else if (/llama/i.test(rawModel)) name = 'Meta Llama' else if (isLocal) name = 'Local' - return { name, model, baseUrl, isLocal } + + // Resolve model alias to actual model name + reasoning effort + let displayModel = rawModel + const codexAliases: Record = { + codexplan: { model: 'gpt-5.4', reasoningEffort: 'high' }, + 'gpt-5.4': { model: 'gpt-5.4', reasoningEffort: 'high' }, + 'gpt-5.3-codex': { model: 'gpt-5.3-codex', reasoningEffort: 'high' }, + 'gpt-5.3-codex-spark': { model: 'gpt-5.3-codex-spark' }, + codexspark: { model: 'gpt-5.3-codex-spark' }, + 'gpt-5.2-codex': { model: 'gpt-5.2-codex', reasoningEffort: 'high' }, + 'gpt-5.1-codex-max': { model: 'gpt-5.1-codex-max', reasoningEffort: 'high' }, + 'gpt-5.1-codex-mini': { model: 'gpt-5.1-codex-mini' }, + 'gpt-5.4-mini': { model: 'gpt-5.4-mini', reasoningEffort: 'medium' }, + 'gpt-5.2': { 
model: 'gpt-5.2', reasoningEffort: 'medium' }, + } + const alias = rawModel.toLowerCase() + if (alias in codexAliases) { + const resolved = codexAliases[alias] + displayModel = resolved.model + if (resolved.reasoningEffort) { + displayModel = `${displayModel} (${resolved.reasoningEffort})` + } + } + + return { name, model: displayModel, baseUrl, isLocal } } // Default: Anthropic diff --git a/src/hooks/useTypeahead.tsx b/src/hooks/useTypeahead.tsx index a269902b..8183a011 100644 --- a/src/hooks/useTypeahead.tsx +++ b/src/hooks/useTypeahead.tsx @@ -1242,17 +1242,25 @@ export function useTypeahead({ const handleAutocompletePrevious = useCallback(() => { setSuggestionsState(prev => ({ ...prev, - selectedSuggestion: prev.selectedSuggestion <= 0 ? suggestions.length - 1 : prev.selectedSuggestion - 1 + selectedSuggestion: prev.suggestions.length === 0 + ? -1 + : prev.selectedSuggestion <= 0 + ? prev.suggestions.length - 1 + : Math.min(prev.selectedSuggestion - 1, prev.suggestions.length - 1) })); - }, [suggestions.length, setSuggestionsState]); + }, [setSuggestionsState]); // Handler for autocomplete:next - selects next suggestion const handleAutocompleteNext = useCallback(() => { setSuggestionsState(prev => ({ ...prev, - selectedSuggestion: prev.selectedSuggestion >= suggestions.length - 1 ? 0 : prev.selectedSuggestion + 1 + selectedSuggestion: prev.suggestions.length === 0 + ? -1 + : prev.selectedSuggestion >= prev.suggestions.length - 1 + ? 0 + : Math.max(0, prev.selectedSuggestion + 1) })); - }, [suggestions.length, setSuggestionsState]); + }, [setSuggestionsState]); // Autocomplete context keybindings - only active when suggestions are visible const autocompleteHandlers = useMemo(() => ({ diff --git a/src/services/api/openaiShim.ts b/src/services/api/openaiShim.ts index 1f99b7c4..c456e9e4 100644 --- a/src/services/api/openaiShim.ts +++ b/src/services/api/openaiShim.ts @@ -655,9 +655,11 @@ class OpenAIShimStream { class OpenAIShimMessages { private defaultHeaders: Record + private reasoningEffort?: 'low' | 'medium' | 'high' | 'xhigh' - constructor(defaultHeaders: Record) { + constructor(defaultHeaders: Record, reasoningEffort?: 'low' | 'medium' | 'high' | 'xhigh') { this.defaultHeaders = defaultHeaders + this.reasoningEffort = reasoningEffort } create( @@ -667,7 +669,7 @@ class OpenAIShimMessages { const self = this const promise = (async () => { - const request = resolveProviderRequest({ model: params.model }) + const request = resolveProviderRequest({ model: params.model, reasoningEffortOverride: self.reasoningEffort }) const response = await self._doRequest(request, params, options) if (params.stream) { @@ -993,9 +995,11 @@ class OpenAIShimMessages { class OpenAIShimBeta { messages: OpenAIShimMessages + reasoningEffort?: 'low' | 'medium' | 'high' | 'xhigh' - constructor(defaultHeaders: Record) { - this.messages = new OpenAIShimMessages(defaultHeaders) + constructor(defaultHeaders: Record, reasoningEffort?: 'low' | 'medium' | 'high' | 'xhigh') { + this.messages = new OpenAIShimMessages(defaultHeaders, reasoningEffort) + this.reasoningEffort = reasoningEffort } } @@ -1003,6 +1007,7 @@ export function createOpenAIShimClient(options: { defaultHeaders?: Record maxRetries?: number timeout?: number + reasoningEffort?: 'low' | 'medium' | 'high' | 'xhigh' }): unknown { hydrateGithubModelsTokenFromSecureStorage() @@ -1025,7 +1030,7 @@ export function createOpenAIShimClient(options: { const beta = new OpenAIShimBeta({ ...(options.defaultHeaders ?? 
{}), - }) + }, options.reasoningEffort) return { beta, diff --git a/src/services/api/providerConfig.ts b/src/services/api/providerConfig.ts index 90643aa1..1c3097db 100644 --- a/src/services/api/providerConfig.ts +++ b/src/services/api/providerConfig.ts @@ -20,13 +20,43 @@ const CODEX_ALIAS_MODELS: Record< model: 'gpt-5.4', reasoningEffort: 'high', }, + 'gpt-5.4': { + model: 'gpt-5.4', + reasoningEffort: 'high', + }, + 'gpt-5.3-codex': { + model: 'gpt-5.3-codex', + reasoningEffort: 'high', + }, + 'gpt-5.3-codex-spark': { + model: 'gpt-5.3-codex-spark', + }, codexspark: { model: 'gpt-5.3-codex-spark', }, + 'gpt-5.2-codex': { + model: 'gpt-5.2-codex', + reasoningEffort: 'high', + }, + 'gpt-5.1-codex-max': { + model: 'gpt-5.1-codex-max', + reasoningEffort: 'high', + }, + 'gpt-5.1-codex-mini': { + model: 'gpt-5.1-codex-mini', + }, + 'gpt-5.4-mini': { + model: 'gpt-5.4-mini', + reasoningEffort: 'medium', + }, + 'gpt-5.2': { + model: 'gpt-5.2', + reasoningEffort: 'medium', + }, } as const type CodexAlias = keyof typeof CODEX_ALIAS_MODELS -type ReasoningEffort = 'low' | 'medium' | 'high' +type ReasoningEffort = 'low' | 'medium' | 'high' | 'xhigh' export type ProviderTransport = 'chat_completions' | 'codex_responses' @@ -102,7 +132,7 @@ function decodeJwtPayload(token: string): Record | undefined { function parseReasoningEffort(value: string | undefined): ReasoningEffort | undefined { if (!value) return undefined const normalized = value.trim().toLowerCase() - if (normalized === 'low' || normalized === 'medium' || normalized === 'high') { + if (normalized === 'low' || normalized === 'medium' || normalized === 'high' || normalized === 'xhigh') { return normalized } return undefined @@ -193,6 +223,7 @@ export function resolveProviderRequest(options?: { model?: string baseUrl?: string fallbackModel?: string + reasoningEffortOverride?: ReasoningEffort }): ResolvedProviderRequest { const isGithubMode = isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB) const requestedModel = @@ -217,6 +248,11 @@ export function resolveProviderRequest(options?: { ? normalizeGithubModelsApiModel(requestedModel) : descriptor.baseModel + const reasoning = options?.reasoningEffortOverride + ? { effort: options.reasoningEffortOverride } + : descriptor.reasoning + + return { transport, requestedModel, @@ -227,7 +263,7 @@ export function resolveProviderRequest(options?: { ? DEFAULT_CODEX_BASE_URL : DEFAULT_OPENAI_BASE_URL) ).replace(/\/+$/, ''), - reasoning: descriptor.reasoning, + reasoning, } } @@ -336,3 +372,11 @@ export function resolveCodexApiCredentials( source: 'auth.json', } } + +export function getReasoningEffortForModel(model: string): ReasoningEffort | undefined { + const normalized = model.trim().toLowerCase() + const base = normalized.split('?', 1)[0] ?? normalized + const alias = base as CodexAlias + const aliasConfig = CODEX_ALIAS_MODELS[alias] + return aliasConfig?.reasoningEffort +} diff --git a/src/utils/effort.ts b/src/utils/effort.ts index cafcf3de..2a391ee6 100644 --- a/src/utils/effort.ts +++ b/src/utils/effort.ts @@ -17,6 +17,14 @@ export const EFFORT_LEVELS = [ 'max', ] as const satisfies readonly EffortLevel[] +export const OPENAI_EFFORT_LEVELS = [ + 'low', + 'medium', + 'high', + 'xhigh', +] as const + +export type OpenAIEffortLevel = typeof OPENAI_EFFORT_LEVELS[number] export type EffortValue = EffortLevel | number // @[MODEL LAUNCH]: Add the new model to the allowlist if it supports the effort parameter. 
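// A minimal sketch (not captured output) of how the reasoningEffortOverride
// plumbing added in providerConfig.ts above is expected to resolve, assuming
// the alias descriptor for codexplan carries its configured 'high' effort
// from CODEX_ALIAS_MODELS:
//
//   resolveProviderRequest({ model: 'codexplan' }).reasoning
//   //=> { effort: 'high' }        (alias default)
//
//   resolveProviderRequest({
//     model: 'codexplan',
//     reasoningEffortOverride: 'xhigh',
//   }).reasoning
//   //=> { effort: 'xhigh' }       (explicit override wins over the descriptor)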
@@ -68,6 +76,46 @@ export function isEffortLevel(value: string): value is EffortLevel {
   return (EFFORT_LEVELS as readonly string[]).includes(value)
 }
 
+export function isOpenAIEffortLevel(value: string): value is OpenAIEffortLevel {
+  return (OPENAI_EFFORT_LEVELS as readonly string[]).includes(value)
+}
+
+// Note: effort style currently depends only on the active provider, so the
+// model argument is unused here.
+export function modelUsesOpenAIEffort(_model: string): boolean {
+  const provider = getAPIProvider()
+  return provider === 'openai' || provider === 'codex'
+}
+
+export function getAvailableEffortLevels(model: string): EffortLevel[] | OpenAIEffortLevel[] {
+  if (modelUsesOpenAIEffort(model)) {
+    return [...OPENAI_EFFORT_LEVELS] as OpenAIEffortLevel[]
+  }
+  const levels: EffortLevel[] = ['low', 'medium', 'high']
+  if (modelSupportsMaxEffort(model)) {
+    levels.push('max')
+  }
+  return levels
+}
+
+export function getEffortLevelLabel(level: EffortLevel | OpenAIEffortLevel): string {
+  if (level === 'xhigh') return 'Extra High'
+  if (level === 'max') return 'Max'
+  return capitalize(level)
+}
+
+export function openAIEffortToStandard(level: OpenAIEffortLevel): EffortLevel {
+  if (level === 'xhigh') return 'max'
+  return level
+}
+
+export function standardEffortToOpenAI(level: EffortLevel): OpenAIEffortLevel {
+  if (level === 'max') return 'xhigh'
+  return level as OpenAIEffortLevel
+}
+
+function capitalize(s: string): string {
+  return s.charAt(0).toUpperCase() + s.slice(1)
+}
+
 export function parseEffortValue(value: unknown): EffortValue | undefined {
   if (value === undefined || value === null || value === '') {
     return undefined
@@ -221,7 +269,7 @@ export function convertEffortValueToLevel(value: EffortValue): EffortLevel {
  * @param level The effort level to describe
  * @returns Human-readable description
  */
-export function getEffortLevelDescription(level: EffortLevel): string {
+export function getEffortLevelDescription(level: EffortLevel | OpenAIEffortLevel): string {
   switch (level) {
     case 'low':
       return 'Quick, straightforward implementation with minimal overhead'
@@ -231,6 +279,8 @@ export function getEffortLevelDescription(level: EffortLevel): string {
       return 'Comprehensive implementation with extensive testing and documentation'
     case 'max':
       return 'Maximum capability with deepest reasoning (Opus 4.6 only)'
+    case 'xhigh':
+      return 'Extra high reasoning effort for complex tasks (OpenAI/Codex)'
   }
 }
 
diff --git a/src/utils/model/aliases.ts b/src/utils/model/aliases.ts
index 91514da1..75ae388c 100644
--- a/src/utils/model/aliases.ts
+++ b/src/utils/model/aliases.ts
@@ -6,8 +6,6 @@ export const MODEL_ALIASES = [
   'sonnet[1m]',
   'opus[1m]',
   'opusplan',
-  'codexplan',
-  'codexspark',
 ] as const
 
 export type ModelAlias = (typeof MODEL_ALIASES)[number]
diff --git a/src/utils/model/model.ts b/src/utils/model/model.ts
index 6c81a8ef..97a74d95 100644
--- a/src/utils/model/model.ts
+++ b/src/utils/model/model.ts
@@ -123,6 +123,10 @@ export function getDefaultOpusModel(): ModelName {
   if (getAPIProvider() === 'openai') {
     return process.env.OPENAI_MODEL || 'gpt-4o'
   }
+  // Codex provider: use user-specified model or default to gpt-5.4
+  if (getAPIProvider() === 'codex') {
+    return process.env.OPENAI_MODEL || 'gpt-5.4'
+  }
   // 3P providers (Bedrock, Vertex, Foundry) — kept as a separate branch
   // even when values match, since 3P availability lags firstParty and
   // these will diverge again at the next model launch.
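// A short illustrative sketch of the max <-> xhigh mapping added in
// effort.ts above; the values follow directly from the functions in this
// patch and are not captured test output:
//
//   standardEffortToOpenAI('high')   //=> 'high'
//   standardEffortToOpenAI('max')    //=> 'xhigh'
//   openAIEffortToStandard('xhigh')  //=> 'max'
//   getEffortLevelLabel('xhigh')     //=> 'Extra High'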
@@ -145,6 +149,10 @@ export function getDefaultSonnetModel(): ModelName { if (getAPIProvider() === 'openai') { return process.env.OPENAI_MODEL || 'gpt-4o' } + // Codex provider + if (getAPIProvider() === 'codex') { + return process.env.OPENAI_MODEL || 'gpt-5.4' + } // Default to Sonnet 4.5 for 3P since they may not have 4.6 yet if (getAPIProvider() !== 'firstParty') { return getModelStrings().sonnet45 @@ -165,6 +173,10 @@ export function getDefaultHaikuModel(): ModelName { if (getAPIProvider() === 'openai') { return process.env.OPENAI_MODEL || 'gpt-4o-mini' } + // Codex provider + if (getAPIProvider() === 'codex') { + return process.env.OPENAI_MODEL || 'gpt-5.4' + } // Haiku 4.5 is available on all platforms (first-party, Foundry, Bedrock, Vertex) return getModelStrings().haiku45 @@ -217,6 +229,10 @@ export function getDefaultMainLoopModelSetting(): ModelName | ModelAlias { if (getAPIProvider() === 'openai') { return process.env.OPENAI_MODEL || 'gpt-4o' } + // Codex provider: always use the configured Codex model (default gpt-5.4) + if (getAPIProvider() === 'codex') { + return process.env.OPENAI_MODEL || 'gpt-5.4' + } // Ants default to defaultModel from flag config, or Opus 1M if not configured if (process.env.USER_TYPE === 'ant') { @@ -343,12 +359,6 @@ export function renderDefaultModelSetting( if (setting === 'opusplan') { return 'Opus 4.6 in plan mode, else Sonnet 4.6' } - if (setting === 'codexplan') { - return 'Codex Plan (GPT-5.4 high reasoning)' - } - if (setting === 'codexspark') { - return 'Codex Spark (GPT-5.3 Codex Spark)' - } return renderModelName(parseUserSpecifiedModel(setting)) } @@ -383,11 +393,12 @@ export function renderModelSetting(setting: ModelName | ModelAlias): string { if (setting === 'opusplan') { return 'Opus Plan' } + // Handle Codex models - show actual model name + resolved model if (setting === 'codexplan') { - return 'Codex Plan' + return 'codexplan (gpt-5.4)' } if (setting === 'codexspark') { - return 'Codex Spark' + return 'codexspark (gpt-5.3-codex-spark)' } if (isModelAlias(setting)) { return capitalize(setting) @@ -401,8 +412,8 @@ export function renderModelSetting(setting: ModelName | ModelAlias): string { * if the model is not recognized as a public model. */ export function getPublicModelDisplayName(model: ModelName): string | null { - // For OpenAI/Gemini providers, show the actual model name not a Claude alias - if (getAPIProvider() === 'openai' || getAPIProvider() === 'gemini') { + // For OpenAI/Gemini/Codex providers, show the actual model name not a Claude alias + if (getAPIProvider() === 'openai' || getAPIProvider() === 'gemini' || getAPIProvider() === 'codex') { return null } switch (model) { @@ -517,10 +528,6 @@ export function parseUserSpecifiedModel( if (isModelAlias(modelString)) { switch (modelString) { - case 'codexplan': - return modelInputTrimmed - case 'codexspark': - return modelInputTrimmed case 'opusplan': return getDefaultSonnetModel() + (has1mTag ? '[1m]' : '') // Sonnet is default, Opus in plan mode case 'sonnet': @@ -535,6 +542,14 @@ export function parseUserSpecifiedModel( } } + // Handle Codex aliases - map to actual model names + if (modelString === 'codexplan') { + return 'gpt-5.4' + } + if (modelString === 'codexspark') { + return 'gpt-5.3-codex-spark' + } + // Opus 4/4.1 are no longer available on the first-party API (same as // Claude.ai) — silently remap to the current Opus default. 
The 'opus' // alias already resolves to 4.6, so the only users on these explicit diff --git a/src/utils/model/modelOptions.ts b/src/utils/model/modelOptions.ts index 0c464d6a..84371c84 100644 --- a/src/utils/model/modelOptions.ts +++ b/src/utils/model/modelOptions.ts @@ -268,20 +268,65 @@ function getOpusPlanOption(): ModelOption { function getCodexPlanOption(): ModelOption { return { - value: 'codexplan', - label: 'Codex Plan', + value: 'gpt-5.4', + label: 'gpt-5.4', description: 'GPT-5.4 on the Codex backend with high reasoning', } } function getCodexSparkOption(): ModelOption { return { - value: 'codexspark', - label: 'Codex Spark', + value: 'gpt-5.3-codex-spark', + label: 'gpt-5.3-codex-spark', description: 'GPT-5.3 Codex Spark on the Codex backend for fast tool loops', } } +function getCodexModelOptions(): ModelOption[] { + return [ + { + value: 'gpt-5.4', + label: 'gpt-5.4', + description: 'GPT-5.4 with high reasoning', + }, + { + value: 'gpt-5.3-codex', + label: 'gpt-5.3-codex', + description: 'GPT-5.3 Codex with high reasoning', + }, + { + value: 'gpt-5.3-codex-spark', + label: 'gpt-5.3-codex-spark', + description: 'GPT-5.3 Codex Spark for fast tool loops', + }, + { + value: 'codexspark', + label: 'codexspark', + description: 'GPT-5.3 Codex Spark alias for fast tool loops', + }, + { + value: 'gpt-5.2-codex', + label: 'gpt-5.2-codex', + description: 'GPT-5.2 Codex with high reasoning', + }, + { + value: 'gpt-5.1-codex-max', + label: 'gpt-5.1-codex-max', + description: 'GPT-5.1 Codex Max for deep reasoning', + }, + { + value: 'gpt-5.1-codex-mini', + label: 'gpt-5.1-codex-mini', + description: 'GPT-5.1 Codex Mini - faster, cheaper', + }, + { + value: 'gpt-5.4-mini', + label: 'gpt-5.4-mini', + description: 'GPT-5.4 Mini - faster, cheaper', + }, + ] +} + // @[MODEL LAUNCH]: Update the model picker lists below to include/reorder options for the new model. // Each user tier (ant, Max/Team Premium, Pro/Team Standard/Enterprise, PAYG 1P, PAYG 3P) has its own list. 
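// An illustrative sketch of the Codex alias resolution above (signatures
// abbreviated; the expected values are taken from parseUserSpecifiedModel
// and renderModelSetting in the hunks above, not from a test run):
//
//   parseUserSpecifiedModel('codexplan')   //=> 'gpt-5.4'
//   parseUserSpecifiedModel('codexspark')  //=> 'gpt-5.3-codex-spark'
//   renderModelSetting('codexplan')        //=> 'codexplan (gpt-5.4)'
//   renderModelSetting('codexspark')       //=> 'codexspark (gpt-5.3-codex-spark)'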
function getModelOptionsBase(fastMode = false): ModelOption[] { @@ -360,8 +405,9 @@ function getModelOptionsBase(fastMode = false): ModelOption[] { // PAYG 3P: Default (Sonnet 4.5) + Sonnet (3P custom) or Sonnet 4.6/1M + Opus (3P custom) or Opus 4.1/Opus 4.6/Opus1M + Haiku + Opus 4.1 const payg3pOptions = [getDefaultOptionForUser(fastMode)] - if (getAPIProvider() === 'openai') { - payg3pOptions.push(getCodexPlanOption(), getCodexSparkOption()) + // Add Codex models for openai and codex providers + if (getAPIProvider() === 'openai' || getAPIProvider() === 'codex') { + payg3pOptions.push(...getCodexModelOptions()) } const customSonnet = getCustomSonnetOption() @@ -517,9 +563,9 @@ export function getModelOptions(fastMode = false): ModelOption[] { return filterModelOptionsByAllowlist(options) } else if (customModel === 'opusplan') { return filterModelOptionsByAllowlist([...options, getOpusPlanOption()]) - } else if (customModel === 'codexplan') { + } else if (customModel === 'gpt-5.4') { return filterModelOptionsByAllowlist([...options, getCodexPlanOption()]) - } else if (customModel === 'codexspark') { + } else if (customModel === 'gpt-5.3-codex-spark') { return filterModelOptionsByAllowlist([...options, getCodexSparkOption()]) } else if (customModel === 'opus' && getAPIProvider() === 'firstParty') { return filterModelOptionsByAllowlist([ @@ -554,11 +600,23 @@ export function getModelOptions(fastMode = false): ModelOption[] { */ function filterModelOptionsByAllowlist(options: ModelOption[]): ModelOption[] { const settings = getSettings_DEPRECATED() || {} - if (!settings.availableModels) { - return options // No restrictions - } - return options.filter( + const filtered = !settings.availableModels + ? options // No restrictions + : options.filter( opt => opt.value === null || (opt.value !== null && isModelAllowed(opt.value)), ) + + // Select state uses option values as identity keys. If two entries share the + // same value (e.g. provider-specific aliases collapsing to one model ID), + // navigation/focus can become inconsistent and appear as duplicate rendering. + const seen = new Set() + return filtered.filter(opt => { + const key = String(opt.value) + if (seen.has(key)) { + return false + } + seen.add(key) + return true + }) } diff --git a/src/utils/model/modelStrings.ts b/src/utils/model/modelStrings.ts index 5b7be104..4d8399d1 100644 --- a/src/utils/model/modelStrings.ts +++ b/src/utils/model/modelStrings.ts @@ -23,9 +23,12 @@ export type ModelStrings = Record const MODEL_KEYS = Object.keys(ALL_MODEL_CONFIGS) as ModelKey[] function getBuiltinModelStrings(provider: APIProvider): ModelStrings { + // Codex piggybacks on the OpenAI provider transport for Anthropic tier aliases. + // Reuse OpenAI mappings so model string lookups never return undefined. + const providerKey = provider === 'codex' ? 'openai' : provider const out = {} as ModelStrings for (const key of MODEL_KEYS) { - out[key] = ALL_MODEL_CONFIGS[key][provider] + out[key] = ALL_MODEL_CONFIGS[key][providerKey] } return out } diff --git a/src/utils/model/providers.ts b/src/utils/model/providers.ts index 30a1f1c9..6b6d627e 100644 --- a/src/utils/model/providers.ts +++ b/src/utils/model/providers.ts @@ -9,6 +9,7 @@ export type APIProvider = | 'openai' | 'gemini' | 'github' + | 'codex' export function getAPIProvider(): APIProvider { return isEnvTruthy(process.env.CLAUDE_CODE_USE_GEMINI) @@ -16,7 +17,9 @@ export function getAPIProvider(): APIProvider { : isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB) ? 
'github' : isEnvTruthy(process.env.CLAUDE_CODE_USE_OPENAI) - ? 'openai' + ? isCodexModel() + ? 'codex' + : 'openai' : isEnvTruthy(process.env.CLAUDE_CODE_USE_BEDROCK) ? 'bedrock' : isEnvTruthy(process.env.CLAUDE_CODE_USE_VERTEX) @@ -29,6 +32,19 @@ export function getAPIProvider(): APIProvider { export function usesAnthropicAccountFlow(): boolean { return getAPIProvider() === 'firstParty' } +function isCodexModel(): boolean { + const model = (process.env.OPENAI_MODEL || '').toLowerCase() + return ( + model === 'codexplan' || + model === 'codexspark' || + model === 'gpt-5.4' || + model === 'gpt-5.3-codex' || + model === 'gpt-5.3-codex-spark' || + model === 'gpt-5.2-codex' || + model === 'gpt-5.1-codex-max' || + model === 'gpt-5.1-codex-mini' + ) +} export function getAPIProviderForStatsig(): AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS { return getAPIProvider() as AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS diff --git a/src/utils/status.tsx b/src/utils/status.tsx index bc159cdb..97e8c742 100644 --- a/src/utils/status.tsx +++ b/src/utils/status.tsx @@ -12,6 +12,7 @@ import { formatNumber } from './format.js'; import { getIdeClientName, type IDEExtensionInstallationStatus, isJetBrainsIde, toIDEDisplayName } from './ide.js'; import { getClaudeAiUserDefaultModelDescription, modelDisplayString } from './model/model.js'; import { getAPIProvider } from './model/providers.js'; +import { resolveProviderRequest } from '../services/api/providerConfig.js'; import { getMTLSConfig } from './mtls.js'; import { checkInstall } from './nativeInstaller/index.js'; import { getProxyUrl } from './proxy.js'; @@ -246,6 +247,7 @@ export function buildAPIProviderProperties(): Property[] { vertex: 'Google Vertex AI', foundry: 'Microsoft Foundry', openai: 'OpenAI-compatible', + codex: 'Codex', gemini: 'Google Gemini', }[apiProvider]; properties.push({ @@ -332,9 +334,46 @@ export function buildAPIProviderProperties(): Property[] { } const openaiModel = process.env.OPENAI_MODEL; if (openaiModel) { + // Build display model string with resolved model + reasoning effort + let modelDisplay = openaiModel; + const resolvedModel = resolveProviderRequest({ model: openaiModel }).resolvedModel; + const reasoningEffort = resolveProviderRequest({ model: openaiModel }).reasoning?.effort; + if (resolvedModel && resolvedModel !== openaiModel.toLowerCase()) { + // Show resolved model name + modelDisplay = resolvedModel; + } + if (reasoningEffort) { + modelDisplay = `${modelDisplay} (${reasoningEffort})`; + } properties.push({ label: 'Model', - value: openaiModel + value: modelDisplay + }); + } + } else if (apiProvider === 'codex') { + const codexBaseUrl = process.env.OPENAI_BASE_URL; + if (codexBaseUrl) { + properties.push({ + label: 'Codex base URL', + value: codexBaseUrl + }); + } + const openaiModel = process.env.OPENAI_MODEL; + if (openaiModel) { + // Build display model string with resolved model + reasoning effort + let modelDisplay = openaiModel; + const resolvedModel = resolveProviderRequest({ model: openaiModel }).resolvedModel; + const reasoningEffort = resolveProviderRequest({ model: openaiModel }).reasoning?.effort; + if (resolvedModel && resolvedModel !== openaiModel.toLowerCase()) { + // Show resolved model name + modelDisplay = resolvedModel; + } + if (reasoningEffort) { + modelDisplay = `${modelDisplay} (${reasoningEffort})`; + } + properties.push({ + label: 'Model', + value: modelDisplay }); } } else if (apiProvider === 'gemini') { diff --git a/src/utils/suggestions/commandSuggestions.ts 
b/src/utils/suggestions/commandSuggestions.ts index 4a90db55..2f83ae6f 100644 --- a/src/utils/suggestions/commandSuggestions.ts +++ b/src/utils/suggestions/commandSuggestions.ts @@ -286,6 +286,25 @@ function createCommandSuggestionItem( } } +/** + * Ensure suggestion IDs are unique for React keys and selection logic. + * If duplicates exist, append a stable numeric suffix to subsequent entries. + */ +function ensureUniqueSuggestionIds(items: SuggestionItem[]): SuggestionItem[] { + const counts = new Map() + return items.map(item => { + const seen = counts.get(item.id) ?? 0 + counts.set(item.id, seen + 1) + if (seen === 0) { + return item + } + return { + ...item, + id: `${item.id}#${seen + 1}`, + } + }) +} + /** * Generate command suggestions based on input */ @@ -369,14 +388,14 @@ export function generateCommandSuggestions( // Combine with built-in commands prioritized after recently used, // so they remain visible even when many skills are installed - return [ + return ensureUniqueSuggestionIds([ ...recentlyUsed, ...builtinCommands, ...userCommands, ...projectCommands, ...policyCommands, ...otherCommands, - ].map(cmd => createCommandSuggestionItem(cmd)) + ].map(cmd => createCommandSuggestionItem(cmd))) } // The Fuse index filters isHidden at build time and is keyed on the @@ -491,10 +510,13 @@ export function generateCommandSuggestions( if (hiddenExact) { const hiddenId = getCommandId(hiddenExact) if (!fuseSuggestions.some(s => s.id === hiddenId)) { - return [createCommandSuggestionItem(hiddenExact), ...fuseSuggestions] + return ensureUniqueSuggestionIds([ + createCommandSuggestionItem(hiddenExact), + ...fuseSuggestions, + ]) } } - return fuseSuggestions + return ensureUniqueSuggestionIds(fuseSuggestions) } /** From aac326fa3f032494800a79e64d94ebfb1509e5c5 Mon Sep 17 00:00:00 2001 From: gnanam1990 Date: Thu, 2 Apr 2026 18:09:04 +0530 Subject: [PATCH 19/25] docs(setup): add beginner and advanced guides Split the setup documentation into a simple beginner path and a separate advanced path. Add OS-specific quick starts for Windows and macOS/Linux so non-technical users can copy and paste the right commands without sorting through Bun and source-build instructions. 
--- README.md | 291 +++++++--------------------------- docs/advanced-setup.md | 234 +++++++++++++++++++++++++++ docs/non-technical-setup.md | 116 ++++++++++++++ docs/quick-start-mac-linux.md | 108 +++++++++++++ docs/quick-start-windows.md | 108 +++++++++++++ 5 files changed, 621 insertions(+), 236 deletions(-) create mode 100644 docs/advanced-setup.md create mode 100644 docs/non-technical-setup.md create mode 100644 docs/quick-start-mac-linux.md create mode 100644 docs/quick-start-windows.md diff --git a/README.md b/README.md index 5c94ed80..f337fe41 100644 --- a/README.md +++ b/README.md @@ -8,277 +8,96 @@ All of Claude Code's tools work — bash, file read/write/edit, grep, glob, agen --- -## Install +## Start Here -### Option A: npm (recommended) +If you are new to terminals or just want the easiest path, start with the beginner guides: + +- [Non-Technical Setup](docs/non-technical-setup.md) +- [Windows Quick Start](docs/quick-start-windows.md) +- [macOS / Linux Quick Start](docs/quick-start-mac-linux.md) + +If you want source builds, Bun workflows, profile launchers, or full provider examples, use: + +- [Advanced Setup](docs/advanced-setup.md) + +--- + +## Beginner Install + +For most users, install the npm package: ```bash npm install -g @gitlawb/openclaude ``` -### Option B: From source (requires Bun) - -Use Bun `1.3.11` or newer for source builds on Windows. Older Bun versions such as `1.3.4` can fail with a large batch of unresolved module errors during `bun run build`. +The package name is `@gitlawb/openclaude`, but the command you run is: ```bash -# Clone from gitlawb -git clone https://node.gitlawb.com/z6MkqDnb7Siv3Cwj7pGJq4T5EsUisECqR8KpnDLwcaZq5TPr/openclaude.git -cd openclaude - -# Install dependencies -bun install - -# Build -bun run build - -# Link globally (optional) -npm link -``` - -### Option C: Run directly with Bun (no build step) - -```bash -git clone https://node.gitlawb.com/z6MkqDnb7Siv3Cwj7pGJq4T5EsUisECqR8KpnDLwcaZq5TPr/openclaude.git -cd openclaude -bun install -bun run dev +openclaude ``` --- -## Quick Start +## Fastest Setup -### 1. Set 3 environment variables +### Windows PowerShell + +```powershell +npm install -g @gitlawb/openclaude + +$env:CLAUDE_CODE_USE_OPENAI="1" +$env:OPENAI_API_KEY="sk-your-key-here" +$env:OPENAI_MODEL="gpt-4o" + +openclaude +``` + +### macOS / Linux ```bash +npm install -g @gitlawb/openclaude + export CLAUDE_CODE_USE_OPENAI=1 export OPENAI_API_KEY=sk-your-key-here export OPENAI_MODEL=gpt-4o -``` -### 2. Run it - -```bash -# If installed via npm openclaude - -# If built from source -bun run dev -# or after build: -node dist/cli.mjs ``` -That's it. The tool system, streaming, file editing, multi-step reasoning — everything works through the model you picked. - -The npm package name is `@gitlawb/openclaude`, but the installed CLI command is still `openclaude`. +That is enough to start with OpenAI. --- -## Provider Examples +## Choose Your Guide + +### Beginner + +- Want the easiest setup with copy-paste steps: [Non-Technical Setup](docs/non-technical-setup.md) +- On Windows: [Windows Quick Start](docs/quick-start-windows.md) +- On macOS or Linux: [macOS / Linux Quick Start](docs/quick-start-mac-linux.md) + +### Advanced + +- Want source builds, Bun, local profiles, and runtime checks: [Advanced Setup](docs/advanced-setup.md) + +--- + +## Common Beginner Choices ### OpenAI -```bash -export CLAUDE_CODE_USE_OPENAI=1 -export OPENAI_API_KEY=sk-... -export OPENAI_MODEL=gpt-4o -``` +Best default if you already have an OpenAI API key. 
-### Codex via ChatGPT auth +### Ollama -`codexplan` maps to GPT-5.4 on the Codex backend with high reasoning. -`codexspark` maps to GPT-5.3 Codex Spark for faster loops. +Best if you want to run models locally on your own machine. -If you already use the Codex CLI, OpenClaude will read `~/.codex/auth.json` -automatically. You can also point it elsewhere with `CODEX_AUTH_JSON_PATH` or -override the token directly with `CODEX_API_KEY`. +### Codex -```bash -export CLAUDE_CODE_USE_OPENAI=1 -export OPENAI_MODEL=codexplan - -# optional if you do not already have ~/.codex/auth.json -export CODEX_API_KEY=... - -openclaude -``` - -### DeepSeek - -```bash -export CLAUDE_CODE_USE_OPENAI=1 -export OPENAI_API_KEY=sk-... -export OPENAI_BASE_URL=https://api.deepseek.com/v1 -export OPENAI_MODEL=deepseek-chat -``` - -### Google Gemini (via OpenRouter) - -```bash -export CLAUDE_CODE_USE_OPENAI=1 -export OPENAI_API_KEY=sk-or-... -export OPENAI_BASE_URL=https://openrouter.ai/api/v1 -export OPENAI_MODEL=google/gemini-2.0-flash -``` - -### Ollama (local, free) - -```bash -ollama pull llama3.3:70b - -export CLAUDE_CODE_USE_OPENAI=1 -export OPENAI_BASE_URL=http://localhost:11434/v1 -export OPENAI_MODEL=llama3.3:70b -# no API key needed for local models -``` - -### LM Studio (local) - -```bash -export CLAUDE_CODE_USE_OPENAI=1 -export OPENAI_BASE_URL=http://localhost:1234/v1 -export OPENAI_MODEL=your-model-name -``` - -### Together AI - -```bash -export CLAUDE_CODE_USE_OPENAI=1 -export OPENAI_API_KEY=... -export OPENAI_BASE_URL=https://api.together.xyz/v1 -export OPENAI_MODEL=meta-llama/Llama-3.3-70B-Instruct-Turbo -``` - -### Groq - -```bash -export CLAUDE_CODE_USE_OPENAI=1 -export OPENAI_API_KEY=gsk_... -export OPENAI_BASE_URL=https://api.groq.com/openai/v1 -export OPENAI_MODEL=llama-3.3-70b-versatile -``` - -### Mistral - -```bash -export CLAUDE_CODE_USE_OPENAI=1 -export OPENAI_API_KEY=... -export OPENAI_BASE_URL=https://api.mistral.ai/v1 -export OPENAI_MODEL=mistral-large-latest -``` - -### Azure OpenAI - -```bash -export CLAUDE_CODE_USE_OPENAI=1 -export OPENAI_API_KEY=your-azure-key -export OPENAI_BASE_URL=https://your-resource.openai.azure.com/openai/deployments/your-deployment/v1 -export OPENAI_MODEL=gpt-4o -``` +Best if you already use the Codex CLI or ChatGPT Codex backend. --- -## Environment Variables - -| Variable | Required | Description | -|----------|----------|-------------| -| `CLAUDE_CODE_USE_OPENAI` | Yes | Set to `1` to enable the OpenAI provider | -| `OPENAI_API_KEY` | Yes* | Your API key (*not needed for local models like Ollama) | -| `OPENAI_MODEL` | Yes | Model name (e.g. `gpt-4o`, `deepseek-chat`, `llama3.3:70b`) | -| `OPENAI_BASE_URL` | No | API endpoint (defaults to `https://api.openai.com/v1`) | -| `CODEX_API_KEY` | Codex only | Codex/ChatGPT access token override | -| `CODEX_AUTH_JSON_PATH` | Codex only | Path to a Codex CLI `auth.json` file | -| `CODEX_HOME` | Codex only | Alternative Codex home directory (`auth.json` will be read from here) | -| `OPENCLAUDE_DISABLE_CO_AUTHORED_BY` | No | Set to `1` to suppress the default `Co-Authored-By` trailer in generated git commit messages | - -You can also use `ANTHROPIC_MODEL` to override the model name. `OPENAI_MODEL` takes priority. - -OpenClaude PR bodies use OpenClaude branding by default. `OPENCLAUDE_DISABLE_CO_AUTHORED_BY` only affects the commit trailer, not PR attribution text. 
- ---- - -## Runtime Hardening - -Use these commands to keep the CLI stable and catch environment mistakes early: - -```bash -# quick startup sanity check -bun run smoke - -# validate provider env + reachability -bun run doctor:runtime - -# print machine-readable runtime diagnostics -bun run doctor:runtime:json - -# persist a diagnostics report to reports/doctor-runtime.json -bun run doctor:report - -# full local hardening check (smoke + runtime doctor) -bun run hardening:check - -# strict hardening (includes project-wide typecheck) -bun run hardening:strict -``` - -Notes: -- `doctor:runtime` fails fast if `CLAUDE_CODE_USE_OPENAI=1` with a placeholder key (`SUA_CHAVE`) or a missing key for non-local providers. -- Local providers (for example `http://localhost:11434/v1`) can run without `OPENAI_API_KEY`. -- Codex profiles validate `CODEX_API_KEY` or the Codex CLI auth file and probe `POST /responses` instead of `GET /models`. - -### Provider Launch Profiles - -Use profile launchers to avoid repeated environment setup: - -```bash -# one-time profile bootstrap (prefer viable local Ollama, otherwise OpenAI) -bun run profile:init - -# preview the best provider/model for your goal -bun run profile:recommend -- --goal coding --benchmark - -# auto-apply the best available local/openai provider/model for your goal -bun run profile:auto -- --goal latency - -# codex bootstrap (defaults to codexplan and ~/.codex/auth.json) -bun run profile:codex - -# openai bootstrap with explicit key -bun run profile:init -- --provider openai --api-key sk-... - -# ollama bootstrap with custom model -bun run profile:init -- --provider ollama --model llama3.1:8b - -# ollama bootstrap with intelligent model auto-selection -bun run profile:init -- --provider ollama --goal coding - -# codex bootstrap with a fast model alias -bun run profile:init -- --provider codex --model codexspark - -# launch using persisted profile (.openclaude-profile.json) -bun run dev:profile - -# codex profile (uses CODEX_API_KEY or ~/.codex/auth.json) -bun run dev:codex - -# OpenAI profile (requires OPENAI_API_KEY in your shell) -bun run dev:openai - -# Ollama profile (defaults: localhost:11434, llama3.1:8b) -bun run dev:ollama -``` - -`profile:recommend` ranks installed Ollama models for `latency`, `balanced`, or `coding`, and `profile:auto` can persist the recommendation directly. -If no profile exists yet, `dev:profile` now uses the same goal-aware defaults when picking the initial model. - -Use `--provider ollama` when you want a local-only path. Auto mode falls back to OpenAI when no viable local chat model is installed. -Goal-based Ollama selection only recommends among models that are already installed and reachable from Ollama. - -Use `profile:codex` or `--provider codex` when you want the ChatGPT Codex backend. - -`dev:openai`, `dev:ollama`, and `dev:codex` run `doctor:runtime` first and only launch the app if checks pass. -For `dev:ollama`, make sure Ollama is running locally before launch. - --- ## What Works diff --git a/docs/advanced-setup.md b/docs/advanced-setup.md new file mode 100644 index 00000000..f2e75d59 --- /dev/null +++ b/docs/advanced-setup.md @@ -0,0 +1,234 @@ +# OpenClaude Advanced Setup + +This guide is for users who want source builds, Bun workflows, provider profiles, diagnostics, or more control over runtime behavior. + +## Install Options + +### Option A: npm + +```bash +npm install -g @gitlawb/openclaude +``` + +### Option B: From source with Bun + +Use Bun `1.3.11` or newer for source builds on Windows. 
Older Bun versions can fail during `bun run build`. + +```bash +git clone https://node.gitlawb.com/z6MkqDnb7Siv3Cwj7pGJq4T5EsUisECqR8KpnDLwcaZq5TPr/openclaude.git +cd openclaude + +bun install +bun run build +npm link +``` + +### Option C: Run directly with Bun + +```bash +git clone https://node.gitlawb.com/z6MkqDnb7Siv3Cwj7pGJq4T5EsUisECqR8KpnDLwcaZq5TPr/openclaude.git +cd openclaude + +bun install +bun run dev +``` + +## Provider Examples + +### OpenAI + +```bash +export CLAUDE_CODE_USE_OPENAI=1 +export OPENAI_API_KEY=sk-... +export OPENAI_MODEL=gpt-4o +``` + +### Codex via ChatGPT auth + +`codexplan` maps to GPT-5.4 on the Codex backend with high reasoning. +`codexspark` maps to GPT-5.3 Codex Spark for faster loops. + +If you already use the Codex CLI, OpenClaude reads `~/.codex/auth.json` automatically. You can also point it elsewhere with `CODEX_AUTH_JSON_PATH` or override the token directly with `CODEX_API_KEY`. + +```bash +export CLAUDE_CODE_USE_OPENAI=1 +export OPENAI_MODEL=codexplan + +# optional if you do not already have ~/.codex/auth.json +export CODEX_API_KEY=... + +openclaude +``` + +### DeepSeek + +```bash +export CLAUDE_CODE_USE_OPENAI=1 +export OPENAI_API_KEY=sk-... +export OPENAI_BASE_URL=https://api.deepseek.com/v1 +export OPENAI_MODEL=deepseek-chat +``` + +### Google Gemini via OpenRouter + +```bash +export CLAUDE_CODE_USE_OPENAI=1 +export OPENAI_API_KEY=sk-or-... +export OPENAI_BASE_URL=https://openrouter.ai/api/v1 +export OPENAI_MODEL=google/gemini-2.0-flash-001 +``` + +OpenRouter model availability changes over time. If a model stops working, try another current OpenRouter model before assuming the integration is broken. + +### Ollama + +```bash +ollama pull llama3.3:70b + +export CLAUDE_CODE_USE_OPENAI=1 +export OPENAI_BASE_URL=http://localhost:11434/v1 +export OPENAI_MODEL=llama3.3:70b +``` + +### LM Studio + +```bash +export CLAUDE_CODE_USE_OPENAI=1 +export OPENAI_BASE_URL=http://localhost:1234/v1 +export OPENAI_MODEL=your-model-name +``` + +### Together AI + +```bash +export CLAUDE_CODE_USE_OPENAI=1 +export OPENAI_API_KEY=... +export OPENAI_BASE_URL=https://api.together.xyz/v1 +export OPENAI_MODEL=meta-llama/Llama-3.3-70B-Instruct-Turbo +``` + +### Groq + +```bash +export CLAUDE_CODE_USE_OPENAI=1 +export OPENAI_API_KEY=gsk_... +export OPENAI_BASE_URL=https://api.groq.com/openai/v1 +export OPENAI_MODEL=llama-3.3-70b-versatile +``` + +### Mistral + +```bash +export CLAUDE_CODE_USE_OPENAI=1 +export OPENAI_API_KEY=... 
+export OPENAI_BASE_URL=https://api.mistral.ai/v1 +export OPENAI_MODEL=mistral-large-latest +``` + +### Azure OpenAI + +```bash +export CLAUDE_CODE_USE_OPENAI=1 +export OPENAI_API_KEY=your-azure-key +export OPENAI_BASE_URL=https://your-resource.openai.azure.com/openai/deployments/your-deployment/v1 +export OPENAI_MODEL=gpt-4o +``` + +## Environment Variables + +| Variable | Required | Description | +|----------|----------|-------------| +| `CLAUDE_CODE_USE_OPENAI` | Yes | Set to `1` to enable the OpenAI provider | +| `OPENAI_API_KEY` | Yes* | Your API key (`*` not needed for local models like Ollama) | +| `OPENAI_MODEL` | Yes | Model name such as `gpt-4o`, `deepseek-chat`, or `llama3.3:70b` | +| `OPENAI_BASE_URL` | No | API endpoint, defaulting to `https://api.openai.com/v1` | +| `CODEX_API_KEY` | Codex only | Codex or ChatGPT access token override | +| `CODEX_AUTH_JSON_PATH` | Codex only | Path to a Codex CLI `auth.json` file | +| `CODEX_HOME` | Codex only | Alternative Codex home directory | +| `OPENCLAUDE_DISABLE_CO_AUTHORED_BY` | No | Suppress the default `Co-Authored-By` trailer in generated git commits | + +You can also use `ANTHROPIC_MODEL` to override the model name. `OPENAI_MODEL` takes priority. + +## Runtime Hardening + +Use these commands to validate your setup and catch mistakes early: + +```bash +# quick startup sanity check +bun run smoke + +# validate provider env + reachability +bun run doctor:runtime + +# print machine-readable runtime diagnostics +bun run doctor:runtime:json + +# persist a diagnostics report to reports/doctor-runtime.json +bun run doctor:report + +# full local hardening check (smoke + runtime doctor) +bun run hardening:check + +# strict hardening (includes project-wide typecheck) +bun run hardening:strict +``` + +Notes: + +- `doctor:runtime` fails fast if `CLAUDE_CODE_USE_OPENAI=1` with a placeholder key or a missing key for non-local providers. +- Local providers such as `http://localhost:11434/v1` can run without `OPENAI_API_KEY`. +- Codex profiles validate `CODEX_API_KEY` or the Codex CLI auth file and probe `POST /responses` instead of `GET /models`. + +## Provider Launch Profiles + +Use profile launchers to avoid repeated environment setup: + +```bash +# one-time profile bootstrap (prefer viable local Ollama, otherwise OpenAI) +bun run profile:init + +# preview the best provider/model for your goal +bun run profile:recommend -- --goal coding --benchmark + +# auto-apply the best available local/openai provider/model for your goal +bun run profile:auto -- --goal latency + +# codex bootstrap (defaults to codexplan and ~/.codex/auth.json) +bun run profile:codex + +# openai bootstrap with explicit key +bun run profile:init -- --provider openai --api-key sk-... + +# ollama bootstrap with custom model +bun run profile:init -- --provider ollama --model llama3.1:8b + +# ollama bootstrap with intelligent model auto-selection +bun run profile:init -- --provider ollama --goal coding + +# codex bootstrap with a fast model alias +bun run profile:init -- --provider codex --model codexspark + +# launch using persisted profile (.openclaude-profile.json) +bun run dev:profile + +# codex profile (uses CODEX_API_KEY or ~/.codex/auth.json) +bun run dev:codex + +# OpenAI profile (requires OPENAI_API_KEY in your shell) +bun run dev:openai + +# Ollama profile (defaults: localhost:11434, llama3.1:8b) +bun run dev:ollama +``` + +`profile:recommend` ranks installed Ollama models for `latency`, `balanced`, or `coding`, and `profile:auto` can persist the recommendation directly. 
+ +If no profile exists yet, `dev:profile` uses the same goal-aware defaults when picking the initial model. + +Use `--provider ollama` when you want a local-only path. Auto mode falls back to OpenAI when no viable local chat model is installed. + +Use `profile:codex` or `--provider codex` when you want the ChatGPT Codex backend. + +`dev:openai`, `dev:ollama`, and `dev:codex` run `doctor:runtime` first and only launch the app if checks pass. + +For `dev:ollama`, make sure Ollama is running locally before launch. diff --git a/docs/non-technical-setup.md b/docs/non-technical-setup.md new file mode 100644 index 00000000..9efca0f6 --- /dev/null +++ b/docs/non-technical-setup.md @@ -0,0 +1,116 @@ +# OpenClaude for Non-Technical Users + +This guide is for people who want the easiest setup path. + +You do not need to build from source. You do not need Bun. You do not need to understand the full codebase. + +If you can copy and paste commands into a terminal, you can set this up. + +## What OpenClaude Does + +OpenClaude lets you use an AI coding assistant with different model providers such as: + +- OpenAI +- DeepSeek +- Gemini +- Ollama +- Codex + +For most first-time users, OpenAI is the easiest option. + +## Before You Start + +You need: + +1. Node.js 20 or newer installed +2. A terminal window +3. An API key from your provider, unless you are using a local model like Ollama + +## Fastest Path + +1. Install OpenClaude with npm +2. Set 3 environment variables +3. Run `openclaude` + +## Choose Your Operating System + +- Windows: [Windows Quick Start](quick-start-windows.md) +- macOS / Linux: [macOS / Linux Quick Start](quick-start-mac-linux.md) + +## Which Provider Should You Choose? + +### OpenAI + +Choose this if: + +- you want the easiest setup +- you already have an OpenAI API key + +### Ollama + +Choose this if: + +- you want to run models locally +- you do not want to depend on a cloud API for testing + +### Codex + +Choose this if: + +- you already use the Codex CLI +- you already have Codex or ChatGPT auth configured + +## What Success Looks Like + +After you run `openclaude`, the CLI should start and wait for your prompt. + +At that point, you can ask it to: + +- explain code +- edit files +- run commands +- review changes + +## Common Problems + +### `openclaude` command not found + +Cause: + +- npm installed the package, but your terminal has not refreshed yet + +Fix: + +1. Close the terminal +2. Open a new terminal +3. Run `openclaude` again + +### Invalid API key + +Cause: + +- the key is wrong, expired, or copied incorrectly + +Fix: + +1. Get a fresh key from your provider +2. Paste it again carefully +3. Re-run `openclaude` + +### Ollama not working + +Cause: + +- Ollama is not installed or not running + +Fix: + +1. Install Ollama from `https://ollama.com/download` +2. Start Ollama +3. Try again + +## Want More Control? + +If you want source builds, advanced provider profiles, diagnostics, or Bun-based workflows, use: + +- [Advanced Setup](advanced-setup.md) diff --git a/docs/quick-start-mac-linux.md b/docs/quick-start-mac-linux.md new file mode 100644 index 00000000..7e8cb96e --- /dev/null +++ b/docs/quick-start-mac-linux.md @@ -0,0 +1,108 @@ +# OpenClaude Quick Start for macOS and Linux + +This guide uses a standard shell such as Terminal, iTerm, bash, or zsh. + +## 1. Install Node.js + +Install Node.js 20 or newer from: + +- `https://nodejs.org/` + +Then check it: + +```bash +node --version +npm --version +``` + +## 2. 
Install OpenClaude + +```bash +npm install -g @gitlawb/openclaude +``` + +## 3. Pick One Provider + +### Option A: OpenAI + +Replace `sk-your-key-here` with your real key. + +```bash +export CLAUDE_CODE_USE_OPENAI=1 +export OPENAI_API_KEY=sk-your-key-here +export OPENAI_MODEL=gpt-4o + +openclaude +``` + +### Option B: DeepSeek + +```bash +export CLAUDE_CODE_USE_OPENAI=1 +export OPENAI_API_KEY=sk-your-key-here +export OPENAI_BASE_URL=https://api.deepseek.com/v1 +export OPENAI_MODEL=deepseek-chat + +openclaude +``` + +### Option C: Ollama + +Install Ollama first from: + +- `https://ollama.com/download` + +Then run: + +```bash +ollama pull llama3.1:8b + +export CLAUDE_CODE_USE_OPENAI=1 +export OPENAI_BASE_URL=http://localhost:11434/v1 +export OPENAI_MODEL=llama3.1:8b + +openclaude +``` + +No API key is needed for Ollama local models. + +## 4. If `openclaude` Is Not Found + +Close the terminal, open a new one, and try again: + +```bash +openclaude +``` + +## 5. If Your Provider Fails + +Check the basics: + +### For OpenAI or DeepSeek + +- make sure the key is real +- make sure you copied it fully + +### For Ollama + +- make sure Ollama is installed +- make sure Ollama is running +- make sure the model was pulled successfully + +## 6. Updating OpenClaude + +```bash +npm install -g @gitlawb/openclaude@latest +``` + +## 7. Uninstalling OpenClaude + +```bash +npm uninstall -g @gitlawb/openclaude +``` + +## Need Advanced Setup? + +Use: + +- [Advanced Setup](advanced-setup.md) diff --git a/docs/quick-start-windows.md b/docs/quick-start-windows.md new file mode 100644 index 00000000..dfac8782 --- /dev/null +++ b/docs/quick-start-windows.md @@ -0,0 +1,108 @@ +# OpenClaude Quick Start for Windows + +This guide uses Windows PowerShell. + +## 1. Install Node.js + +Install Node.js 20 or newer from: + +- `https://nodejs.org/` + +Then open PowerShell and check it: + +```powershell +node --version +npm --version +``` + +## 2. Install OpenClaude + +```powershell +npm install -g @gitlawb/openclaude +``` + +## 3. Pick One Provider + +### Option A: OpenAI + +Replace `sk-your-key-here` with your real key. + +```powershell +$env:CLAUDE_CODE_USE_OPENAI="1" +$env:OPENAI_API_KEY="sk-your-key-here" +$env:OPENAI_MODEL="gpt-4o" + +openclaude +``` + +### Option B: DeepSeek + +```powershell +$env:CLAUDE_CODE_USE_OPENAI="1" +$env:OPENAI_API_KEY="sk-your-key-here" +$env:OPENAI_BASE_URL="https://api.deepseek.com/v1" +$env:OPENAI_MODEL="deepseek-chat" + +openclaude +``` + +### Option C: Ollama + +Install Ollama first from: + +- `https://ollama.com/download/windows` + +Then run: + +```powershell +ollama pull llama3.1:8b + +$env:CLAUDE_CODE_USE_OPENAI="1" +$env:OPENAI_BASE_URL="http://localhost:11434/v1" +$env:OPENAI_MODEL="llama3.1:8b" + +openclaude +``` + +No API key is needed for Ollama local models. + +## 4. If `openclaude` Is Not Found + +Close PowerShell, open a new one, and try again: + +```powershell +openclaude +``` + +## 5. If Your Provider Fails + +Check the basics: + +### For OpenAI or DeepSeek + +- make sure the key is real +- make sure you copied it fully + +### For Ollama + +- make sure Ollama is installed +- make sure Ollama is running +- make sure the model was pulled successfully + +## 6. Updating OpenClaude + +```powershell +npm install -g @gitlawb/openclaude@latest +``` + +## 7. Uninstalling OpenClaude + +```powershell +npm uninstall -g @gitlawb/openclaude +``` + +## Need Advanced Setup? 
+ +Use: + +- [Advanced Setup](advanced-setup.md) From f4818dc213de9a409447cb6afad8558c111384d7 Mon Sep 17 00:00:00 2001 From: Juan Camilo Date: Thu, 2 Apr 2026 14:41:40 +0200 Subject: [PATCH 20/25] fix: shim reliability and protocol compliance overhaul MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Addresses the most critical remaining issues in the provider shim layer, building on top of #124 (recursive schema normalization + try/finally). openaiShim.ts: - Throw APIError via SDK factory instead of plain Error — enables retry on 429/503 (was completely broken: zero retries for all 3P providers) - Guard stop_reason !== null before emitting usage-only message_delta (Azure/Groq send usage before finish_reason) - Fix assistant content: join text parts instead of invalid as-string cast (Mistral rejects array content on assistant role) - Expose real HTTP Response in withResponse() for header inspection - Skip stream_options for local providers (Ollama < 0.5 compatibility) codexShim.ts: - Throw APIError at all 4 throw sites (HTTP + 3 streaming errors) - Add tool_choice 'none' mapping (was silently ignored) - Forward is_error flag with Error: prefix (matching openaiShim) --- src/services/api/codexShim.ts | 36 +++++++++++++++++++++------------ src/services/api/openaiShim.ts | 37 ++++++++++++++++++++++++++-------- 2 files changed, 52 insertions(+), 21 deletions(-) diff --git a/src/services/api/codexShim.ts b/src/services/api/codexShim.ts index c65abdf0..1a2c375c 100644 --- a/src/services/api/codexShim.ts +++ b/src/services/api/codexShim.ts @@ -1,3 +1,4 @@ +import { APIError } from '@anthropic-ai/sdk' import type { ResolvedCodexCredentials, ResolvedProviderRequest, @@ -234,7 +235,10 @@ export function convertAnthropicMessagesToResponsesInput( items.push({ type: 'function_call_output', call_id: callId, - output: convertToolResultToText(toolResult.content), + output: (() => { + const out = convertToolResultToText(toolResult.content) + return toolResult.is_error ? `Error: ${out}` : out + })(), }) } @@ -453,6 +457,7 @@ function convertToolChoice(toolChoice: unknown): unknown { if (!choice?.type) return undefined if (choice.type === 'auto') return 'auto' if (choice.type === 'any') return 'required' + if (choice.type === 'none') return 'none' if (choice.type === 'tool' && choice.name) { return { type: 'function', @@ -553,7 +558,13 @@ export async function performCodexRequest(options: { if (!response.ok) { const errorBody = await response.text().catch(() => 'unknown error') - throw new Error(`Codex API error ${response.status}: ${errorBody}`) + let errorResponse: object | undefined + try { errorResponse = JSON.parse(errorBody) } catch { /* raw text */ } + throw APIError.generate( + response.status, errorResponse, + `Codex API error ${response.status}: ${errorBody}`, + response.headers as unknown as Record, + ) } return response @@ -633,11 +644,9 @@ export async function collectCodexCompletedResponse( for await (const event of readSseEvents(response)) { if (event.event === 'response.failed') { - throw new Error( - event.data?.response?.error?.message ?? - event.data?.error?.message ?? - 'Codex response failed', - ) + const msg = event.data?.response?.error?.message ?? + event.data?.error?.message ?? 
'Codex response failed' + throw APIError.generate(500, undefined, msg, {} as Record) } if ( @@ -650,7 +659,10 @@ export async function collectCodexCompletedResponse( } if (!completedResponse) { - throw new Error('Codex response ended without a completed payload') + throw APIError.generate( + 500, undefined, 'Codex response ended without a completed payload', + {} as Record, + ) } return completedResponse @@ -806,11 +818,9 @@ export async function* codexStreamToAnthropic( } if (event.event === 'response.failed') { - throw new Error( - payload?.response?.error?.message ?? - payload?.error?.message ?? - 'Codex response failed', - ) + const msg = payload?.response?.error?.message ?? + payload?.error?.message ?? 'Codex response failed' + throw APIError.generate(500, undefined, msg, {} as Record) } } diff --git a/src/services/api/openaiShim.ts b/src/services/api/openaiShim.ts index 1f99b7c4..645e602b 100644 --- a/src/services/api/openaiShim.ts +++ b/src/services/api/openaiShim.ts @@ -21,6 +21,7 @@ * OPENAI_MODEL — optional; use github:copilot or openai/gpt-4.1 style IDs */ +import { APIError } from '@anthropic-ai/sdk' import { isEnvTruthy } from '../../utils/envUtils.js' import { hydrateGithubModelsTokenFromSecureStorage } from '../../utils/githubModelsCredentials.js' import { @@ -33,6 +34,7 @@ import { type ShimCreateParams, } from './codexShim.js' import { + isLocalProviderUrl, resolveCodexApiCredentials, resolveProviderRequest, } from './providerConfig.js' @@ -213,7 +215,10 @@ function convertMessages( const assistantMsg: OpenAIMessage = { role: 'assistant', - content: convertContentBlocks(textContent) as string, + content: (() => { + const c = convertContentBlocks(textContent) + return typeof c === 'string' ? c : Array.isArray(c) ? c.map((p: { text?: string }) => p.text ?? '').join('') : '' + })(), } if (toolUses.length > 0) { @@ -242,7 +247,10 @@ function convertMessages( } else { result.push({ role: 'assistant', - content: convertContentBlocks(content) as string, + content: (() => { + const c = convertContentBlocks(content) + return typeof c === 'string' ? c : Array.isArray(c) ? c.map((p: { text?: string }) => p.text ?? '').join('') : '' + })(), }) } } @@ -617,7 +625,8 @@ async function* openaiStreamToAnthropic( if ( !hasEmittedFinalUsage && chunkUsage && - (chunk.choices?.length ?? 0) === 0 + (chunk.choices?.length ?? 0) === 0 && + lastStopReason !== null ) { yield { type: 'message_delta', @@ -666,9 +675,12 @@ class OpenAIShimMessages { ) { const self = this + let httpResponse: Response | undefined + const promise = (async () => { const request = resolveProviderRequest({ model: params.model }) const response = await self._doRequest(request, params, options) + httpResponse = response if (params.stream) { return new OpenAIShimStream( @@ -695,8 +707,9 @@ class OpenAIShimMessages { const data = await promise return { data, - response: new Response(), - request_id: makeMessageId(), + response: httpResponse ?? new Response(), + request_id: + httpResponse?.headers.get('x-request-id') ?? makeMessageId(), } } @@ -774,7 +787,7 @@ class OpenAIShimMessages { body.max_completion_tokens = maxCompletionTokensValue } - if (params.stream) { + if (params.stream && !isLocalProviderUrl(request.baseUrl)) { body.stream_options = { include_usage: true } } @@ -890,12 +903,20 @@ class OpenAIShimMessages { const errorBody = await response.text().catch(() => 'unknown error') const rateHint = isGithub && response.status === 429 ? 
formatRetryAfterHint(response) : '' - throw new Error( + let errorResponse: object | undefined + try { errorResponse = JSON.parse(errorBody) } catch { /* raw text */ } + throw APIError.generate( + response.status, + errorResponse, `OpenAI API error ${response.status}: ${errorBody}${rateHint}`, + response.headers as unknown as Record, ) } - throw new Error('OpenAI shim: request loop exited unexpectedly') + throw APIError.generate( + 500, undefined, 'OpenAI shim: request loop exited unexpectedly', + {} as Record, + ) } private _convertNonStreamingResponse( From f385740bd6e72f9ae6ff145d8f328f2a0b3dc7be Mon Sep 17 00:00:00 2001 From: Juan Camilo Date: Thu, 2 Apr 2026 14:43:03 +0200 Subject: [PATCH 21/25] fix: use isEnvTruthy() for provider detection in context window lookup Replace raw === '1' || === 'true' comparisons with isEnvTruthy() in context.ts for consistency with getAPIProvider() in providers.ts. This also covers the newly added CLAUDE_CODE_USE_GITHUB provider. Add native Gemini model entries (without google/ prefix) to both context window and max output token tables. Corrects gemini-2.5-pro and gemini-2.5-flash max output tokens to 65,536 (was 8,192/32,768). --- src/utils/context.ts | 18 ++++++------------ src/utils/model/openaiContextWindows.ts | 12 +++++++++++- 2 files changed, 17 insertions(+), 13 deletions(-) diff --git a/src/utils/context.ts b/src/utils/context.ts index 7dba02b7..4eae1782 100644 --- a/src/utils/context.ts +++ b/src/utils/context.ts @@ -74,12 +74,9 @@ export function getContextWindowForModel( // OpenAI-compatible provider — use known context windows for the model if ( - process.env.CLAUDE_CODE_USE_OPENAI === '1' || - process.env.CLAUDE_CODE_USE_OPENAI === 'true' || - process.env.CLAUDE_CODE_USE_GEMINI === '1' || - process.env.CLAUDE_CODE_USE_GEMINI === 'true' || - process.env.CLAUDE_CODE_USE_GITHUB === '1' || - process.env.CLAUDE_CODE_USE_GITHUB === 'true' + isEnvTruthy(process.env.CLAUDE_CODE_USE_OPENAI) || + isEnvTruthy(process.env.CLAUDE_CODE_USE_GEMINI) || + isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB) ) { const openaiWindow = getOpenAIContextWindow(model) if (openaiWindow !== undefined) { @@ -180,12 +177,9 @@ export function getModelMaxOutputTokens(model: string): { // OpenAI-compatible provider — use known output limits to avoid 400 errors if ( - process.env.CLAUDE_CODE_USE_OPENAI === '1' || - process.env.CLAUDE_CODE_USE_OPENAI === 'true' || - process.env.CLAUDE_CODE_USE_GEMINI === '1' || - process.env.CLAUDE_CODE_USE_GEMINI === 'true' || - process.env.CLAUDE_CODE_USE_GITHUB === '1' || - process.env.CLAUDE_CODE_USE_GITHUB === 'true' + isEnvTruthy(process.env.CLAUDE_CODE_USE_OPENAI) || + isEnvTruthy(process.env.CLAUDE_CODE_USE_GEMINI) || + isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB) ) { const openaiMax = getOpenAIMaxOutputTokens(model) if (openaiMax !== undefined) { diff --git a/src/utils/model/openaiContextWindows.ts b/src/utils/model/openaiContextWindows.ts index 4a31a8e5..6cb12c37 100644 --- a/src/utils/model/openaiContextWindows.ts +++ b/src/utils/model/openaiContextWindows.ts @@ -44,6 +44,11 @@ const OPENAI_CONTEXT_WINDOWS: Record = { 'google/gemini-2.0-flash':1_048_576, 'google/gemini-2.5-pro': 1_048_576, + // Google (native via CLAUDE_CODE_USE_GEMINI) + 'gemini-2.0-flash': 1_048_576, + 'gemini-2.5-pro': 1_048_576, + 'gemini-2.5-flash': 1_048_576, + // Ollama local models 'llama3.3:70b': 8_192, 'llama3.1:8b': 8_192, @@ -94,7 +99,12 @@ const OPENAI_MAX_OUTPUT_TOKENS: Record = { // Google (via OpenRouter) 'google/gemini-2.0-flash': 8_192, - 
'google/gemini-2.5-pro': 32_768, + 'google/gemini-2.5-pro': 65_536, + + // Google (native via CLAUDE_CODE_USE_GEMINI) + 'gemini-2.0-flash': 8_192, + 'gemini-2.5-pro': 65_536, + 'gemini-2.5-flash': 65_536, // Ollama local models (conservative safe defaults) 'llama3.3:70b': 4_096, From 5ccda35941ba2c14308f73bb431a72f08a76643c Mon Sep 17 00:00:00 2001 From: Vasanthdev2004 Date: Thu, 2 Apr 2026 18:18:48 +0530 Subject: [PATCH 22/25] fix: highlight selected slash suggestion --- .../PromptInputFooterSuggestions.test.tsx | 36 ++ .../PromptInputFooterSuggestions.tsx | 470 ++++++++---------- 2 files changed, 232 insertions(+), 274 deletions(-) create mode 100644 src/components/PromptInput/PromptInputFooterSuggestions.test.tsx diff --git a/src/components/PromptInput/PromptInputFooterSuggestions.test.tsx b/src/components/PromptInput/PromptInputFooterSuggestions.test.tsx new file mode 100644 index 00000000..c03b2432 --- /dev/null +++ b/src/components/PromptInput/PromptInputFooterSuggestions.test.tsx @@ -0,0 +1,36 @@ +import figures from 'figures' +import React from 'react' +import { describe, expect, it } from 'bun:test' +import { renderToString } from '../../utils/staticRender.js' +import { + PromptInputFooterSuggestions, + type SuggestionItem, +} from './PromptInputFooterSuggestions.js' + +describe('PromptInputFooterSuggestions', () => { + it('renders a visible marker for the selected suggestion', async () => { + const suggestions: SuggestionItem[] = [ + { + id: 'command-help', + displayText: '/help', + description: 'Show help', + }, + { + id: 'command-doctor', + displayText: '/doctor', + description: 'Run diagnostics', + }, + ] + + const output = await renderToString( + , + 80, + ) + + expect(output).toContain(`${figures.pointer} /doctor`) + expect(output).toContain(' /help') + }) +}) diff --git a/src/components/PromptInput/PromptInputFooterSuggestions.tsx b/src/components/PromptInput/PromptInputFooterSuggestions.tsx index f7337b29..2d7d9bd2 100644 --- a/src/components/PromptInput/PromptInputFooterSuggestions.tsx +++ b/src/components/PromptInput/PromptInputFooterSuggestions.tsx @@ -1,293 +1,215 @@ -import { c as _c } from "react-compiler-runtime"; -import * as React from 'react'; -import { memo, type ReactNode } from 'react'; -import { useTerminalSize } from '../../hooks/useTerminalSize.js'; -import { stringWidth } from '../../ink/stringWidth.js'; -import { Box, Text } from '../../ink.js'; -import { truncatePathMiddle, truncateToWidth } from '../../utils/format.js'; -import type { Theme } from '../../utils/theme.js'; +import figures from 'figures' +import * as React from 'react' +import { memo, type ReactNode } from 'react' +import { useTerminalSize } from '../../hooks/useTerminalSize.js' +import { stringWidth } from '../../ink/stringWidth.js' +import { Box, Text } from '../../ink.js' +import { truncatePathMiddle, truncateToWidth } from '../../utils/format.js' +import type { Theme } from '../../utils/theme.js' + export type SuggestionItem = { - id: string; - displayText: string; - tag?: string; - description?: string; - metadata?: unknown; - color?: keyof Theme; -}; -export type SuggestionType = 'command' | 'file' | 'directory' | 'agent' | 'shell' | 'custom-title' | 'slack-channel' | 'none'; -export const OVERLAY_MAX_ITEMS = 5; + id: string + displayText: string + tag?: string + description?: string + metadata?: unknown + color?: keyof Theme +} + +export type SuggestionType = + | 'command' + | 'file' + | 'directory' + | 'agent' + | 'shell' + | 'custom-title' + | 'slack-channel' + | 'none' + +export 
const OVERLAY_MAX_ITEMS = 5 + +const SELECTED_PREFIX = `${figures.pointer} ` +const UNSELECTED_PREFIX = ' ' +const PREFIX_WIDTH = stringWidth(SELECTED_PREFIX) -/** - * Get the icon for a suggestion based on its type - * Icons: + for files, ◇ for MCP resources, * for agents - */ function getIcon(itemId: string): string { - if (itemId.startsWith('file-')) return '+'; - if (itemId.startsWith('mcp-resource-')) return '◇'; - if (itemId.startsWith('agent-')) return '*'; - return '+'; + if (itemId.startsWith('file-')) return '+' + if (itemId.startsWith('mcp-resource-')) return '◇' + if (itemId.startsWith('agent-')) return '*' + return '+' } -/** - * Check if an item is a unified suggestion type (file, mcp-resource, or agent) - */ function isUnifiedSuggestion(itemId: string): boolean { - return itemId.startsWith('file-') || itemId.startsWith('mcp-resource-') || itemId.startsWith('agent-'); + return ( + itemId.startsWith('file-') || + itemId.startsWith('mcp-resource-') || + itemId.startsWith('agent-') + ) } -const SuggestionItemRow = memo(function SuggestionItemRow(t0) { - const $ = _c(36); - const { - item, - maxColumnWidth, - isSelected - } = t0; - const columns = useTerminalSize().columns; - const isUnified = isUnifiedSuggestion(item.id); - if (isUnified) { - let t1; - if ($[0] !== item.id) { - t1 = getIcon(item.id); - $[0] = item.id; - $[1] = t1; - } else { - t1 = $[1]; - } - const icon = t1; - const textColor = isSelected ? "suggestion" : undefined; - const dimColor = !isSelected; - const isFile = item.id.startsWith("file-"); - const isMcpResource = item.id.startsWith("mcp-resource-"); - const separatorWidth = item.description ? 3 : 0; - let displayText; + +const SuggestionItemRow = memo(function SuggestionItemRow({ + item, + maxColumnWidth, + isSelected, +}: { + item: SuggestionItem + maxColumnWidth?: number + isSelected: boolean +}): ReactNode { + const columns = useTerminalSize().columns + const selectionPrefix = isSelected ? SELECTED_PREFIX : UNSELECTED_PREFIX + + if (isUnifiedSuggestion(item.id)) { + const icon = getIcon(item.id) + const textColor: keyof Theme | undefined = isSelected + ? 'suggestion' + : undefined + const dimColor = !isSelected + const isFile = item.id.startsWith('file-') + const isMcpResource = item.id.startsWith('mcp-resource-') + const iconWidth = 2 + const paddingWidth = 4 + const separatorWidth = item.description ? 3 : 0 + + let displayText: string if (isFile) { - let t2; - if ($[2] !== item.description) { - t2 = item.description ? Math.min(20, stringWidth(item.description)) : 0; - $[2] = item.description; - $[3] = t2; - } else { - t2 = $[3]; - } - const descReserve = t2; - const maxPathLength = columns - 2 - 4 - separatorWidth - descReserve; - let t3; - if ($[4] !== item.displayText || $[5] !== maxPathLength) { - t3 = truncatePathMiddle(item.displayText, maxPathLength); - $[4] = item.displayText; - $[5] = maxPathLength; - $[6] = t3; - } else { - t3 = $[6]; - } - displayText = t3; + const descReserve = item.description + ? 
Math.min(20, stringWidth(item.description)) + : 0 + const maxPathLength = + columns - + PREFIX_WIDTH - + iconWidth - + paddingWidth - + separatorWidth - + descReserve + displayText = truncatePathMiddle(item.displayText, maxPathLength) + } else if (isMcpResource) { + displayText = truncateToWidth(item.displayText, 30) } else { - if (isMcpResource) { - let t2; - if ($[7] !== item.displayText) { - t2 = truncateToWidth(item.displayText, 30); - $[7] = item.displayText; - $[8] = t2; - } else { - t2 = $[8]; - } - displayText = t2; - } else { - displayText = item.displayText; - } + displayText = item.displayText } - const availableWidth = columns - 2 - stringWidth(displayText) - separatorWidth - 4; - let lineContent; + + const availableWidth = + columns - + PREFIX_WIDTH - + iconWidth - + stringWidth(displayText) - + separatorWidth - + paddingWidth + + let lineContent: string if (item.description) { - const maxDescLength = Math.max(0, availableWidth); - let t2; - if ($[9] !== item.description || $[10] !== maxDescLength) { - t2 = truncateToWidth(item.description.replace(/\s+/g, " "), maxDescLength); - $[9] = item.description; - $[10] = maxDescLength; - $[11] = t2; - } else { - t2 = $[11]; - } - const truncatedDesc = t2; - lineContent = `${icon} ${displayText} – ${truncatedDesc}`; + const truncatedDesc = truncateToWidth( + item.description.replace(/\s+/g, ' '), + Math.max(0, availableWidth), + ) + lineContent = `${selectionPrefix}${icon} ${displayText} - ${truncatedDesc}` } else { - lineContent = `${icon} ${displayText}`; + lineContent = `${selectionPrefix}${icon} ${displayText}` } - let t2; - if ($[12] !== dimColor || $[13] !== lineContent || $[14] !== textColor) { - t2 = {lineContent}; - $[12] = dimColor; - $[13] = lineContent; - $[14] = textColor; - $[15] = t2; - } else { - t2 = $[15]; - } - return t2; + + return ( + + {lineContent} + + ) } - const maxNameWidth = Math.floor(columns * 0.4); - const displayTextWidth = Math.min(maxColumnWidth ?? stringWidth(item.displayText) + 5, maxNameWidth); - const textColor_0 = item.color || (isSelected ? "suggestion" : undefined); - const shouldDim = !isSelected; - let displayText_0 = item.displayText; - if (stringWidth(displayText_0) > displayTextWidth - 2) { - const t1 = displayTextWidth - 2; - let t2; - if ($[16] !== displayText_0 || $[17] !== t1) { - t2 = truncateToWidth(displayText_0, t1); - $[16] = displayText_0; - $[17] = t1; - $[18] = t2; - } else { - t2 = $[18]; - } - displayText_0 = t2; + + const maxNameWidth = Math.floor(columns * 0.4) + const displayTextWidth = Math.min( + maxColumnWidth ?? stringWidth(item.displayText) + 5, + maxNameWidth, + ) + const textColor = item.color || (isSelected ? 'suggestion' : undefined) + const shouldDim = !isSelected + + let displayText = item.displayText + if (stringWidth(displayText) > displayTextWidth - 2) { + displayText = truncateToWidth(displayText, displayTextWidth - 2) } - const paddedDisplayText = displayText_0 + " ".repeat(Math.max(0, displayTextWidth - stringWidth(displayText_0))); - const tagText = item.tag ? `[${item.tag}] ` : ""; - const tagWidth = stringWidth(tagText); - const descriptionWidth = Math.max(0, columns - displayTextWidth - tagWidth - 4); - let t1; - if ($[19] !== descriptionWidth || $[20] !== item.description) { - t1 = item.description ? 
truncateToWidth(item.description.replace(/\s+/g, " "), descriptionWidth) : ""; - $[19] = descriptionWidth; - $[20] = item.description; - $[21] = t1; - } else { - t1 = $[21]; - } - const truncatedDescription = t1; - let t2; - if ($[22] !== paddedDisplayText || $[23] !== shouldDim || $[24] !== textColor_0) { - t2 = {paddedDisplayText}; - $[22] = paddedDisplayText; - $[23] = shouldDim; - $[24] = textColor_0; - $[25] = t2; - } else { - t2 = $[25]; - } - let t3; - if ($[26] !== tagText) { - t3 = tagText ? {tagText} : null; - $[26] = tagText; - $[27] = t3; - } else { - t3 = $[27]; - } - const t4 = isSelected ? "suggestion" : undefined; - const t5 = !isSelected; - let t6; - if ($[28] !== t4 || $[29] !== t5 || $[30] !== truncatedDescription) { - t6 = {truncatedDescription}; - $[28] = t4; - $[29] = t5; - $[30] = truncatedDescription; - $[31] = t6; - } else { - t6 = $[31]; - } - let t7; - if ($[32] !== t2 || $[33] !== t3 || $[34] !== t6) { - t7 = {t2}{t3}{t6}; - $[32] = t2; - $[33] = t3; - $[34] = t6; - $[35] = t7; - } else { - t7 = $[35]; - } - return t7; -}); + + const paddedDisplayText = + selectionPrefix + + displayText + + ' '.repeat(Math.max(0, displayTextWidth - stringWidth(displayText))) + const tagText = item.tag ? `[${item.tag}] ` : '' + const tagWidth = stringWidth(tagText) + const descriptionWidth = Math.max( + 0, + columns - PREFIX_WIDTH - displayTextWidth - tagWidth - 4, + ) + const truncatedDescription = item.description + ? truncateToWidth(item.description.replace(/\s+/g, ' '), descriptionWidth) + : '' + + return ( + + + {paddedDisplayText} + + {tagText ? {tagText} : null} + + {truncatedDescription} + + + ) +}) + type Props = { - suggestions: SuggestionItem[]; - selectedSuggestion: number; - maxColumnWidth?: number; - /** - * When true, the suggestions are rendered inside a position=absolute - * overlay. We omit minHeight and flex-end so the y-clamp in the - * renderer doesn't push fewer items down into the prompt area. - */ - overlay?: boolean; -}; -export function PromptInputFooterSuggestions(t0) { - const $ = _c(22); - const { - suggestions, - selectedSuggestion, - maxColumnWidth: maxColumnWidthProp, - overlay - } = t0; - const { - rows - } = useTerminalSize(); - const maxVisibleItems = overlay ? OVERLAY_MAX_ITEMS : Math.min(6, Math.max(1, rows - 3)); + suggestions: SuggestionItem[] + selectedSuggestion: number + maxColumnWidth?: number + overlay?: boolean +} + +export function PromptInputFooterSuggestions({ + suggestions, + selectedSuggestion, + maxColumnWidth: maxColumnWidthProp, + overlay, +}: Props): ReactNode { + const { rows } = useTerminalSize() + const maxVisibleItems = overlay ? OVERLAY_MAX_ITEMS : Math.min(6, Math.max(1, rows - 3)) + if (suggestions.length === 0) { - return null; + return null } - let t1; - if ($[0] !== maxColumnWidthProp || $[1] !== suggestions) { - t1 = maxColumnWidthProp ?? Math.max(...suggestions.map(_temp)) + 5; - $[0] = maxColumnWidthProp; - $[1] = suggestions; - $[2] = t1; - } else { - t1 = $[2]; - } - const maxColumnWidth = t1; - const startIndex = Math.max(0, Math.min(selectedSuggestion - Math.floor(maxVisibleItems / 2), suggestions.length - maxVisibleItems)); - const endIndex = Math.min(startIndex + maxVisibleItems, suggestions.length); - let T0; - let t2; - let t3; - let t4; - if ($[3] !== endIndex || $[4] !== maxColumnWidth || $[5] !== overlay || $[6] !== selectedSuggestion || $[7] !== startIndex || $[8] !== suggestions) { - const visibleItems = suggestions.slice(startIndex, endIndex); - T0 = Box; - t2 = "column"; - t3 = overlay ? 
undefined : "flex-end"; - let t5; - if ($[13] !== maxColumnWidth || $[14] !== selectedSuggestion || $[15] !== suggestions) { - t5 = item_0 => ; - $[13] = maxColumnWidth; - $[14] = selectedSuggestion; - $[15] = suggestions; - $[16] = t5; - } else { - t5 = $[16]; - } - t4 = visibleItems.map(t5); - $[3] = endIndex; - $[4] = maxColumnWidth; - $[5] = overlay; - $[6] = selectedSuggestion; - $[7] = startIndex; - $[8] = suggestions; - $[9] = T0; - $[10] = t2; - $[11] = t3; - $[12] = t4; - } else { - T0 = $[9]; - t2 = $[10]; - t3 = $[11]; - t4 = $[12]; - } - let t5; - if ($[17] !== T0 || $[18] !== t2 || $[19] !== t3 || $[20] !== t4) { - t5 = {t4}; - $[17] = T0; - $[18] = t2; - $[19] = t3; - $[20] = t4; - $[21] = t5; - } else { - t5 = $[21]; - } - return t5; + + const maxColumnWidth = + maxColumnWidthProp ?? + Math.max(...suggestions.map(item => stringWidth(item.displayText))) + 5 + + const startIndex = Math.max( + 0, + Math.min( + selectedSuggestion - Math.floor(maxVisibleItems / 2), + suggestions.length - maxVisibleItems, + ), + ) + const endIndex = Math.min(startIndex + maxVisibleItems, suggestions.length) + const visibleItems = suggestions.slice(startIndex, endIndex) + + return ( + + {visibleItems.map(item => ( + + ))} + + ) } -function _temp(item) { - return stringWidth(item.displayText); -} -export default memo(PromptInputFooterSuggestions); -//# sourceMappingURL=data:application/json;charset=utf-8;base64,{"version":3,"names":["React","memo","ReactNode","useTerminalSize","stringWidth","Box","Text","truncatePathMiddle","truncateToWidth","Theme","SuggestionItem","id","displayText","tag","description","metadata","color","SuggestionType","OVERLAY_MAX_ITEMS","getIcon","itemId","startsWith","isUnifiedSuggestion","SuggestionItemRow","t0","$","_c","item","maxColumnWidth","isSelected","columns","isUnified","t1","icon","textColor","undefined","dimColor","isFile","isMcpResource","separatorWidth","t2","Math","min","descReserve","maxPathLength","t3","availableWidth","lineContent","maxDescLength","max","replace","truncatedDesc","maxNameWidth","floor","displayTextWidth","textColor_0","shouldDim","displayText_0","paddedDisplayText","repeat","tagText","tagWidth","descriptionWidth","truncatedDescription","t4","t5","t6","t7","Props","suggestions","selectedSuggestion","overlay","PromptInputFooterSuggestions","maxColumnWidthProp","rows","maxVisibleItems","length","map","_temp","startIndex","endIndex","T0","visibleItems","slice","item_0"],"sources":["PromptInputFooterSuggestions.tsx"],"sourcesContent":["import * as React from 'react'\nimport { memo, type ReactNode } from 'react'\nimport { useTerminalSize } from '../../hooks/useTerminalSize.js'\nimport { stringWidth } from '../../ink/stringWidth.js'\nimport { Box, Text } from '../../ink.js'\nimport { truncatePathMiddle, truncateToWidth } from '../../utils/format.js'\nimport type { Theme } from '../../utils/theme.js'\n\nexport type SuggestionItem = {\n  id: string\n  displayText: string\n  tag?: string\n  description?: string\n  metadata?: unknown\n  color?: keyof Theme\n}\n\nexport type SuggestionType =\n  | 'command'\n  | 'file'\n  | 'directory'\n  | 'agent'\n  | 'shell'\n  | 'custom-title'\n  | 'slack-channel'\n  | 'none'\n\nexport const OVERLAY_MAX_ITEMS = 5\n\n/**\n * Get the icon for a suggestion based on its type\n * Icons: + for files, ◇ for MCP resources, * for agents\n */\nfunction getIcon(itemId: string): string {\n  if (itemId.startsWith('file-')) return '+'\n  if (itemId.startsWith('mcp-resource-')) return '◇'\n  if (itemId.startsWith('agent-')) return 
…"],"mappings":"…","ignoreList":[]}
\ No newline at end of file
+
+export default memo(PromptInputFooterSuggestions)

From 118b0793e09e651a743bdb8a86787638b08a6098 Mon Sep 17 00:00:00 2001
From: Vasanthdev2004
Date: Thu, 2 Apr 2026 18:25:52 +0530
Subject: [PATCH 23/25] fix: move slash suggestion highlight with selection

---
 .../PromptInputFooterSuggestions.tsx | 32 ++++++++++++++++---
 1 file changed, 28 insertions(+), 4 deletions(-)

diff --git a/src/components/PromptInput/PromptInputFooterSuggestions.tsx b/src/components/PromptInput/PromptInputFooterSuggestions.tsx
index 2d7d9bd2..75c9d75b 100644
--- a/src/components/PromptInput/PromptInputFooterSuggestions.tsx
+++ b/src/components/PromptInput/PromptInputFooterSuggestions.tsx
@@ -62,6 +62,9 @@ const 
SuggestionItemRow = memo(function SuggestionItemRow({ if (isUnifiedSuggestion(item.id)) { const icon = getIcon(item.id) const textColor: keyof Theme | undefined = isSelected + ? 'inverseText' + : undefined + const backgroundColor: keyof Theme | undefined = isSelected ? 'suggestion' : undefined const dimColor = !isSelected @@ -112,6 +115,7 @@ const SuggestionItemRow = memo(function SuggestionItemRow({ return ( - + {paddedDisplayText} - {tagText ? {tagText} : null} - + {tagText ? ( + + {tagText} + + ) : null} + {truncatedDescription} From 4d0886a4feed7de8512e6d1428bddea290f3bcb0 Mon Sep 17 00:00:00 2001 From: Vasanthdev2004 Date: Thu, 2 Apr 2026 18:42:56 +0530 Subject: [PATCH 24/25] fix: keep slash highlight in sync in fullscreen --- .../PromptInputFooterSuggestions.tsx | 68 +++++++------------ src/context/promptOverlayContext.tsx | 44 ++++++++++-- 2 files changed, 63 insertions(+), 49 deletions(-) diff --git a/src/components/PromptInput/PromptInputFooterSuggestions.tsx b/src/components/PromptInput/PromptInputFooterSuggestions.tsx index 75c9d75b..de1e7c95 100644 --- a/src/components/PromptInput/PromptInputFooterSuggestions.tsx +++ b/src/components/PromptInput/PromptInputFooterSuggestions.tsx @@ -58,15 +58,13 @@ const SuggestionItemRow = memo(function SuggestionItemRow({ }): ReactNode { const columns = useTerminalSize().columns const selectionPrefix = isSelected ? SELECTED_PREFIX : UNSELECTED_PREFIX + const rowBackgroundColor: keyof Theme | undefined = isSelected + ? 'suggestion' + : undefined + const textColor: keyof Theme | undefined = isSelected ? 'inverseText' : undefined if (isUnifiedSuggestion(item.id)) { const icon = getIcon(item.id) - const textColor: keyof Theme | undefined = isSelected - ? 'inverseText' - : undefined - const backgroundColor: keyof Theme | undefined = isSelected - ? 'suggestion' - : undefined const dimColor = !isSelected const isFile = item.id.startsWith('file-') const isMcpResource = item.id.startsWith('mcp-resource-') @@ -113,15 +111,11 @@ const SuggestionItemRow = memo(function SuggestionItemRow({ } return ( - - {lineContent} - + + + {lineContent} + + ) } @@ -130,10 +124,7 @@ const SuggestionItemRow = memo(function SuggestionItemRow({ maxColumnWidth ?? stringWidth(item.displayText) + 5, maxNameWidth, ) - const textColor = isSelected ? 'inverseText' : item.color - const backgroundColor: keyof Theme | undefined = isSelected - ? 'suggestion' - : undefined + const displayTextColor = isSelected ? 'inverseText' : item.color const shouldDim = !isSelected let displayText = item.displayText @@ -156,32 +147,21 @@ const SuggestionItemRow = memo(function SuggestionItemRow({ : '' return ( - - - {paddedDisplayText} - - {tagText ? ( - - {tagText} + + + + {paddedDisplayText} + + {tagText ? ( + + {tagText} + + ) : null} + + {truncatedDescription} - ) : null} - - {truncatedDescription} - + ) }) @@ -226,7 +206,7 @@ export function PromptInputFooterSuggestions({ > {visibleItems.map(item => ( { if (!set) { return; } set(data); - return () => set(null); }; t1 = [set, data]; $[0] = data; @@ -91,7 +92,23 @@ export function useSetPromptOverlay(data) { t0 = $[2]; t1 = $[3]; } + if ($[4] !== set) { + t2 = () => { + if (!set) { + return; + } + return () => set(null); + }; + t3 = [set]; + $[4] = set; + $[5] = t2; + $[6] = t3; + } else { + t2 = $[5]; + t3 = $[6]; + } useEffect(t0, t1); + useEffect(t2, t3); } /** @@ -99,17 +116,18 @@ export function useSetPromptOverlay(data) { * No-op outside the provider (non-fullscreen renders inline instead). 
*/ export function useSetPromptOverlayDialog(node) { - const $ = _c(4); + const $ = _c(8); const set = useContext(SetDialogContext); let t0; let t1; + let t2; + let t3; if ($[0] !== node || $[1] !== set) { t0 = () => { if (!set) { return; } set(node); - return () => set(null); }; t1 = [set, node]; $[0] = node; @@ -120,6 +138,22 @@ export function useSetPromptOverlayDialog(node) { t0 = $[2]; t1 = $[3]; } + if ($[4] !== set) { + t2 = () => { + if (!set) { + return; + } + return () => set(null); + }; + t3 = [set]; + $[4] = set; + $[5] = t2; + $[6] = t3; + } else { + t2 = $[5]; + t3 = $[6]; + } useEffect(t0, t1); + useEffect(t2, t3); } -//# sourceMappingURL=data:application/json;charset=utf-8;base64,{"version":3,"names":["React","createContext","ReactNode","useContext","useEffect","useState","SuggestionItem","PromptOverlayData","suggestions","selectedSuggestion","maxColumnWidth","Setter","d","T","DataContext","SetContext","DialogContext","SetDialogContext","PromptOverlayProvider","t0","$","_c","children","data","setData","dialog","setDialog","t1","t2","usePromptOverlay","usePromptOverlayDialog","useSetPromptOverlay","set","useSetPromptOverlayDialog","node"],"sources":["promptOverlayContext.tsx"],"sourcesContent":["/**\n * Portal for content that floats above the prompt so it escapes\n * FullscreenLayout's bottom-slot `overflowY:hidden` clip.\n *\n * The clip is load-bearing (CC-668: tall pastes squash the ScrollBox\n * without it), but floating overlays use `position:absolute\n * bottom=\"100%\"` to float above the prompt — and Ink's clip stack\n * intersects ALL descendants, so they were clipped to ~1 row.\n *\n * Two channels:\n * - `useSetPromptOverlay` — slash-command suggestion data (structured,\n *   written by PromptInputFooter)\n * - `useSetPromptOverlayDialog` — arbitrary dialog node (e.g.\n *   AutoModeOptInDialog, written by PromptInput)\n *\n * FullscreenLayout reads both and renders them outside the clipped slot.\n *\n * Split into data/setter context pairs so writers never re-render on\n * their own writes — the setter contexts are stable.\n */\nimport React, {\n  createContext,\n  type ReactNode,\n  useContext,\n  useEffect,\n  useState,\n} from 'react'\nimport type { SuggestionItem } from '../components/PromptInput/PromptInputFooterSuggestions.js'\n\nexport type PromptOverlayData = {\n  suggestions: SuggestionItem[]\n  selectedSuggestion: number\n  maxColumnWidth?: number\n}\n\ntype Setter<T> = (d: T | null) => void\n\nconst DataContext = createContext<PromptOverlayData | null>(null)\nconst SetContext = createContext<Setter<PromptOverlayData> | null>(null)\nconst DialogContext = createContext<ReactNode>(null)\nconst SetDialogContext = createContext<Setter<ReactNode> | null>(null)\n\nexport function PromptOverlayProvider({\n  children,\n}: {\n  children: ReactNode\n}): ReactNode {\n  const [data, setData] = useState<PromptOverlayData | null>(null)\n  const [dialog, setDialog] = useState<ReactNode>(null)\n  return (\n    <SetContext.Provider value={setData}>\n      <SetDialogContext.Provider value={setDialog}>\n        <DataContext.Provider value={data}>\n          <DialogContext.Provider value={dialog}>\n            {children}\n          </DialogContext.Provider>\n        </DataContext.Provider>\n      </SetDialogContext.Provider>\n    </SetContext.Provider>\n  )\n}\n\nexport function usePromptOverlay(): PromptOverlayData | null {\n  return useContext(DataContext)\n}\n\nexport function usePromptOverlayDialog(): ReactNode {\n  return useContext(DialogContext)\n}\n\n/**\n * 
…"],"mappings":"…","ignoreList":[]}
\ No newline at end of file
+//# sourceMappingURL=data:application/json;charset=utf-8;base64,{"version":3,…,"ignoreList":[]}

From 8f50f17674f9b8da94abff76a47ccb93061f1c47 Mon Sep 17 00:00:00 2001
From: Meet Patel
Date: Thu, 2 Apr 2026 17:17:14 +0530
Subject: [PATCH 25/25] feat: refactor model handling and reasoning effort
 across navigation, typeahead, OpenAI/Codex providers, API shim, configs,
 and UI

Adds EffortPicker, new model mappings/options, unique suggestion IDs, and
effort utilities; removes deprecated aliases; defaults Codex to gpt-5.4;
improves selection logic and status display.

---
src/commands/effort/effort.tsx | 52 +++++- .../CustomSelect/use-select-navigation.ts | 111 +++++++------ src/components/EffortPicker.tsx | 152 ++++++++++++++++++ src/components/StartupScreen.ts | 34 +++- src/hooks/useTypeahead.tsx | 16 +- src/services/api/openaiShim.ts | 15 +- src/services/api/providerConfig.ts | 50 +++++- src/utils/effort.ts | 52 +++++- src/utils/model/aliases.ts | 2 - src/utils/model/model.ts | 43 +++-- src/utils/model/modelOptions.ts | 82 ++++++++-- src/utils/model/modelStrings.ts | 5 +- src/utils/model/providers.ts | 18 ++- src/utils/status.tsx | 89 +++++++--- src/utils/suggestions/commandSuggestions.ts | 30 +++- 15 files changed, 612 insertions(+), 139 deletions(-) create mode 100644 src/components/EffortPicker.tsx diff --git a/src/commands/effort/effort.tsx b/src/commands/effort/effort.tsx index 0dadd606..1cbc83d1 100644 --- a/src/commands/effort/effort.tsx +++ b/src/commands/effort/effort.tsx @@ -4,7 +4,8 @@ import { useMainLoopModel } from '../../hooks/useMainLoopModel.js'; import { type AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS, logEvent } from '../../services/analytics/index.js'; import { useAppState, useSetAppState } from '../../state/AppState.js'; import type { LocalJSXCommandOnDone } from '../../types/command.js'; -import { type EffortValue, getDisplayedEffortLevel, getEffortEnvOverride, getEffortValueDescription, isEffortLevel, toPersistableEffort } from '../../utils/effort.js'; +import { type EffortValue, getDisplayedEffortLevel, getEffortEnvOverride, getEffortValueDescription, isEffortLevel, isOpenAIEffortLevel, modelUsesOpenAIEffort, toPersistableEffort } from '../../utils/effort.js'; +import { EffortPicker } from '../../components/EffortPicker.js'; import { updateSettingsForSource } from '../../utils/settings/settings.js'; const COMMON_HELP_ARGS = ['help', '-h', '--help']; type EffortCommandResult = { @@ -109,12 +110,15 @@ export function executeEffort(args: string): EffortCommandResult { if (normalized === 'auto' || normalized === 'unset') { return unsetEffortLevel(); } - if (!isEffortLevel(normalized)) { - return { - message: `Invalid argument: ${args}. Valid options are: low, medium, high, max, auto` - }; + if (isEffortLevel(normalized)) { + return setEffortValue(normalized); } - return setEffortValue(normalized); + if (isOpenAIEffortLevel(normalized)) { + return setEffortValue(normalized); + } + return { + message: `Invalid argument: ${args}. 
Valid options are: low, medium, high, max, xhigh, auto` + }; } function ShowCurrentEffort(t0) { const { @@ -174,10 +178,44 @@ export async function call(onDone: LocalJSXCommandOnDone, _context: unknown, arg onDone('Usage: /effort [low|medium|high|max|auto]\n\nEffort levels:\n- low: Quick, straightforward implementation\n- medium: Balanced approach with standard testing\n- high: Comprehensive implementation with extensive testing\n- max: Maximum capability with deepest reasoning (Opus 4.6 only)\n- auto: Use the default effort level for your model'); return; } - if (!args || args === 'current' || args === 'status') { + if (args === 'current' || args === 'status') { return ; } + if (!args) { + return ; + } const result = executeEffort(args); return ; } + +function EffortPickerWrapper({ onDone }: { onDone: LocalJSXCommandOnDone }) { + const setAppState = useSetAppState(); + const model = useMainLoopModel(); + const usesOpenAIEffort = modelUsesOpenAIEffort(model); + + function handleSelect(effort: EffortValue | undefined) { + const persistable = toPersistableEffort(effort); + if (persistable !== undefined) { + updateSettingsForSource('userSettings', { + effortLevel: persistable + }); + } + logEvent('tengu_effort_command', { + effort: (effort ?? 'auto') as AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS + }); + setAppState(prev => ({ + ...prev, + effortValue: effort + })); + const description = effort ? getEffortValueDescription(effort) : 'Use default effort level for your model'; + const suffix = persistable !== undefined ? '' : ' (this session only)'; + onDone(`Set effort level to ${effort ?? 'auto'}${suffix}: ${description}`); + } + + function handleCancel() { + onDone('Cancelled'); + } + + return ; +} //# sourceMappingURL=data:application/json;charset=utf-8;base64,{"version":3,"names":["React","useMainLoopModel","AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS","logEvent","useAppState","useSetAppState","LocalJSXCommandOnDone","EffortValue","getDisplayedEffortLevel","getEffortEnvOverride","getEffortValueDescription","isEffortLevel","toPersistableEffort","updateSettingsForSource","COMMON_HELP_ARGS","EffortCommandResult","message","effortUpdate","value","setEffortValue","effortValue","persistable","undefined","result","effortLevel","error","effort","envOverride","envRaw","process","env","CLAUDE_CODE_EFFORT_LEVEL","description","suffix","showCurrentEffort","appStateEffort","model","effectiveValue","level","unsetEffortLevel","executeEffort","args","normalized","toLowerCase","ShowCurrentEffort","t0","onDone","_temp","s","ApplyEffortAndClose","$","_c","setAppState","t1","t2","prev","useEffect","call","_context","Promise","ReactNode","trim","includes"],"sources":["effort.tsx"],"sourcesContent":["import * as React from 'react'\nimport { useMainLoopModel } from '../../hooks/useMainLoopModel.js'\nimport {\n  type AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS,\n  logEvent,\n} from '../../services/analytics/index.js'\nimport { useAppState, useSetAppState } from '../../state/AppState.js'\nimport type { LocalJSXCommandOnDone } from '../../types/command.js'\nimport {\n  type EffortValue,\n  getDisplayedEffortLevel,\n  getEffortEnvOverride,\n  getEffortValueDescription,\n  isEffortLevel,\n  toPersistableEffort,\n} from '../../utils/effort.js'\nimport { updateSettingsForSource } from '../../utils/settings/settings.js'\n\nconst COMMON_HELP_ARGS = ['help', '-h', '--help']\n\ntype EffortCommandResult = {\n  message: string\n  effortUpdate?: { value: EffortValue | undefined 
[generated source map for the compiled /effort command bundle omitted]
diff --git a/src/components/CustomSelect/use-select-navigation.ts b/src/components/CustomSelect/use-select-navigation.ts
index 7ecb4e71..544bbfa7 100644
--- a/src/components/CustomSelect/use-select-navigation.ts
+++ b/src/components/CustomSelect/use-select-navigation.ts
@@ -84,44 +84,44 @@ const reducer = (state: State, action: Action): State => {
         return state
       }
 
-      // Wrap to first item if at the end
-      const next = item.next || state.optionMap.first
+      // If there's a next item in the list, go to it
+      if (item.next) {
+        const needsToScroll = item.next.index >= state.visibleToIndex
 
-      if (!next) {
+        if (!needsToScroll) {
+          return {
+            ...state,
+            focusedValue: item.next.value,
+          }
+        }
+
+        const nextVisibleToIndex = Math.min(
+          state.optionMap.size,
+          state.visibleToIndex + 1,
+        )
+
+        const nextVisibleFromIndex = nextVisibleToIndex - state.visibleOptionCount
+
+        return {
+          ...state,
+          focusedValue: item.next.value,
+          visibleFromIndex: nextVisibleFromIndex,
+          visibleToIndex: nextVisibleToIndex,
+        }
+      }
+
+      // No next item - wrap to first item
+      const firstItem = state.optionMap.first
+      if (!firstItem) {
         return state
       }
 
       // When wrapping to first, reset viewport to start
-      if (!item.next && next === state.optionMap.first) {
-        return {
-          ...state,
-          focusedValue: next.value,
-          visibleFromIndex: 0,
-          visibleToIndex: state.visibleOptionCount,
-        }
-      }
-
-      const needsToScroll = next.index >= state.visibleToIndex
-
-      if (!needsToScroll) {
-        return {
-          ...state,
-          focusedValue: next.value,
-        }
-      }
-
-      const nextVisibleToIndex = Math.min(
-        state.optionMap.size,
-        state.visibleToIndex + 1,
-      )
-
-      const nextVisibleFromIndex = nextVisibleToIndex - state.visibleOptionCount
-
       return {
         ...state,
-        focusedValue: next.value,
-        visibleFromIndex: nextVisibleFromIndex,
-        visibleToIndex: nextVisibleToIndex,
+        focusedValue: firstItem.value,
+        visibleFromIndex: 0,
+        visibleToIndex: state.visibleOptionCount,
       }
     }
 
@@ -136,44 +136,43 @@ const reducer = (state: State, action: Action): State => {
         return state
       }
 
-      // Wrap to last item if at the beginning
-      const previous = item.previous || state.optionMap.last
+      // If there's a previous item in the list, go to it
+      if (item.previous) {
+        const needsToScroll = item.previous.index < state.visibleFromIndex
 
-      if (!previous) {
-        return state
-      }
+        if 
(!needsToScroll) { + return { + ...state, + focusedValue: item.previous.value, + } + } + + const nextVisibleFromIndex = Math.max(0, state.visibleFromIndex - 1) + const nextVisibleToIndex = nextVisibleFromIndex + state.visibleOptionCount - // When wrapping to last, reset viewport to end - if (!item.previous && previous === state.optionMap.last) { - const nextVisibleToIndex = state.optionMap.size - const nextVisibleFromIndex = Math.max( - 0, - nextVisibleToIndex - state.visibleOptionCount, - ) return { ...state, - focusedValue: previous.value, + focusedValue: item.previous.value, visibleFromIndex: nextVisibleFromIndex, visibleToIndex: nextVisibleToIndex, } } - const needsToScroll = previous.index <= state.visibleFromIndex - - if (!needsToScroll) { - return { - ...state, - focusedValue: previous.value, - } + // No previous item - wrap to last item + const lastItem = state.optionMap.last + if (!lastItem) { + return state } - const nextVisibleFromIndex = Math.max(0, state.visibleFromIndex - 1) - - const nextVisibleToIndex = nextVisibleFromIndex + state.visibleOptionCount - + // When wrapping to last, reset viewport to end + const nextVisibleToIndex = state.optionMap.size + const nextVisibleFromIndex = Math.max( + 0, + nextVisibleToIndex - state.visibleOptionCount, + ) return { ...state, - focusedValue: previous.value, + focusedValue: lastItem.value, visibleFromIndex: nextVisibleFromIndex, visibleToIndex: nextVisibleToIndex, } diff --git a/src/components/EffortPicker.tsx b/src/components/EffortPicker.tsx new file mode 100644 index 00000000..2e86509e --- /dev/null +++ b/src/components/EffortPicker.tsx @@ -0,0 +1,152 @@ +import React, { useState } from 'react' +import { Box, Text } from '../ink.js' +import { useMainLoopModel } from '../hooks/useMainLoopModel.js' +import { useAppState, useSetAppState } from '../state/AppState.js' +import type { EffortLevel, OpenAIEffortLevel } from '../utils/effort.js' +import { + getAvailableEffortLevels, + getDisplayedEffortLevel, + getEffortLevelDescription, + getEffortLevelLabel, + getEffortValueDescription, + modelSupportsEffort, + modelUsesOpenAIEffort, + standardEffortToOpenAI, + isOpenAIEffortLevel, +} from '../utils/effort.js' +import { getAPIProvider } from '../utils/model/providers.js' +import { getReasoningEffortForModel } from '../services/api/providerConfig.js' +import { Select } from './CustomSelect/select.js' +import { effortLevelToSymbol } from './EffortIndicator.js' +import { KeyboardShortcutHint } from './design-system/KeyboardShortcutHint.js' +import { Byline } from './design-system/Byline.js' + +type EffortOption = { + label: React.ReactNode + value: string + description: string + isAvailable: boolean +} + +type Props = { + onSelect: (effort: EffortLevel | undefined) => void + onCancel?: () => void +} + +export function EffortPicker({ onSelect, onCancel }: Props) { + const model = useMainLoopModel() + const appStateEffort = useAppState((s: any) => s.effortValue) + const setAppState = useSetAppState() + const provider = getAPIProvider() + const usesOpenAIEffort = modelUsesOpenAIEffort(model) + const availableLevels = getAvailableEffortLevels(model) + const currentDisplayedLevel = getDisplayedEffortLevel(model, appStateEffort) + + // For OpenAI/Codex, get the model's default reasoning effort + const modelReasoningEffort = usesOpenAIEffort ? 
getReasoningEffortForModel(model) : undefined + const defaultEffortForModel = modelReasoningEffort || currentDisplayedLevel + + const options: EffortOption[] = [ + { + label: , + value: 'auto', + description: 'Use the default effort level for your model', + isAvailable: true, + }, + ...availableLevels.map(level => { + const displayLevel = usesOpenAIEffort + ? (level === 'xhigh' ? 'max' : level) + : level + const isCurrent = currentDisplayedLevel === displayLevel + return { + label: ( + + ), + value: level, + description: getEffortLevelDescription(level as EffortLevel), + isAvailable: true, + } + }), + ] + + function handleSelect(value: string) { + if (value === 'auto') { + setAppState(prev => ({ + ...prev, + effortValue: undefined, + })) + onSelect(undefined) + } else { + const effortLevel = value as EffortLevel + setAppState(prev => ({ + ...prev, + effortValue: effortLevel, + })) + onSelect(effortLevel) + } + } + + function handleCancel() { + onCancel?.() + } + + const supportsEffort = modelSupportsEffort(model) + // For OpenAI/Codex, use the model's default reasoning effort as initial focus + // For Claude, use the displayed effort level or 'auto' + const initialFocus = usesOpenAIEffort + ? (modelReasoningEffort || 'auto') + : (appStateEffort ? String(appStateEffort) : 'auto') + + return ( + + + Set effort level + + {usesOpenAIEffort + ? `OpenAI/Codex provider (${provider})` + : supportsEffort + ? `Claude model · ${provider} provider` + : `Effort not supported for this model` + } + + + + +