import * as React from 'react' import type { LocalJSXCommandCall, LocalJSXCommandOnDone } from '../../types/command.js' import { COMMON_HELP_ARGS, COMMON_INFO_ARGS } from '../../constants/xml.js' import { ProviderManager, type ProviderManagerResult, } from '../../components/ProviderManager.js' import TextInput from '../../components/TextInput.js' import { Select, type OptionWithDescription, } from '../../components/CustomSelect/index.js' import { Dialog } from '../../components/design-system/Dialog.js' import { LoadingState } from '../../components/design-system/LoadingState.js' import { useCodexOAuthFlow } from '../../components/useCodexOAuthFlow.js' import { useTerminalSize } from '../../hooks/useTerminalSize.js' import { Box, Text } from '../../ink.js' import { type CodexOAuthTokens, } from '../../services/api/codexOAuth.js' import { DEFAULT_CODEX_BASE_URL, DEFAULT_OPENAI_BASE_URL, isLocalProviderUrl, resolveCodexApiCredentials, resolveProviderRequest, } from '../../services/api/providerConfig.js' import { applySavedProfileToCurrentSession as applySharedProfileToCurrentSession, buildCodexOAuthProfileEnv as buildSharedCodexOAuthProfileEnv, buildCodexProfileEnv, buildGeminiProfileEnv, buildMistralProfileEnv, buildOllamaProfileEnv, buildOpenAIProfileEnv, createProfileFile, DEFAULT_GEMINI_BASE_URL, DEFAULT_GEMINI_MODEL, DEFAULT_MISTRAL_BASE_URL, DEFAULT_MISTRAL_MODEL, deleteProfileFile, loadProfileFile, maskSecretForDisplay, redactSecretValueForDisplay, sanitizeApiKey, sanitizeProviderConfigValue, saveProfileFile, type ProfileEnv, type ProfileFile, type ProviderProfile, } from '../../utils/providerProfile.js' import { getGeminiProjectIdHint, mayHaveGeminiAdcCredentials, } from '../../utils/geminiAuth.js' import { readGeminiAccessToken, saveGeminiAccessToken, } from '../../utils/geminiCredentials.js' import { isBareMode } from '../../utils/envUtils.js' import { getGoalDefaultOpenAIModel, normalizeRecommendationGoal, rankOllamaModels, recommendOllamaModel, type 
RecommendationGoal, } from '../../utils/providerRecommendation.js'
import {
  getOllamaChatBaseUrl,
  getLocalOpenAICompatibleProviderLabel,
  probeOllamaGenerationReadiness,
  type OllamaGenerationReadiness,
} from '../../utils/providerDiscovery.js'

/**
 * Builds the closing message for the provider manager dialog, plus optional
 * meta messages that are injected into the conversation when a provider was
 * activated mid-session.
 *
 * @param result Outcome reported by the ProviderManager component; may be
 *   undefined when the dialog was dismissed without producing a result.
 * @returns The user-facing message, and metaMessages only when the action
 *   was 'activated' with a known provider name.
 */
export function buildProviderManagerCompletion(result?: ProviderManagerResult): {
  message: string
  metaMessages?: string[]
} {
  // Prefer an explicit message from the manager; otherwise derive one from
  // the action ('saved' vs anything else, including undefined).
  const message =
    result?.message ??
    (result?.action === 'saved'
      ? 'Provider profile updated'
      : 'Provider manager closed')
  // Only a mid-session activation produces a meta message instructing the
  // model which provider/model to use for subsequent requests.
  const metaMessages =
    result?.action === 'activated' && result.activeProviderName
      ? [
          `Provider switched mid-session to ${result.activeProviderName}${
            result.activeProviderModel
              ? ` using model ${result.activeProviderModel}`
              : ''
          }. Use this provider/model for subsequent requests unless the user switches again.`,
        ]
      : undefined
  return { message, metaMessages }
}

/**
 * Turns a non-'ready' Ollama readiness probe result into a human-readable
 * guidance string. Returns '' for any state it does not recognize.
 *
 * @param readiness Result of probeOllamaGenerationReadiness().
 * @param options baseUrl overrides the endpoint shown in messages;
 *   allowManualFallback appends a "enter details manually" hint.
 */
function describeOllamaReadinessIssue(
  readiness: OllamaGenerationReadiness,
  options?: {
    baseUrl?: string
    allowManualFallback?: boolean
  },
): string {
  const endpoint = options?.baseUrl ?? 'http://localhost:11434'
  if (readiness.state === 'unreachable') {
    return `Could not reach Ollama at ${endpoint}. Start Ollama first, then run /provider again.`
  }
  if (readiness.state === 'no_models') {
    const manualSuffix = options?.allowManualFallback
      ? ', or enter details manually'
      : ''
    return `Ollama is running, but no installed models were found. Pull a chat model such as qwen2.5-coder:7b or llama3.1:8b first${manualSuffix}.`
  }
  if (readiness.state === 'generation_failed') {
    const modelHint = readiness.probeModel ?? 'the selected model'
    const detailSuffix = readiness.detail ? ` Details: ${readiness.detail}.` : ''
    const manualSuffix = options?.allowManualFallback
      ? ' You can also enter details manually.'
      : ''
    return `Ollama is reachable and models are installed, but a generation probe failed for ${modelHint}.${detailSuffix} Run "ollama run ${modelHint}" once and retry.${manualSuffix}`
  }
  return ''
}

// What the user picked on the first wizard screen.
type ProviderChoice = 'auto' | ProviderProfile | 'codex-oauth' | 'clear'

// Discriminated union describing the wizard's current screen and the data
// collected so far along that path.
type Step =
  | { name: 'choose' }
  | { name: 'auto-goal' }
  | { name: 'auto-detect'; goal: RecommendationGoal }
  | { name: 'ollama-detect' }
  | { name: 'openai-key'; defaultModel: string }
  | { name: 'openai-base'; apiKey: string; defaultModel: string }
  | {
      name: 'openai-model'
      apiKey: string
      baseUrl: string | null
      defaultModel: string
    }
  | { name: 'mistral-key'; defaultModel: string }
  | { name: 'mistral-base'; apiKey: string; defaultModel: string }
  | {
      name: 'mistral-model'
      apiKey: string
      baseUrl: string | null
      defaultModel: string
    }
  | { name: 'gemini-auth-method' }
  | { name: 'gemini-key' }
  | { name: 'gemini-access-token' }
  | {
      name: 'gemini-model'
      apiKey?: string
      authMode: 'api-key' | 'access-token' | 'adc'
    }
  | { name: 'codex-oauth' }
  | { name: 'codex-check' }

// Display-only summary of the provider active in the current session.
type CurrentProviderSummary = {
  providerLabel: string
  modelLabel: string
  endpointLabel: string
  savedProfileLabel: string
}

// Display-only summary of a saved (on-disk) provider profile.
type SavedProfileSummary = {
  providerLabel: string
  modelLabel: string
  endpointLabel: string
  credentialLabel?: string
}

// Props for the generic single-field text entry dialog used by most steps.
type TextEntryDialogProps = {
  title: string
  subtitle?: string
  resetStateKey?: string
  description: React.ReactNode
  initialValue: string
  placeholder?: string
  mask?: string
  allowEmpty?: boolean
  validate?: (value: string) => string | null
  onSubmit: (value: string) => void
  onCancel: () => void
}

// Pre-filled defaults shown by the wizard, derived from the environment.
type ProviderWizardDefaults = {
  openAIModel: string
  openAIBaseUrl: string
  geminiModel: string
  mistralModel: string
  mistralBaseUrl: string
}

// NOTE(review): the generic argument to Partial<...> appears to have been
// stripped from this source (angle-bracket mangling artifact) — recover the
// original type argument from version control before relying on this type.
type SecretSourceEnv = NodeJS.ProcessEnv & Partial

/**
 * Interprets a CLAUDE_CODE_USE_* style env var as a boolean flag.
 * Missing/empty, '0', 'false', and 'no' (case-insensitive, trimmed) are
 * falsy; anything else is truthy.
 */
function isEnvTruthy(value: string | undefined): boolean {
  if (!value) return false
  const normalized = value.trim().toLowerCase()
  return normalized !== '' && normalized !== '0' && normalized !== 'false' && normalized !== 'no'
}

// (definition continues on the next source line: getSafeDisplayValue)
function
getSafeDisplayValue(
  value: string | undefined,
  processEnv: SecretSourceEnv,
  profileEnv?: ProfileEnv,
  fallback = '(not set)',
): string {
  // Redact anything that matches a known secret before showing it in the UI.
  return (
    redactSecretValueForDisplay(value, processEnv, profileEnv) ?? fallback
  )
}

/**
 * Derives the wizard's pre-filled model/endpoint defaults from the current
 * environment, falling back to hard-coded defaults when a variable is unset
 * or rejected by sanitizeProviderConfigValue.
 */
export function getProviderWizardDefaults(
  processEnv: NodeJS.ProcessEnv = process.env,
): ProviderWizardDefaults {
  const secretSource = processEnv as SecretSourceEnv
  const safeOpenAIModel =
    sanitizeProviderConfigValue(processEnv.OPENAI_MODEL, secretSource) ||
    'gpt-4o'
  const safeOpenAIBaseUrl =
    sanitizeProviderConfigValue(processEnv.OPENAI_BASE_URL, secretSource) ||
    DEFAULT_OPENAI_BASE_URL
  const safeGeminiModel =
    sanitizeProviderConfigValue(processEnv.GEMINI_MODEL, secretSource) ||
    DEFAULT_GEMINI_MODEL
  // Consistency fix: the Mistral lookups previously passed the raw
  // processEnv while every sibling lookup passed secretSource. They are the
  // same object at runtime (secretSource is just a cast of processEnv), so
  // behavior is unchanged.
  const safeMistralModel =
    sanitizeProviderConfigValue(processEnv.MISTRAL_MODEL, secretSource) ||
    DEFAULT_MISTRAL_MODEL
  const safeMistralBaseUrl =
    sanitizeProviderConfigValue(processEnv.MISTRAL_BASE_URL, secretSource) ||
    DEFAULT_MISTRAL_BASE_URL
  return {
    openAIModel: safeOpenAIModel,
    openAIBaseUrl: safeOpenAIBaseUrl,
    geminiModel: safeGeminiModel,
    mistralModel: safeMistralModel,
    mistralBaseUrl: safeMistralBaseUrl,
  }
}

/**
 * Summarizes the provider the CURRENT session is using, based on the
 * CLAUDE_CODE_USE_* flags. Flag precedence:
 * Gemini > Mistral > GitHub Models > OpenAI-compatible > Anthropic (default).
 */
export function buildCurrentProviderSummary(options?: {
  processEnv?: NodeJS.ProcessEnv
  persisted?: ProfileFile | null
}): CurrentProviderSummary {
  const processEnv = options?.processEnv ?? process.env
  const secretSource = processEnv as SecretSourceEnv
  const persisted = options?.persisted ?? loadProfileFile()
  const savedProfileLabel = persisted?.profile ?? 'none'
  if (isEnvTruthy(processEnv.CLAUDE_CODE_USE_GEMINI)) {
    return {
      providerLabel: 'Google Gemini',
      modelLabel: getSafeDisplayValue(
        processEnv.GEMINI_MODEL ?? DEFAULT_GEMINI_MODEL,
        secretSource,
      ),
      endpointLabel: getSafeDisplayValue(
        processEnv.GEMINI_BASE_URL ?? DEFAULT_GEMINI_BASE_URL,
        secretSource,
      ),
      savedProfileLabel,
    }
  }
  if (isEnvTruthy(processEnv.CLAUDE_CODE_USE_MISTRAL)) {
    return {
      providerLabel: 'Mistral',
      // Consistency fix: pass secretSource like the other branches (same
      // object as processEnv at runtime, so display output is unchanged).
      modelLabel: getSafeDisplayValue(
        processEnv.MISTRAL_MODEL ?? DEFAULT_MISTRAL_MODEL,
        secretSource,
      ),
      endpointLabel: getSafeDisplayValue(
        processEnv.MISTRAL_BASE_URL ?? DEFAULT_MISTRAL_BASE_URL,
        secretSource,
      ),
      savedProfileLabel,
    }
  }
  if (isEnvTruthy(processEnv.CLAUDE_CODE_USE_GITHUB)) {
    return {
      providerLabel: 'GitHub Models',
      modelLabel: getSafeDisplayValue(
        processEnv.OPENAI_MODEL ?? 'github:copilot',
        secretSource,
      ),
      endpointLabel: getSafeDisplayValue(
        processEnv.OPENAI_BASE_URL ??
          processEnv.OPENAI_API_BASE ??
          'https://models.github.ai/inference',
        secretSource,
      ),
      savedProfileLabel,
    }
  }
  if (isEnvTruthy(processEnv.CLAUDE_CODE_USE_OPENAI)) {
    const request = resolveProviderRequest({
      model: processEnv.OPENAI_MODEL,
      baseUrl: processEnv.OPENAI_BASE_URL,
    })
    // Refine the generic label: Codex transport and local endpoints get
    // more specific names than 'OpenAI-compatible'.
    let providerLabel = 'OpenAI-compatible'
    if (request.transport === 'codex_responses') {
      providerLabel = 'Codex'
    } else if (isLocalProviderUrl(request.baseUrl)) {
      providerLabel = getLocalOpenAICompatibleProviderLabel(request.baseUrl)
    }
    return {
      providerLabel,
      modelLabel: getSafeDisplayValue(request.requestedModel, secretSource),
      endpointLabel: getSafeDisplayValue(request.baseUrl, secretSource),
      savedProfileLabel,
    }
  }
  // No provider flag set: default to Anthropic.
  return {
    providerLabel: 'Anthropic',
    modelLabel: getSafeDisplayValue(
      processEnv.ANTHROPIC_MODEL ??
        processEnv.CLAUDE_MODEL ??
        'claude-sonnet-4-6',
      secretSource,
    ),
    endpointLabel: getSafeDisplayValue(
      processEnv.ANTHROPIC_BASE_URL ?? 'https://api.anthropic.com',
      secretSource,
    ),
    savedProfileLabel,
  }
}

/**
 * Summarizes a saved (on-disk) provider profile for display.
 * (Definition continues on the next source line with the remaining
 * switch cases.)
 */
function buildSavedProfileSummary(
  profile: ProviderProfile,
  env: ProfileEnv,
): SavedProfileSummary {
  switch (profile) {
    case 'gemini':
      return {
        providerLabel: 'Google Gemini',
        modelLabel: getSafeDisplayValue(
          env.GEMINI_MODEL ?? DEFAULT_GEMINI_MODEL,
          process.env,
          env,
        ),
        endpointLabel: getSafeDisplayValue(
          env.GEMINI_BASE_URL ?? DEFAULT_GEMINI_BASE_URL,
          process.env,
          env,
        ),
        credentialLabel:
          env.GEMINI_AUTH_MODE === 'access-token'
            ? 'access token (stored securely)'
            : env.GEMINI_AUTH_MODE === 'adc'
              ? 'local ADC'
              : maskSecretForDisplay(env.GEMINI_API_KEY) !== undefined ?
'configured'
                : undefined,
      }
    case 'mistral':
      return {
        providerLabel: 'Mistral',
        modelLabel: getSafeDisplayValue(
          env.MISTRAL_MODEL ?? DEFAULT_MISTRAL_MODEL,
          process.env,
          env,
        ),
        endpointLabel: getSafeDisplayValue(
          env.MISTRAL_BASE_URL ?? DEFAULT_MISTRAL_BASE_URL,
          process.env,
          env,
        ),
        // 'configured' only when a non-empty secret is present.
        credentialLabel:
          maskSecretForDisplay(env.MISTRAL_API_KEY) !== undefined
            ? 'configured'
            : undefined,
      }
    case 'codex':
      return {
        providerLabel: 'Codex',
        modelLabel: getSafeDisplayValue(
          env.OPENAI_MODEL ?? 'codexplan',
          process.env,
          env,
        ),
        endpointLabel: getSafeDisplayValue(
          env.OPENAI_BASE_URL ?? DEFAULT_CODEX_BASE_URL,
          process.env,
          env,
        ),
        credentialLabel:
          maskSecretForDisplay(env.CODEX_API_KEY) !== undefined
            ? 'configured'
            : undefined,
      }
    case 'ollama':
      // Ollama needs no credential, so no credentialLabel is produced.
      return {
        providerLabel: 'Ollama',
        modelLabel: getSafeDisplayValue(
          env.OPENAI_MODEL,
          process.env,
          env,
        ),
        endpointLabel: getSafeDisplayValue(
          env.OPENAI_BASE_URL,
          process.env,
          env,
        ),
      }
    case 'openai':
    default: {
      const baseUrl = env.OPENAI_BASE_URL ?? DEFAULT_OPENAI_BASE_URL
      return {
        // Local endpoints (LM Studio, etc.) get a more specific label.
        providerLabel: isLocalProviderUrl(baseUrl)
          ? getLocalOpenAICompatibleProviderLabel(baseUrl)
          : 'OpenAI-compatible',
        modelLabel: getSafeDisplayValue(
          env.OPENAI_MODEL ?? 'gpt-4o',
          process.env,
          env,
        ),
        endpointLabel: getSafeDisplayValue(
          baseUrl,
          process.env,
          env,
        ),
        credentialLabel:
          maskSecretForDisplay(env.OPENAI_API_KEY) !== undefined
            ? 'configured'
            : undefined,
      }
    }
  }
}

/**
 * Builds the multi-line confirmation shown after saving a provider profile.
 *
 * @param profile Saved profile kind.
 * @param env The profile's environment payload.
 * @param filePath Where the profile file was written.
 * @param options activatedInSession indicates the profile was also applied
 *   to the running session; activationWarning carries the reason it could
 *   not be applied. When neither is set, a restart hint is appended.
 */
export function buildProfileSaveMessage(
  profile: ProviderProfile,
  env: ProfileEnv,
  filePath: string,
  options?: {
    activatedInSession?: boolean
    activationWarning?: string | null
  },
): string {
  const summary = buildSavedProfileSummary(profile, env)
  const lines = [
    `Saved ${summary.providerLabel} profile.`,
    `Model: ${summary.modelLabel}`,
    `Endpoint: ${summary.endpointLabel}`,
  ]
  if (summary.credentialLabel) {
    lines.push(`Credentials: ${summary.credentialLabel}`)
  }
  lines.push(`Profile: ${filePath}`)
  if (options?.activatedInSession) {
    lines.push('OpenClaude switched to it for this session.')
  } else if (options?.activationWarning) {
    lines.push(
      `Saved for next startup. Warning: could not activate it in this session (${options.activationWarning}).`,
    )
  } else {
    lines.push('Restart OpenClaude to use it.')
  }
  return lines.join('\n')
}

/**
 * Builds the /provider usage/help text, including a snapshot of the current
 * provider. The provider list omits Codex OAuth in bare mode.
 * (Definition continues on the next source line.)
 */
function buildUsageText(): string {
  const summary = buildCurrentProviderSummary()
  const availableProviders = isBareMode()
    ? 'Choose Auto, Ollama, OpenAI-compatible, Gemini, or Codex, then save a provider profile.'
    : 'Choose Auto, Ollama, OpenAI-compatible, Gemini, Codex, or Codex OAuth, then save a provider profile.'
return [
    'Usage: /provider',
    '',
    'Guided setup for saved provider profiles.',
    '',
    `Current provider: ${summary.providerLabel}`,
    `Current model: ${summary.modelLabel}`,
    `Current endpoint: ${summary.endpointLabel}`,
    `Saved profile: ${summary.savedProfileLabel}`,
    '',
    availableProviders,
  ].join('\n')
}

// Fire-and-forget wrapper: kicks off the async save/notify flow without
// awaiting it (errors are handled inside saveProfileAndNotify).
function finishProfileSave(
  onDone: LocalJSXCommandOnDone,
  profile: ProviderProfile,
  env: ProfileEnv,
): void {
  void saveProfileAndNotify(onDone, profile, env)
}

// Thin re-export wrapper around the shared Codex OAuth env builder.
// NOTE(review): the type arguments to Pick<...> appear to have been
// stripped from this source (angle-bracket mangling) — recover the original
// `tokens: Pick<CodexOAuthTokens, ...>` signature from version control.
export function buildCodexOAuthProfileEnv(
  tokens: Pick,
): ProfileEnv | null {
  return buildSharedCodexOAuthProfileEnv(tokens)
}

// Thin re-export wrapper around the shared in-session activation helper.
// NOTE(review): the Promise return type's generic argument appears stripped
// here — confirm the original `Promise<...>` annotation against VCS.
export async function applySavedProfileToCurrentSession(options: {
  profileFile: ProfileFile
  processEnv?: NodeJS.ProcessEnv
}): Promise {
  return applySharedProfileToCurrentSession(options)
}

/**
 * Persists a provider profile, optionally activates it in the current
 * session (only for 'codex' profiles), and reports the outcome via onDone.
 * Any thrown error is converted to a system-displayed failure message.
 * NOTE(review): the Promise return type's generic argument appears stripped
 * (likely Promise<void>) — confirm against VCS.
 */
async function saveProfileAndNotify(
  onDone: LocalJSXCommandOnDone,
  profile: ProviderProfile,
  env: ProfileEnv,
): Promise {
  try {
    const profileFile = createProfileFile(profile, env)
    const filePath = saveProfileFile(profileFile)
    // Only Codex profiles are applied to the running session immediately;
    // other profiles take effect on the next startup.
    const shouldActivateInSession = profile === 'codex'
    const activationWarning = shouldActivateInSession
      ? await applySharedProfileToCurrentSession({ profileFile })
      : null
    onDone(
      buildProfileSaveMessage(profile, env, filePath, {
        activatedInSession: shouldActivateInSession && activationWarning === null,
        activationWarning,
      }),
      {
        display: 'system',
      },
    )
  } catch (error) {
    const message =
      error instanceof Error ?
error.message : String(error)
    onDone(`Failed to save provider profile: ${message}`, {
      display: 'system',
    })
  }
}

/**
 * Generic single-field text entry dialog used by most wizard steps.
 * Resets its internal value/cursor/error state whenever initialValue or
 * resetStateKey changes; validates on submit via allowEmpty and validate.
 * NOTE(review): the JSX returned by this component appears to have been
 * stripped from this source (angle-bracket mangling) — only the expression
 * children survive below. Restore the original markup from version control.
 */
export function TextEntryDialog({
  title,
  subtitle,
  resetStateKey,
  description,
  initialValue,
  placeholder,
  mask,
  allowEmpty = false,
  validate,
  onSubmit,
  onCancel,
}: TextEntryDialogProps): React.ReactNode {
  const { columns } = useTerminalSize()
  const [value, setValue] = React.useState(initialValue)
  const [cursorOffset, setCursorOffset] = React.useState(initialValue.length)
  // NOTE(review): useState(null) likely had a stripped generic argument
  // (e.g. useState<string | null>) — confirm against VCS.
  const [error, setError] = React.useState(null)
  React.useLayoutEffect(() => {
    // Re-seed the field whenever the caller changes the initial value or
    // bumps resetStateKey.
    setValue(initialValue)
    setCursorOffset(initialValue.length)
    setError(null)
  }, [initialValue, resetStateKey])
  const inputColumns = Math.max(30, columns - 6)
  const handleSubmit = React.useCallback(
    (nextValue: string) => {
      if (!allowEmpty && nextValue.trim().length === 0) {
        setError('A value is required for this step.')
        return
      }
      const validationError = validate?.(nextValue)
      if (validationError) {
        setError(validationError)
        return
      }
      setError(null)
      onSubmit(nextValue)
    },
    [allowEmpty, onSubmit, validate],
  )
  // NOTE(review): JSX elements stripped — expression children only.
  return (
    {description} {error ? {error} : null}
  )
}

/**
 * First wizard screen: lets the user pick a provider path (or clear the
 * saved profile). Codex OAuth is hidden in bare mode.
 * (Definition continues on the next source line; its JSX is also stripped.)
 */
function ProviderChooser({
  onChoose,
  onCancel,
}: {
  onChoose: (value: ProviderChoice) => void
  onCancel: () => void
}): React.ReactNode {
  const summary = buildCurrentProviderSummary()
  const canUseCodexOAuth = !isBareMode()
  const helperText = canUseCodexOAuth
    ? 'Save a provider profile without editing environment variables first. Codex profiles backed by env, auth.json, or OpenClaude secure storage can switch this session immediately when validation succeeds.'
    : 'Save a provider profile without editing environment variables first. Codex profiles backed by env or auth.json can switch this session immediately.'
const options: OptionWithDescription[] = [
    {
      label: 'Auto',
      value: 'auto',
      description:
        'Prefer local Ollama when available, otherwise guide you into OpenAI-compatible setup',
    },
    {
      label: 'Ollama',
      value: 'ollama',
      description: 'Use a local Ollama model with no API key',
    },
    {
      label: 'OpenAI-compatible',
      value: 'openai',
      description:
        'GPT-4o, DeepSeek, OpenRouter, Groq, LM Studio, and similar APIs',
    },
    {
      label: 'Gemini',
      value: 'gemini',
      description: 'Use Google Gemini with API key, access token, or local ADC',
    },
    { label: 'Mistral', value: 'mistral', description: 'Use Mistral with API key' },
    {
      label: 'Codex',
      value: 'codex',
      description: 'Use existing ChatGPT Codex CLI auth or env credentials',
    },
    // Codex OAuth only appears outside bare mode.
    ...(canUseCodexOAuth
      ? [
          {
            label: 'Codex OAuth',
            value: 'codex-oauth' as const,
            description:
              'Sign in with ChatGPT in your browser and store Codex tokens securely',
          },
        ]
      : []),
  ]
  // Offer profile clearing only when a saved profile exists.
  if (summary.savedProfileLabel !== 'none') {
    options.push({
      label: 'Clear saved profile',
      value: 'clear',
      description:
        'Remove .openclaude-profile.json and return to normal startup',
    })
  }
  // NOTE(review): the JSX below has been stripped from this source
  // (angle-bracket mangling) — only expression children and text survive.
  // Restore the original markup from version control.
  return (
    {helperText} Current model: {summary.modelLabel} Current endpoint: {summary.endpointLabel} Saved profile: {summary.savedProfileLabel}
  )
}

/**
 * Auto setup: probes local Ollama readiness for the chosen goal and either
 * recommends an Ollama model or redirects to OpenAI-compatible setup.
 * (Definition continues on the next source line.)
 */
function AutoRecommendationStep({
  goal,
  onBack,
  onSave,
  onNeedOpenAI,
  onCancel,
}: {
  goal: RecommendationGoal
  onBack: () => void
  onSave: (profile: ProviderProfile, env: ProfileEnv) => void
  onNeedOpenAI: (defaultModel: string) => void
  onCancel: () => void
}): React.ReactNode {
  // Probe state machine: loading -> ollama | openai | error.
  const [status, setStatus] = React.useState<
    | { state: 'loading' }
    | {
        state: 'ollama'
        model: string
        summary: string
      }
    | {
        state: 'openai'
        defaultModel: string
        reason: string
      }
    | {
        state: 'error'
        message: string
      }
  >({ state: 'loading' })
  React.useEffect(() => {
    // cancelled guards against setState after unmount or goal change.
    let cancelled = false
    void (async () => {
      const defaultModel = getGoalDefaultOpenAIModel(goal)
      try {
        const readiness = await probeOllamaGenerationReadiness()
        if (readiness.state !== 'ready') {
          if (!cancelled) {
            setStatus({
              state: 'openai',
defaultModel,
              reason: describeOllamaReadinessIssue(readiness),
            })
          }
          return
        }
        const recommended = recommendOllamaModel(readiness.models, goal)
        if (!recommended) {
          if (!cancelled) {
            setStatus({
              state: 'openai',
              defaultModel,
              reason:
                'Ollama responded to a generation probe, but no recommended chat model matched this goal.',
            })
          }
          return
        }
        if (!cancelled) {
          setStatus({
            state: 'ollama',
            model: recommended.name,
            summary: recommended.summary,
          })
        }
      } catch (error) {
        if (!cancelled) {
          setStatus({
            state: 'error',
            message: error instanceof Error ? error.message : String(error),
          })
        }
      }
    })()
    return () => {
      cancelled = true
    }
  }, [goal])
  // NOTE(review): from here on this region is heavily mangled — the JSX has
  // been stripped, and at least two sibling components (an Ollama model
  // picker step and a Codex OAuth sign-in step) have lost their
  // `function ...(` headers to the same angle-bracket stripping, so their
  // bodies run together below. Preserved verbatim; restore from version
  // control rather than reconstructing by hand.
  if (status.state === 'loading') {
    return
  }
  if (status.state === 'error') {
    return (
      {status.message} { if (value === 'continue') { onNeedOpenAI(status.defaultModel) } else if (value === 'back') { onBack() } else { onCancel() } }} onCancel={onCancel} />
    )
  }
  return (
    Auto setup recommends a local Ollama profile for {goal} based on the models currently available on this machine. Recommended model: {status.model} {status.summary ? ` · ${status.summary}` : ''} value === 'back' ? onBack() : onCancel() } onCancel={onCancel} />
  )
}
return (
  Pick one of the installed Ollama models to save into a local provider profile. value === 'back' ? onBack() : onCancel() } onCancel={onCancel} />
)
}
if (status.state === 'starting') {
  return
}
return (
  Finish signing in with ChatGPT in your browser. OpenClaude will store the resulting Codex credentials securely for future sessions. {status.browserOpened === false ? ( Browser did not open automatically. Visit this URL to continue: ) : status.browserOpened === true ? ( Browser opened. Complete the sign-in there, then OpenClaude will finish setup automatically. ) : ( Opening your browser... )} {status.authUrl} Press Esc to cancel and go back.
) }

/**
 * Codex setup step using pre-existing credentials (env vars, auth.json, or
 * secure storage) resolved via resolveCodexCredentials.
 * NOTE(review): this component's JSX has been stripped from the source —
 * the failure-branch markup and the element wrapping the onSubmit handler
 * below are missing. Restore the original markup from version control.
 */
function CodexCredentialStep({
  onSave,
  onBack,
  onCancel,
}: {
  onSave: (profile: ProviderProfile, env: ProfileEnv) => void
  onBack: () => void
  onCancel: () => void
}): React.ReactNode {
  const credentials = resolveCodexCredentials(process.env)
  if (!credentials.ok) {
    return (
      {credentials.message} { const env = buildCodexProfileEnv({ model: value, credentialSource: credentials.credentialSource, processEnv: process.env, }) if (env) { onSave('codex', env) } }} onCancel={onBack} />
    )
}

/**
 * Resolves usable Codex API credentials from the environment, secure
 * storage, or the Codex CLI auth file, producing either an ok result with a
 * human-readable source description, or an actionable failure message.
 * Requires both an API key and a ChatGPT account id.
 * (Definition continues on the next source line.)
 */
function resolveCodexCredentials(processEnv: NodeJS.ProcessEnv):
  | {
      ok: true
      sourceDescription: string
      credentialSource: 'oauth' | 'existing'
    }
  | { ok: false; message: string } {
  const credentials = resolveCodexApiCredentials(processEnv)
  // The OAuth hint differs in bare mode, where the in-app OAuth flow is
  // unavailable.
  const oauthHint = isBareMode()
    ? 'Re-login with the Codex CLI'
    : 'Choose Codex OAuth in /provider, or re-login with the Codex CLI'
  if (!credentials.apiKey) {
    const authHint = credentials.authPath
      ? `Expected auth file: ${credentials.authPath}.`
      : 'Set CODEX_API_KEY or re-login with the Codex CLI.'
    return {
      ok: false,
      message: `Codex setup needs existing credentials. ${oauthHint}, or set CODEX_API_KEY. ${authHint}`,
    }
  }
  if (!credentials.accountId) {
    return {
      ok: false,
      message: `Codex auth is missing chatgpt_account_id. ${oauthHint}, or set CHATGPT_ACCOUNT_ID/CODEX_ACCOUNT_ID first.`,
    }
  }
  return {
    ok: true,
    // Secure-storage-backed credentials came from the OAuth flow.
    credentialSource:
      credentials.source === 'secure-storage' ? 'oauth' : 'existing',
    // NOTE(review): the final fallback of this chain (continued on the next
    // source line) is DEFAULT_CODEX_BASE_URL, which reads oddly for a
    // source *description* — possibly a mangling artifact; confirm the
    // original fallback against VCS.
    sourceDescription:
      credentials.source === 'env'
        ? 'the current shell environment'
        : credentials.source === 'secure-storage'
          ? 'OpenClaude secure storage'
          : credentials.authPath ??
DEFAULT_CODEX_BASE_URL, } } export function ProviderWizard({ onDone, }: { onDone: LocalJSXCommandOnDone }): React.ReactNode { const defaults = getProviderWizardDefaults() const [step, setStep] = React.useState({ name: 'choose' }) switch (step.name) { case 'choose': return ( { if (value === 'auto') { setStep({ name: 'auto-goal' }) } else if (value === 'ollama') { setStep({ name: 'ollama-detect' }) } else if (value === 'openai') { setStep({ name: 'openai-key', defaultModel: defaults.openAIModel, }) } else if (value === 'gemini') { setStep({ name: 'gemini-auth-method' }) } else if (value === 'mistral') { setStep({ name: 'mistral-key', defaultModel: defaults.mistralModel, }) } else if (value === 'codex-oauth') { setStep({ name: 'codex-oauth' }) } else if (value === 'clear') { const filePath = deleteProfileFile() onDone(`Removed saved provider profile at ${filePath}. Restart OpenClaude to go back to normal startup.`, { display: 'system', }) } else { setStep({ name: 'codex-check' }) } }} onCancel={() => onDone()} /> ) case 'auto-goal': return ( setStep({ name: 'auto-detect', goal })} onBack={() => setStep({ name: 'choose' })} /> ) case 'auto-detect': return ( setStep({ name: 'auto-goal' })} onSave={(profile, env) => finishProfileSave(onDone, profile, env)} onNeedOpenAI={defaultModel => setStep({ name: 'openai-key', defaultModel }) } onCancel={() => onDone()} /> ) case 'ollama-detect': return ( finishProfileSave(onDone, profile, env)} onBack={() => setStep({ name: 'choose' })} onCancel={() => onDone()} /> ) case 'openai-key': return ( { const candidate = value.trim() || process.env.OPENAI_API_KEY || '' return sanitizeApiKey(candidate) ? null : 'Enter a real API key. Placeholder values like SUA_CHAVE are not valid.' 
}} onSubmit={value => { const apiKey = value.trim() || process.env.OPENAI_API_KEY || '' setStep({ name: 'openai-base', apiKey, defaultModel: step.defaultModel, }) }} onCancel={() => setStep({ name: 'choose' })} /> ) case 'openai-base': return ( { setStep({ name: 'openai-model', apiKey: step.apiKey, baseUrl: value.trim() || null, defaultModel: step.defaultModel, }) }} onCancel={() => setStep({ name: 'openai-key', defaultModel: step.defaultModel, }) } /> ) case 'openai-model': return ( { const env = buildOpenAIProfileEnv({ goal: normalizeRecommendationGoal(null), apiKey: step.apiKey, baseUrl: step.baseUrl, model: value.trim() || step.defaultModel, processEnv: {}, }) if (env) { finishProfileSave(onDone, 'openai', env) } }} onCancel={() => setStep({ name: 'openai-base', apiKey: step.apiKey, defaultModel: step.defaultModel, }) } /> ) case 'mistral-key': return ( { const candidate = value.trim() || process.env.MISTRAL_API_KEY || '' return sanitizeApiKey(candidate) ? null : 'Enter a real API key. Placeholder values like SUA_CHAVE are not valid.' 
}} onSubmit={value => { const apiKey = value.trim() || process.env.MISTRAL_API_KEY || '' setStep({ name: 'mistral-base', apiKey, defaultModel: step.defaultModel, }) }} onCancel={() => setStep({ name: 'choose' })} /> ) case 'mistral-base': return ( { setStep({ name: 'mistral-model', apiKey: step.apiKey, baseUrl: value.trim() || null, defaultModel: step.defaultModel, }) }} onCancel={() => setStep({ name: 'mistral-key', defaultModel: step.defaultModel, }) } /> ) case 'mistral-model': return ( { const env = buildMistralProfileEnv({ model: value.trim() || step.defaultModel, baseUrl: step.baseUrl, apiKey: step.apiKey, processEnv: process.env, }) if (env) { finishProfileSave(onDone, 'mistral', env) } }} onCancel={() => setStep({ name: 'mistral-base', apiKey: step.apiKey, defaultModel: step.defaultModel, }) } /> ) case 'gemini-auth-method': { const hasShellGeminiKey = Boolean( process.env.GEMINI_API_KEY || process.env.GOOGLE_API_KEY, ) const hasShellGeminiAccessToken = Boolean(process.env.GEMINI_ACCESS_TOKEN) const hasStoredGeminiAccessToken = Boolean(readGeminiAccessToken()) const hasAdc = mayHaveGeminiAdcCredentials(process.env) const projectHint = getGeminiProjectIdHint(process.env) const options: OptionWithDescription[] = [ { label: 'API key', value: 'api-key', description: hasShellGeminiKey ? 'Use the current Gemini API key from this shell, or enter a new one' : 'Use a Google Gemini API key', }, { label: 'Access token', value: 'access-token', description: hasShellGeminiAccessToken || hasStoredGeminiAccessToken ? `Use ${ hasShellGeminiAccessToken ? 'the current GEMINI_ACCESS_TOKEN' : 'the securely stored Gemini access token' }` : 'Enter a Gemini access token and store it securely', }, { label: 'Local ADC', value: 'adc', description: hasAdc ? `Use local Google ADC credentials${projectHint ? 
` (project: ${projectHint})` : ''}` : 'Use local Google ADC credentials after running gcloud auth application-default login', }, ] return ( onDone()}> Choose how this Gemini profile should authenticate.