Merge pull request #144 from Meetpatel006/main

feat: add Codex/OpenAI effort picker, stabilize model/suggestion navigation, and display the current model with its effort level
This commit is contained in:
Kevin Codex
2026-04-02 21:25:48 +08:00
committed by GitHub
15 changed files with 612 additions and 139 deletions

File diff suppressed because one or more lines are too long

View File

@@ -84,44 +84,44 @@ const reducer = <T>(state: State<T>, action: Action<T>): State<T> => {
return state return state
} }
// Wrap to first item if at the end // If there's a next item in the list, go to it
const next = item.next || state.optionMap.first if (item.next) {
const needsToScroll = item.next.index >= state.visibleToIndex
if (!next) { if (!needsToScroll) {
return {
...state,
focusedValue: item.next.value,
}
}
const nextVisibleToIndex = Math.min(
state.optionMap.size,
state.visibleToIndex + 1,
)
const nextVisibleFromIndex = nextVisibleToIndex - state.visibleOptionCount
return {
...state,
focusedValue: item.next.value,
visibleFromIndex: nextVisibleFromIndex,
visibleToIndex: nextVisibleToIndex,
}
}
// No next item - wrap to first item
const firstItem = state.optionMap.first
if (!firstItem) {
return state return state
} }
// When wrapping to first, reset viewport to start // When wrapping to first, reset viewport to start
if (!item.next && next === state.optionMap.first) {
return {
...state,
focusedValue: next.value,
visibleFromIndex: 0,
visibleToIndex: state.visibleOptionCount,
}
}
const needsToScroll = next.index >= state.visibleToIndex
if (!needsToScroll) {
return {
...state,
focusedValue: next.value,
}
}
const nextVisibleToIndex = Math.min(
state.optionMap.size,
state.visibleToIndex + 1,
)
const nextVisibleFromIndex = nextVisibleToIndex - state.visibleOptionCount
return { return {
...state, ...state,
focusedValue: next.value, focusedValue: firstItem.value,
visibleFromIndex: nextVisibleFromIndex, visibleFromIndex: 0,
visibleToIndex: nextVisibleToIndex, visibleToIndex: state.visibleOptionCount,
} }
} }
@@ -136,44 +136,43 @@ const reducer = <T>(state: State<T>, action: Action<T>): State<T> => {
return state return state
} }
// Wrap to last item if at the beginning // If there's a previous item in the list, go to it
const previous = item.previous || state.optionMap.last if (item.previous) {
const needsToScroll = item.previous.index < state.visibleFromIndex
if (!previous) { if (!needsToScroll) {
return state return {
} ...state,
focusedValue: item.previous.value,
}
}
const nextVisibleFromIndex = Math.max(0, state.visibleFromIndex - 1)
const nextVisibleToIndex = nextVisibleFromIndex + state.visibleOptionCount
// When wrapping to last, reset viewport to end
if (!item.previous && previous === state.optionMap.last) {
const nextVisibleToIndex = state.optionMap.size
const nextVisibleFromIndex = Math.max(
0,
nextVisibleToIndex - state.visibleOptionCount,
)
return { return {
...state, ...state,
focusedValue: previous.value, focusedValue: item.previous.value,
visibleFromIndex: nextVisibleFromIndex, visibleFromIndex: nextVisibleFromIndex,
visibleToIndex: nextVisibleToIndex, visibleToIndex: nextVisibleToIndex,
} }
} }
const needsToScroll = previous.index <= state.visibleFromIndex // No previous item - wrap to last item
const lastItem = state.optionMap.last
if (!needsToScroll) { if (!lastItem) {
return { return state
...state,
focusedValue: previous.value,
}
} }
const nextVisibleFromIndex = Math.max(0, state.visibleFromIndex - 1) // When wrapping to last, reset viewport to end
const nextVisibleToIndex = state.optionMap.size
const nextVisibleToIndex = nextVisibleFromIndex + state.visibleOptionCount const nextVisibleFromIndex = Math.max(
0,
nextVisibleToIndex - state.visibleOptionCount,
)
return { return {
...state, ...state,
focusedValue: previous.value, focusedValue: lastItem.value,
visibleFromIndex: nextVisibleFromIndex, visibleFromIndex: nextVisibleFromIndex,
visibleToIndex: nextVisibleToIndex, visibleToIndex: nextVisibleToIndex,
} }

View File

@@ -0,0 +1,152 @@
import React, { useState } from 'react'
import { Box, Text } from '../ink.js'
import { useMainLoopModel } from '../hooks/useMainLoopModel.js'
import { useAppState, useSetAppState } from '../state/AppState.js'
import type { EffortLevel, OpenAIEffortLevel } from '../utils/effort.js'
import {
getAvailableEffortLevels,
getDisplayedEffortLevel,
getEffortLevelDescription,
getEffortLevelLabel,
getEffortValueDescription,
modelSupportsEffort,
modelUsesOpenAIEffort,
standardEffortToOpenAI,
isOpenAIEffortLevel,
} from '../utils/effort.js'
import { getAPIProvider } from '../utils/model/providers.js'
import { getReasoningEffortForModel } from '../services/api/providerConfig.js'
import { Select } from './CustomSelect/select.js'
import { effortLevelToSymbol } from './EffortIndicator.js'
import { KeyboardShortcutHint } from './design-system/KeyboardShortcutHint.js'
import { Byline } from './design-system/Byline.js'
// One row in the effort <Select>: a rendered label node, the string value
// passed back on selection, an inline description, and an availability flag.
type EffortOption = {
  label: React.ReactNode
  value: string
  description: string
  isAvailable: boolean
}
// Picker callbacks: onSelect receives the chosen level, or undefined when the
// user picks 'auto'; onCancel (optional) fires when the selection is dismissed.
type Props = {
  onSelect: (effort: EffortLevel | undefined) => void
  onCancel?: () => void
}
/**
 * Interactive picker for the effort level of the current main-loop model.
 *
 * Shows an 'auto' entry (clears any override) followed by every level the
 * model supports — the OpenAI four-level scale for OpenAI/Codex providers,
 * Claude's low/medium/high(/max) otherwise. The selection is persisted into
 * app state and reported to the caller via onSelect.
 */
export function EffortPicker({ onSelect, onCancel }: Props) {
  const model = useMainLoopModel()
  // NOTE(review): the selector is untyped here; `effortValue` is presumably
  // EffortLevel | undefined — confirm against the AppState shape.
  const appStateEffort = useAppState((s: any) => s.effortValue)
  const setAppState = useSetAppState()
  const provider = getAPIProvider()

  const usesOpenAIEffort = modelUsesOpenAIEffort(model)
  const availableLevels = getAvailableEffortLevels(model)
  const currentDisplayedLevel = getDisplayedEffortLevel(model, appStateEffort)

  // For OpenAI/Codex, the model's own default reasoning effort seeds the
  // initial focus below. (Removed an unused `defaultEffortForModel` local
  // that was derived here but never read.)
  const modelReasoningEffort = usesOpenAIEffort ? getReasoningEffortForModel(model) : undefined

  // Option list: 'auto' first, then one entry per supported level.
  const options: EffortOption[] = [
    {
      label: <EffortOptionLabel level="auto" text="Auto" isCurrent={false} />,
      value: 'auto',
      description: 'Use the default effort level for your model',
      isAvailable: true,
    },
    ...availableLevels.map(level => {
      // OpenAI's 'xhigh' displays as Claude's 'max' when comparing against
      // the currently displayed level.
      const displayLevel = usesOpenAIEffort
        ? (level === 'xhigh' ? 'max' : level)
        : level
      const isCurrent = currentDisplayedLevel === displayLevel
      return {
        label: (
          <EffortOptionLabel
            level={level as EffortLevel}
            text={getEffortLevelLabel(level)}
            isCurrent={isCurrent}
          />
        ),
        value: level,
        description: getEffortLevelDescription(level),
        isAvailable: true,
      }
    }),
  ]

  // Persist the chosen level in app state, then notify the caller.
  // 'auto' clears the override entirely (stores/reports undefined).
  function handleSelect(value: string) {
    if (value === 'auto') {
      setAppState(prev => ({
        ...prev,
        effortValue: undefined,
      }))
      onSelect(undefined)
    } else {
      // NOTE(review): for OpenAI providers `value` may be 'xhigh', which is
      // not a member of EffortLevel — this cast papers over that; confirm
      // downstream consumers of effortValue accept it.
      const effortLevel = value as EffortLevel
      setAppState(prev => ({
        ...prev,
        effortValue: effortLevel,
      }))
      onSelect(effortLevel)
    }
  }

  function handleCancel() {
    onCancel?.()
  }

  const supportsEffort = modelSupportsEffort(model)

  // Initial focus: the model's default reasoning effort for OpenAI/Codex;
  // the stored effort (or 'auto') for Claude models.
  const initialFocus = usesOpenAIEffort
    ? (modelReasoningEffort || 'auto')
    : (appStateEffort ? String(appStateEffort) : 'auto')

  return (
    <Box flexDirection="column">
      <Box marginBottom={1} flexDirection="column">
        <Text color="remember" bold={true}>Set effort level</Text>
        <Text dimColor={true}>
          {usesOpenAIEffort
            ? `OpenAI/Codex provider (${provider})`
            : supportsEffort
              ? `Claude model · ${provider} provider`
              : `Effort not supported for this model`
          }
        </Text>
      </Box>
      <Box marginBottom={1}>
        <Select
          options={options}
          defaultValue={initialFocus}
          onChange={handleSelect}
          onCancel={handleCancel}
          visibleOptionCount={Math.min(6, options.length)}
          inlineDescriptions={true}
        />
      </Box>
      <Box marginBottom={1}>
        <Text dimColor={true} italic={true}>
          <Byline>
            <KeyboardShortcutHint shortcut="Enter" action="confirm" />
            <KeyboardShortcutHint shortcut="Esc" action="cancel" />
          </Byline>
        </Text>
      </Box>
    </Box>
  )
}
// Renders one option row: a colored symbol, the level name (bold when it is
// the currently active level), and a dimmed "(current)" suffix.
function EffortOptionLabel({ level, text, isCurrent }: { level: EffortLevel | 'auto', text: string, isCurrent: boolean }) {
  const isAuto = level === 'auto'
  // 'auto' has no effort symbol of its own; render ⊘ instead.
  const symbol = isAuto ? '⊘' : effortLevelToSymbol(level as EffortLevel)
  let color: string
  if (isCurrent) {
    color = 'remember'
  } else if (isAuto) {
    color = 'subtle'
  } else {
    color = 'suggestion'
  }
  return (
    <>
      <Text color={color}>{symbol} </Text>
      <Text bold={isCurrent}>{text}</Text>
      {isCurrent && <Text dimColor={true}> (current)</Text>}
    </>
  )
}

View File

@@ -97,21 +97,45 @@ function detectProvider(): { name: string; model: string; baseUrl: string; isLoc
} }
if (useOpenAI) { if (useOpenAI) {
const model = process.env.OPENAI_MODEL || 'gpt-4o' const rawModel = process.env.OPENAI_MODEL || 'gpt-4o'
const baseUrl = process.env.OPENAI_BASE_URL || 'https://api.openai.com/v1' const baseUrl = process.env.OPENAI_BASE_URL || 'https://api.openai.com/v1'
const isLocal = /localhost|127\.0\.0\.1|0\.0\.0\.0/.test(baseUrl) const isLocal = /localhost|127\.0\.0\.1|0\.0\.0\.0/.test(baseUrl)
let name = 'OpenAI' let name = 'OpenAI'
if (/deepseek/i.test(baseUrl) || /deepseek/i.test(model)) name = 'DeepSeek' if (/deepseek/i.test(baseUrl) || /deepseek/i.test(rawModel)) name = 'DeepSeek'
else if (/openrouter/i.test(baseUrl)) name = 'OpenRouter' else if (/openrouter/i.test(baseUrl)) name = 'OpenRouter'
else if (/together/i.test(baseUrl)) name = 'Together AI' else if (/together/i.test(baseUrl)) name = 'Together AI'
else if (/groq/i.test(baseUrl)) name = 'Groq' else if (/groq/i.test(baseUrl)) name = 'Groq'
else if (/mistral/i.test(baseUrl) || /mistral/i.test(model)) name = 'Mistral' else if (/mistral/i.test(baseUrl) || /mistral/i.test(rawModel)) name = 'Mistral'
else if (/azure/i.test(baseUrl)) name = 'Azure OpenAI' else if (/azure/i.test(baseUrl)) name = 'Azure OpenAI'
else if (/localhost:11434/i.test(baseUrl)) name = 'Ollama' else if (/localhost:11434/i.test(baseUrl)) name = 'Ollama'
else if (/localhost:1234/i.test(baseUrl)) name = 'LM Studio' else if (/localhost:1234/i.test(baseUrl)) name = 'LM Studio'
else if (/llama/i.test(model)) name = 'Meta Llama' else if (/llama/i.test(rawModel)) name = 'Meta Llama'
else if (isLocal) name = 'Local' else if (isLocal) name = 'Local'
return { name, model, baseUrl, isLocal }
// Resolve model alias to actual model name + reasoning effort
let displayModel = rawModel
const codexAliases: Record<string, { model: string; reasoningEffort?: string }> = {
codexplan: { model: 'gpt-5.4', reasoningEffort: 'high' },
'gpt-5.4': { model: 'gpt-5.4', reasoningEffort: 'high' },
'gpt-5.3-codex': { model: 'gpt-5.3-codex', reasoningEffort: 'high' },
'gpt-5.3-codex-spark': { model: 'gpt-5.3-codex-spark' },
codexspark: { model: 'gpt-5.3-codex-spark' },
'gpt-5.2-codex': { model: 'gpt-5.2-codex', reasoningEffort: 'high' },
'gpt-5.1-codex-max': { model: 'gpt-5.1-codex-max', reasoningEffort: 'high' },
'gpt-5.1-codex-mini': { model: 'gpt-5.1-codex-mini' },
'gpt-5.4-mini': { model: 'gpt-5.4-mini', reasoningEffort: 'medium' },
'gpt-5.2': { model: 'gpt-5.2', reasoningEffort: 'medium' },
}
const alias = rawModel.toLowerCase()
if (alias in codexAliases) {
const resolved = codexAliases[alias]
displayModel = resolved.model
if (resolved.reasoningEffort) {
displayModel = `${displayModel} (${resolved.reasoningEffort})`
}
}
return { name, model: displayModel, baseUrl, isLocal }
} }
// Default: Anthropic // Default: Anthropic

View File

@@ -1242,17 +1242,25 @@ export function useTypeahead({
const handleAutocompletePrevious = useCallback(() => { const handleAutocompletePrevious = useCallback(() => {
setSuggestionsState(prev => ({ setSuggestionsState(prev => ({
...prev, ...prev,
selectedSuggestion: prev.selectedSuggestion <= 0 ? suggestions.length - 1 : prev.selectedSuggestion - 1 selectedSuggestion: prev.suggestions.length === 0
? -1
: prev.selectedSuggestion <= 0
? prev.suggestions.length - 1
: Math.min(prev.selectedSuggestion - 1, prev.suggestions.length - 1)
})); }));
}, [suggestions.length, setSuggestionsState]); }, [setSuggestionsState]);
// Handler for autocomplete:next - selects next suggestion // Handler for autocomplete:next - selects next suggestion
const handleAutocompleteNext = useCallback(() => { const handleAutocompleteNext = useCallback(() => {
setSuggestionsState(prev => ({ setSuggestionsState(prev => ({
...prev, ...prev,
selectedSuggestion: prev.selectedSuggestion >= suggestions.length - 1 ? 0 : prev.selectedSuggestion + 1 selectedSuggestion: prev.suggestions.length === 0
? -1
: prev.selectedSuggestion >= prev.suggestions.length - 1
? 0
: Math.max(0, prev.selectedSuggestion + 1)
})); }));
}, [suggestions.length, setSuggestionsState]); }, [setSuggestionsState]);
// Autocomplete context keybindings - only active when suggestions are visible // Autocomplete context keybindings - only active when suggestions are visible
const autocompleteHandlers = useMemo(() => ({ const autocompleteHandlers = useMemo(() => ({

View File

@@ -665,9 +665,11 @@ class OpenAIShimStream {
class OpenAIShimMessages { class OpenAIShimMessages {
private defaultHeaders: Record<string, string> private defaultHeaders: Record<string, string>
private reasoningEffort?: 'low' | 'medium' | 'high' | 'xhigh'
constructor(defaultHeaders: Record<string, string>) { constructor(defaultHeaders: Record<string, string>, reasoningEffort?: 'low' | 'medium' | 'high' | 'xhigh') {
this.defaultHeaders = defaultHeaders this.defaultHeaders = defaultHeaders
this.reasoningEffort = reasoningEffort
} }
create( create(
@@ -679,7 +681,7 @@ class OpenAIShimMessages {
let httpResponse: Response | undefined let httpResponse: Response | undefined
const promise = (async () => { const promise = (async () => {
const request = resolveProviderRequest({ model: params.model }) const request = resolveProviderRequest({ model: params.model, reasoningEffortOverride: self.reasoningEffort })
const response = await self._doRequest(request, params, options) const response = await self._doRequest(request, params, options)
httpResponse = response httpResponse = response
@@ -1018,9 +1020,11 @@ class OpenAIShimMessages {
class OpenAIShimBeta { class OpenAIShimBeta {
messages: OpenAIShimMessages messages: OpenAIShimMessages
reasoningEffort?: 'low' | 'medium' | 'high' | 'xhigh'
constructor(defaultHeaders: Record<string, string>) { constructor(defaultHeaders: Record<string, string>, reasoningEffort?: 'low' | 'medium' | 'high' | 'xhigh') {
this.messages = new OpenAIShimMessages(defaultHeaders) this.messages = new OpenAIShimMessages(defaultHeaders, reasoningEffort)
this.reasoningEffort = reasoningEffort
} }
} }
@@ -1028,6 +1032,7 @@ export function createOpenAIShimClient(options: {
defaultHeaders?: Record<string, string> defaultHeaders?: Record<string, string>
maxRetries?: number maxRetries?: number
timeout?: number timeout?: number
reasoningEffort?: 'low' | 'medium' | 'high' | 'xhigh'
}): unknown { }): unknown {
hydrateGithubModelsTokenFromSecureStorage() hydrateGithubModelsTokenFromSecureStorage()
@@ -1050,7 +1055,7 @@ export function createOpenAIShimClient(options: {
const beta = new OpenAIShimBeta({ const beta = new OpenAIShimBeta({
...(options.defaultHeaders ?? {}), ...(options.defaultHeaders ?? {}),
}) }, options.reasoningEffort)
return { return {
beta, beta,

View File

@@ -20,13 +20,43 @@ const CODEX_ALIAS_MODELS: Record<
model: 'gpt-5.4', model: 'gpt-5.4',
reasoningEffort: 'high', reasoningEffort: 'high',
}, },
'gpt-5.4': {
model: 'gpt-5.4',
reasoningEffort: 'high',
},
'gpt-5.3-codex': {
model: 'gpt-5.3-codex',
reasoningEffort: 'high',
},
'gpt-5.3-codex-spark': {
model: 'gpt-5.3-codex-spark',
},
codexspark: { codexspark: {
model: 'gpt-5.3-codex-spark', model: 'gpt-5.3-codex-spark',
}, },
'gpt-5.2-codex': {
model: 'gpt-5.2-codex',
reasoningEffort: 'high',
},
'gpt-5.1-codex-max': {
model: 'gpt-5.1-codex-max',
reasoningEffort: 'high',
},
'gpt-5.1-codex-mini': {
model: 'gpt-5.1-codex-mini',
},
'gpt-5.4-mini': {
model: 'gpt-5.4-mini',
reasoningEffort: 'medium',
},
'gpt-5.2': {
model: 'gpt-5.2',
reasoningEffort: 'medium',
},
} as const } as const
type CodexAlias = keyof typeof CODEX_ALIAS_MODELS type CodexAlias = keyof typeof CODEX_ALIAS_MODELS
type ReasoningEffort = 'low' | 'medium' | 'high' type ReasoningEffort = 'low' | 'medium' | 'high' | 'xhigh'
export type ProviderTransport = 'chat_completions' | 'codex_responses' export type ProviderTransport = 'chat_completions' | 'codex_responses'
@@ -102,7 +132,7 @@ function decodeJwtPayload(token: string): Record<string, unknown> | undefined {
function parseReasoningEffort(value: string | undefined): ReasoningEffort | undefined { function parseReasoningEffort(value: string | undefined): ReasoningEffort | undefined {
if (!value) return undefined if (!value) return undefined
const normalized = value.trim().toLowerCase() const normalized = value.trim().toLowerCase()
if (normalized === 'low' || normalized === 'medium' || normalized === 'high') { if (normalized === 'low' || normalized === 'medium' || normalized === 'high' || normalized === 'xhigh') {
return normalized return normalized
} }
return undefined return undefined
@@ -193,6 +223,7 @@ export function resolveProviderRequest(options?: {
model?: string model?: string
baseUrl?: string baseUrl?: string
fallbackModel?: string fallbackModel?: string
reasoningEffortOverride?: ReasoningEffort
}): ResolvedProviderRequest { }): ResolvedProviderRequest {
const isGithubMode = isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB) const isGithubMode = isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB)
const requestedModel = const requestedModel =
@@ -217,6 +248,11 @@ export function resolveProviderRequest(options?: {
? normalizeGithubModelsApiModel(requestedModel) ? normalizeGithubModelsApiModel(requestedModel)
: descriptor.baseModel : descriptor.baseModel
const reasoning = options?.reasoningEffortOverride
? { effort: options.reasoningEffortOverride }
: descriptor.reasoning
return { return {
transport, transport,
requestedModel, requestedModel,
@@ -227,7 +263,7 @@ export function resolveProviderRequest(options?: {
? DEFAULT_CODEX_BASE_URL ? DEFAULT_CODEX_BASE_URL
: DEFAULT_OPENAI_BASE_URL) : DEFAULT_OPENAI_BASE_URL)
).replace(/\/+$/, ''), ).replace(/\/+$/, ''),
reasoning: descriptor.reasoning, reasoning,
} }
} }
@@ -336,3 +372,11 @@ export function resolveCodexApiCredentials(
source: 'auth.json', source: 'auth.json',
} }
} }
/**
 * Look up the default reasoning effort for a Codex alias model, if any.
 * The model string is lowercased and anything after a '?' is stripped
 * before the alias-table lookup; unknown models yield undefined.
 */
export function getReasoningEffortForModel(model: string): ReasoningEffort | undefined {
  const lowered = model.trim().toLowerCase()
  const queryStart = lowered.indexOf('?')
  const base = queryStart === -1 ? lowered : lowered.slice(0, queryStart)
  return CODEX_ALIAS_MODELS[base as CodexAlias]?.reasoningEffort
}

View File

@@ -17,6 +17,14 @@ export const EFFORT_LEVELS = [
'max', 'max',
] as const satisfies readonly EffortLevel[] ] as const satisfies readonly EffortLevel[]
// Effort levels recognized by OpenAI/Codex backends, ordered lowest → highest.
// 'xhigh' is the OpenAI counterpart of Claude's 'max'.
export const OPENAI_EFFORT_LEVELS = [
  'low',
  'medium',
  'high',
  'xhigh',
] as const

// Union of the literal strings above: 'low' | 'medium' | 'high' | 'xhigh'.
export type OpenAIEffortLevel = typeof OPENAI_EFFORT_LEVELS[number]
export type EffortValue = EffortLevel | number export type EffortValue = EffortLevel | number
// @[MODEL LAUNCH]: Add the new model to the allowlist if it supports the effort parameter. // @[MODEL LAUNCH]: Add the new model to the allowlist if it supports the effort parameter.
@@ -68,6 +76,46 @@ export function isEffortLevel(value: string): value is EffortLevel {
return (EFFORT_LEVELS as readonly string[]).includes(value) return (EFFORT_LEVELS as readonly string[]).includes(value)
} }
// Type guard: true when `value` is one of the OpenAI/Codex effort levels.
export function isOpenAIEffortLevel(value: string): value is OpenAIEffortLevel {
  for (const level of OPENAI_EFFORT_LEVELS) {
    if (level === value) {
      return true
    }
  }
  return false
}
// Whether effort should follow OpenAI semantics. This is decided purely by
// the active API provider; the `model` argument is currently unused and kept
// for signature parity with the other effort helpers.
export function modelUsesOpenAIEffort(model: string): boolean {
  switch (getAPIProvider()) {
    case 'openai':
    case 'codex':
      return true
    default:
      return false
  }
}
/**
 * List the effort levels a user may pick for `model`.
 * OpenAI/Codex providers expose the four-level OpenAI scale; Claude models
 * always offer low/medium/high, plus 'max' where the model supports it.
 */
export function getAvailableEffortLevels(model: string): EffortLevel[] | OpenAIEffortLevel[] {
  if (modelUsesOpenAIEffort(model)) {
    return [...OPENAI_EFFORT_LEVELS] as OpenAIEffortLevel[]
  }
  const base: EffortLevel[] = ['low', 'medium', 'high']
  return modelSupportsMaxEffort(model) ? [...base, 'max'] : base
}
// Human-readable label for an effort level: special-cases the two levels
// whose display name is not a simple capitalization of the identifier.
export function getEffortLevelLabel(level: EffortLevel | OpenAIEffortLevel): string {
  switch (level) {
    case 'xhigh':
      return 'Extra High'
    case 'max':
      return 'Max'
    default:
      // All remaining levels are lowercase words; uppercase the first letter.
      return level.charAt(0).toUpperCase() + level.slice(1)
  }
}
// Map an OpenAI effort level onto Claude's scale: 'xhigh' becomes 'max',
// every other level is shared between the two scales and passes through.
export function openAIEffortToStandard(level: OpenAIEffortLevel): EffortLevel {
  return level === 'xhigh' ? 'max' : level
}
// Inverse of openAIEffortToStandard: Claude's 'max' becomes OpenAI's 'xhigh';
// low/medium/high are identical on both scales.
export function standardEffortToOpenAI(level: EffortLevel): OpenAIEffortLevel {
  return level === 'max' ? 'xhigh' : (level as OpenAIEffortLevel)
}
// Uppercase the first character of a string; the empty string is returned
// unchanged (the regex simply finds nothing to replace).
function capitalize(s: string): string {
  return s.replace(/^./, c => c.toUpperCase())
}
export function parseEffortValue(value: unknown): EffortValue | undefined { export function parseEffortValue(value: unknown): EffortValue | undefined {
if (value === undefined || value === null || value === '') { if (value === undefined || value === null || value === '') {
return undefined return undefined
@@ -221,7 +269,7 @@ export function convertEffortValueToLevel(value: EffortValue): EffortLevel {
* @param level The effort level to describe * @param level The effort level to describe
* @returns Human-readable description * @returns Human-readable description
*/ */
export function getEffortLevelDescription(level: EffortLevel): string { export function getEffortLevelDescription(level: EffortLevel | OpenAIEffortLevel): string {
switch (level) { switch (level) {
case 'low': case 'low':
return 'Quick, straightforward implementation with minimal overhead' return 'Quick, straightforward implementation with minimal overhead'
@@ -231,6 +279,8 @@ export function getEffortLevelDescription(level: EffortLevel): string {
return 'Comprehensive implementation with extensive testing and documentation' return 'Comprehensive implementation with extensive testing and documentation'
case 'max': case 'max':
return 'Maximum capability with deepest reasoning (Opus 4.6 only)' return 'Maximum capability with deepest reasoning (Opus 4.6 only)'
case 'xhigh':
return 'Extra high reasoning effort for complex tasks (OpenAI/Codex)'
} }
} }

View File

@@ -6,8 +6,6 @@ export const MODEL_ALIASES = [
'sonnet[1m]', 'sonnet[1m]',
'opus[1m]', 'opus[1m]',
'opusplan', 'opusplan',
'codexplan',
'codexspark',
] as const ] as const
export type ModelAlias = (typeof MODEL_ALIASES)[number] export type ModelAlias = (typeof MODEL_ALIASES)[number]

View File

@@ -123,6 +123,10 @@ export function getDefaultOpusModel(): ModelName {
if (getAPIProvider() === 'openai') { if (getAPIProvider() === 'openai') {
return process.env.OPENAI_MODEL || 'gpt-4o' return process.env.OPENAI_MODEL || 'gpt-4o'
} }
// Codex provider: use user-specified model or default to gpt-5.4
if (getAPIProvider() === 'codex') {
return process.env.OPENAI_MODEL || 'gpt-5.4'
}
// 3P providers (Bedrock, Vertex, Foundry) — kept as a separate branch // 3P providers (Bedrock, Vertex, Foundry) — kept as a separate branch
// even when values match, since 3P availability lags firstParty and // even when values match, since 3P availability lags firstParty and
// these will diverge again at the next model launch. // these will diverge again at the next model launch.
@@ -145,6 +149,10 @@ export function getDefaultSonnetModel(): ModelName {
if (getAPIProvider() === 'openai') { if (getAPIProvider() === 'openai') {
return process.env.OPENAI_MODEL || 'gpt-4o' return process.env.OPENAI_MODEL || 'gpt-4o'
} }
// Codex provider
if (getAPIProvider() === 'codex') {
return process.env.OPENAI_MODEL || 'gpt-5.4'
}
// Default to Sonnet 4.5 for 3P since they may not have 4.6 yet // Default to Sonnet 4.5 for 3P since they may not have 4.6 yet
if (getAPIProvider() !== 'firstParty') { if (getAPIProvider() !== 'firstParty') {
return getModelStrings().sonnet45 return getModelStrings().sonnet45
@@ -165,6 +173,10 @@ export function getDefaultHaikuModel(): ModelName {
if (getAPIProvider() === 'openai') { if (getAPIProvider() === 'openai') {
return process.env.OPENAI_MODEL || 'gpt-4o-mini' return process.env.OPENAI_MODEL || 'gpt-4o-mini'
} }
// Codex provider
if (getAPIProvider() === 'codex') {
return process.env.OPENAI_MODEL || 'gpt-5.4'
}
// Haiku 4.5 is available on all platforms (first-party, Foundry, Bedrock, Vertex) // Haiku 4.5 is available on all platforms (first-party, Foundry, Bedrock, Vertex)
return getModelStrings().haiku45 return getModelStrings().haiku45
@@ -217,6 +229,10 @@ export function getDefaultMainLoopModelSetting(): ModelName | ModelAlias {
if (getAPIProvider() === 'openai') { if (getAPIProvider() === 'openai') {
return process.env.OPENAI_MODEL || 'gpt-4o' return process.env.OPENAI_MODEL || 'gpt-4o'
} }
// Codex provider: always use the configured Codex model (default gpt-5.4)
if (getAPIProvider() === 'codex') {
return process.env.OPENAI_MODEL || 'gpt-5.4'
}
// Ants default to defaultModel from flag config, or Opus 1M if not configured // Ants default to defaultModel from flag config, or Opus 1M if not configured
if (process.env.USER_TYPE === 'ant') { if (process.env.USER_TYPE === 'ant') {
@@ -343,12 +359,6 @@ export function renderDefaultModelSetting(
if (setting === 'opusplan') { if (setting === 'opusplan') {
return 'Opus 4.6 in plan mode, else Sonnet 4.6' return 'Opus 4.6 in plan mode, else Sonnet 4.6'
} }
if (setting === 'codexplan') {
return 'Codex Plan (GPT-5.4 high reasoning)'
}
if (setting === 'codexspark') {
return 'Codex Spark (GPT-5.3 Codex Spark)'
}
return renderModelName(parseUserSpecifiedModel(setting)) return renderModelName(parseUserSpecifiedModel(setting))
} }
@@ -383,11 +393,12 @@ export function renderModelSetting(setting: ModelName | ModelAlias): string {
if (setting === 'opusplan') { if (setting === 'opusplan') {
return 'Opus Plan' return 'Opus Plan'
} }
// Handle Codex models - show actual model name + resolved model
if (setting === 'codexplan') { if (setting === 'codexplan') {
return 'Codex Plan' return 'codexplan (gpt-5.4)'
} }
if (setting === 'codexspark') { if (setting === 'codexspark') {
return 'Codex Spark' return 'codexspark (gpt-5.3-codex-spark)'
} }
if (isModelAlias(setting)) { if (isModelAlias(setting)) {
return capitalize(setting) return capitalize(setting)
@@ -401,8 +412,8 @@ export function renderModelSetting(setting: ModelName | ModelAlias): string {
* if the model is not recognized as a public model. * if the model is not recognized as a public model.
*/ */
export function getPublicModelDisplayName(model: ModelName): string | null { export function getPublicModelDisplayName(model: ModelName): string | null {
// For OpenAI/Gemini providers, show the actual model name not a Claude alias // For OpenAI/Gemini/Codex providers, show the actual model name not a Claude alias
if (getAPIProvider() === 'openai' || getAPIProvider() === 'gemini') { if (getAPIProvider() === 'openai' || getAPIProvider() === 'gemini' || getAPIProvider() === 'codex') {
return null return null
} }
switch (model) { switch (model) {
@@ -517,10 +528,6 @@ export function parseUserSpecifiedModel(
if (isModelAlias(modelString)) { if (isModelAlias(modelString)) {
switch (modelString) { switch (modelString) {
case 'codexplan':
return modelInputTrimmed
case 'codexspark':
return modelInputTrimmed
case 'opusplan': case 'opusplan':
return getDefaultSonnetModel() + (has1mTag ? '[1m]' : '') // Sonnet is default, Opus in plan mode return getDefaultSonnetModel() + (has1mTag ? '[1m]' : '') // Sonnet is default, Opus in plan mode
case 'sonnet': case 'sonnet':
@@ -535,6 +542,14 @@ export function parseUserSpecifiedModel(
} }
} }
// Handle Codex aliases - map to actual model names
if (modelString === 'codexplan') {
return 'gpt-5.4'
}
if (modelString === 'codexspark') {
return 'gpt-5.3-codex-spark'
}
// Opus 4/4.1 are no longer available on the first-party API (same as // Opus 4/4.1 are no longer available on the first-party API (same as
// Claude.ai) — silently remap to the current Opus default. The 'opus' // Claude.ai) — silently remap to the current Opus default. The 'opus'
// alias already resolves to 4.6, so the only users on these explicit // alias already resolves to 4.6, so the only users on these explicit

View File

@@ -268,20 +268,65 @@ function getOpusPlanOption(): ModelOption {
function getCodexPlanOption(): ModelOption { function getCodexPlanOption(): ModelOption {
return { return {
value: 'codexplan', value: 'gpt-5.4',
label: 'Codex Plan', label: 'gpt-5.4',
description: 'GPT-5.4 on the Codex backend with high reasoning', description: 'GPT-5.4 on the Codex backend with high reasoning',
} }
} }
function getCodexSparkOption(): ModelOption { function getCodexSparkOption(): ModelOption {
return { return {
value: 'codexspark', value: 'gpt-5.3-codex-spark',
label: 'Codex Spark', label: 'gpt-5.3-codex-spark',
description: 'GPT-5.3 Codex Spark on the Codex backend for fast tool loops', description: 'GPT-5.3 Codex Spark on the Codex backend for fast tool loops',
} }
} }
// Model-picker entries for every Codex-backend model we expose.
// Label always mirrors the raw model id so users see exactly what is sent.
function getCodexModelOptions(): ModelOption[] {
  const entries: Array<[string, string]> = [
    ['gpt-5.4', 'GPT-5.4 with high reasoning'],
    ['gpt-5.3-codex', 'GPT-5.3 Codex with high reasoning'],
    ['gpt-5.3-codex-spark', 'GPT-5.3 Codex Spark for fast tool loops'],
    ['codexspark', 'GPT-5.3 Codex Spark alias for fast tool loops'],
    ['gpt-5.2-codex', 'GPT-5.2 Codex with high reasoning'],
    ['gpt-5.1-codex-max', 'GPT-5.1 Codex Max for deep reasoning'],
    ['gpt-5.1-codex-mini', 'GPT-5.1 Codex Mini - faster, cheaper'],
    ['gpt-5.4-mini', 'GPT-5.4 Mini - faster, cheaper'],
  ]
  return entries.map(([id, description]) => ({
    value: id,
    label: id,
    description,
  }))
}
// @[MODEL LAUNCH]: Update the model picker lists below to include/reorder options for the new model. // @[MODEL LAUNCH]: Update the model picker lists below to include/reorder options for the new model.
// Each user tier (ant, Max/Team Premium, Pro/Team Standard/Enterprise, PAYG 1P, PAYG 3P) has its own list. // Each user tier (ant, Max/Team Premium, Pro/Team Standard/Enterprise, PAYG 1P, PAYG 3P) has its own list.
function getModelOptionsBase(fastMode = false): ModelOption[] { function getModelOptionsBase(fastMode = false): ModelOption[] {
@@ -360,8 +405,9 @@ function getModelOptionsBase(fastMode = false): ModelOption[] {
// PAYG 3P: Default (Sonnet 4.5) + Sonnet (3P custom) or Sonnet 4.6/1M + Opus (3P custom) or Opus 4.1/Opus 4.6/Opus1M + Haiku + Opus 4.1 // PAYG 3P: Default (Sonnet 4.5) + Sonnet (3P custom) or Sonnet 4.6/1M + Opus (3P custom) or Opus 4.1/Opus 4.6/Opus1M + Haiku + Opus 4.1
const payg3pOptions = [getDefaultOptionForUser(fastMode)] const payg3pOptions = [getDefaultOptionForUser(fastMode)]
if (getAPIProvider() === 'openai') { // Add Codex models for openai and codex providers
payg3pOptions.push(getCodexPlanOption(), getCodexSparkOption()) if (getAPIProvider() === 'openai' || getAPIProvider() === 'codex') {
payg3pOptions.push(...getCodexModelOptions())
} }
const customSonnet = getCustomSonnetOption() const customSonnet = getCustomSonnetOption()
@@ -517,9 +563,9 @@ export function getModelOptions(fastMode = false): ModelOption[] {
return filterModelOptionsByAllowlist(options) return filterModelOptionsByAllowlist(options)
} else if (customModel === 'opusplan') { } else if (customModel === 'opusplan') {
return filterModelOptionsByAllowlist([...options, getOpusPlanOption()]) return filterModelOptionsByAllowlist([...options, getOpusPlanOption()])
} else if (customModel === 'codexplan') { } else if (customModel === 'gpt-5.4') {
return filterModelOptionsByAllowlist([...options, getCodexPlanOption()]) return filterModelOptionsByAllowlist([...options, getCodexPlanOption()])
} else if (customModel === 'codexspark') { } else if (customModel === 'gpt-5.3-codex-spark') {
return filterModelOptionsByAllowlist([...options, getCodexSparkOption()]) return filterModelOptionsByAllowlist([...options, getCodexSparkOption()])
} else if (customModel === 'opus' && getAPIProvider() === 'firstParty') { } else if (customModel === 'opus' && getAPIProvider() === 'firstParty') {
return filterModelOptionsByAllowlist([ return filterModelOptionsByAllowlist([
@@ -554,11 +600,23 @@ export function getModelOptions(fastMode = false): ModelOption[] {
*/ */
function filterModelOptionsByAllowlist(options: ModelOption[]): ModelOption[] { function filterModelOptionsByAllowlist(options: ModelOption[]): ModelOption[] {
const settings = getSettings_DEPRECATED() || {} const settings = getSettings_DEPRECATED() || {}
if (!settings.availableModels) { const filtered = !settings.availableModels
return options // No restrictions ? options // No restrictions
} : options.filter(
return options.filter(
opt => opt =>
opt.value === null || (opt.value !== null && isModelAllowed(opt.value)), opt.value === null || (opt.value !== null && isModelAllowed(opt.value)),
) )
// Select state uses option values as identity keys. If two entries share the
// same value (e.g. provider-specific aliases collapsing to one model ID),
// navigation/focus can become inconsistent and appear as duplicate rendering.
const seen = new Set<string>()
return filtered.filter(opt => {
const key = String(opt.value)
if (seen.has(key)) {
return false
}
seen.add(key)
return true
})
} }

View File

@@ -23,9 +23,12 @@ export type ModelStrings = Record<ModelKey, string>
const MODEL_KEYS = Object.keys(ALL_MODEL_CONFIGS) as ModelKey[] const MODEL_KEYS = Object.keys(ALL_MODEL_CONFIGS) as ModelKey[]
function getBuiltinModelStrings(provider: APIProvider): ModelStrings { function getBuiltinModelStrings(provider: APIProvider): ModelStrings {
// Codex piggybacks on the OpenAI provider transport for Anthropic tier aliases.
// Reuse OpenAI mappings so model string lookups never return undefined.
const providerKey = provider === 'codex' ? 'openai' : provider
const out = {} as ModelStrings const out = {} as ModelStrings
for (const key of MODEL_KEYS) { for (const key of MODEL_KEYS) {
out[key] = ALL_MODEL_CONFIGS[key][provider] out[key] = ALL_MODEL_CONFIGS[key][providerKey]
} }
return out return out
} }

View File

@@ -9,6 +9,7 @@ export type APIProvider =
| 'openai' | 'openai'
| 'gemini' | 'gemini'
| 'github' | 'github'
| 'codex'
export function getAPIProvider(): APIProvider { export function getAPIProvider(): APIProvider {
return isEnvTruthy(process.env.CLAUDE_CODE_USE_GEMINI) return isEnvTruthy(process.env.CLAUDE_CODE_USE_GEMINI)
@@ -16,7 +17,9 @@ export function getAPIProvider(): APIProvider {
: isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB) : isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB)
? 'github' ? 'github'
: isEnvTruthy(process.env.CLAUDE_CODE_USE_OPENAI) : isEnvTruthy(process.env.CLAUDE_CODE_USE_OPENAI)
? 'openai' ? isCodexModel()
? 'codex'
: 'openai'
: isEnvTruthy(process.env.CLAUDE_CODE_USE_BEDROCK) : isEnvTruthy(process.env.CLAUDE_CODE_USE_BEDROCK)
? 'bedrock' ? 'bedrock'
: isEnvTruthy(process.env.CLAUDE_CODE_USE_VERTEX) : isEnvTruthy(process.env.CLAUDE_CODE_USE_VERTEX)
@@ -29,6 +32,19 @@ export function getAPIProvider(): APIProvider {
export function usesAnthropicAccountFlow(): boolean { export function usesAnthropicAccountFlow(): boolean {
return getAPIProvider() === 'firstParty' return getAPIProvider() === 'firstParty'
} }
/**
 * True when OPENAI_MODEL names one of the Codex model IDs (or a Codex plan
 * alias), used to route the OpenAI transport through the 'codex' provider.
 * Matching is case-insensitive; an unset variable is treated as non-Codex.
 */
function isCodexModel(): boolean {
  const codexModelIds = new Set([
    'codexplan',
    'codexspark',
    'gpt-5.4',
    'gpt-5.3-codex',
    'gpt-5.3-codex-spark',
    'gpt-5.2-codex',
    'gpt-5.1-codex-max',
    'gpt-5.1-codex-mini',
  ])
  const configured = (process.env.OPENAI_MODEL || '').toLowerCase()
  return codexModelIds.has(configured)
}
export function getAPIProviderForStatsig(): AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS { export function getAPIProviderForStatsig(): AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS {
return getAPIProvider() as AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS return getAPIProvider() as AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS

View File

@@ -12,6 +12,7 @@ import { formatNumber } from './format.js';
import { getIdeClientName, type IDEExtensionInstallationStatus, isJetBrainsIde, toIDEDisplayName } from './ide.js'; import { getIdeClientName, type IDEExtensionInstallationStatus, isJetBrainsIde, toIDEDisplayName } from './ide.js';
import { getClaudeAiUserDefaultModelDescription, modelDisplayString } from './model/model.js'; import { getClaudeAiUserDefaultModelDescription, modelDisplayString } from './model/model.js';
import { getAPIProvider } from './model/providers.js'; import { getAPIProvider } from './model/providers.js';
import { resolveProviderRequest } from '../services/api/providerConfig.js';
import { getMTLSConfig } from './mtls.js'; import { getMTLSConfig } from './mtls.js';
import { checkInstall } from './nativeInstaller/index.js'; import { checkInstall } from './nativeInstaller/index.js';
import { getProxyUrl } from './proxy.js'; import { getProxyUrl } from './proxy.js';
@@ -247,6 +248,7 @@ export function buildAPIProviderProperties(): Property[] {
vertex: 'Google Vertex AI', vertex: 'Google Vertex AI',
foundry: 'Microsoft Foundry', foundry: 'Microsoft Foundry',
openai: 'OpenAI-compatible', openai: 'OpenAI-compatible',
codex: 'Codex',
gemini: 'Google Gemini', gemini: 'Google Gemini',
}[apiProvider]; }[apiProvider];
properties.push({ properties.push({
@@ -325,34 +327,73 @@ export function buildAPIProviderProperties(): Property[] {
} }
} else if (apiProvider === 'openai') { } else if (apiProvider === 'openai') {
const openaiBaseUrl = process.env.OPENAI_BASE_URL; const openaiBaseUrl = process.env.OPENAI_BASE_URL;
if (openaiBaseUrl) { if (openaiBaseUrl) {
properties.push({ properties.push({
label: 'OpenAI base URL', label: 'OpenAI base URL',
value: redactSecretValueForDisplay(openaiBaseUrl, process.env) ?? openaiBaseUrl value: redactSecretValueForDisplay(openaiBaseUrl, process.env) ?? openaiBaseUrl
}); });
}
const openaiModel = process.env.OPENAI_MODEL;
if (openaiModel) {
// Build display model string with resolved model + reasoning effort
let modelDisplay = openaiModel;
const resolved = resolveProviderRequest({ model: openaiModel });
const resolvedModel = resolved.resolvedModel;
const reasoningEffort = resolved.reasoning?.effort;
if (resolvedModel && resolvedModel !== openaiModel.toLowerCase()) {
// Show resolved model name
modelDisplay = resolvedModel;
} }
const openaiModel = process.env.OPENAI_MODEL; if (reasoningEffort) {
if (openaiModel) { modelDisplay = `${modelDisplay} (${reasoningEffort})`;
properties.push({
label: 'Model',
value: redactSecretValueForDisplay(openaiModel, process.env) ?? openaiModel
});
} }
} else if (apiProvider === 'gemini') { properties.push({
const geminiBaseUrl = process.env.GEMINI_BASE_URL; label: 'Model',
if (geminiBaseUrl) { value: redactSecretValueForDisplay(modelDisplay, process.env) ?? modelDisplay
properties.push({ });
label: 'Gemini base URL', }
value: redactSecretValueForDisplay(geminiBaseUrl, process.env) ?? geminiBaseUrl } else if (apiProvider === 'codex') {
}); const codexBaseUrl = process.env.OPENAI_BASE_URL;
if (codexBaseUrl) {
properties.push({
label: 'Codex base URL',
value: redactSecretValueForDisplay(codexBaseUrl, process.env) ?? codexBaseUrl
});
}
const openaiModel = process.env.OPENAI_MODEL;
if (openaiModel) {
// Build display model string with resolved model + reasoning effort
let modelDisplay = openaiModel;
const resolved = resolveProviderRequest({ model: openaiModel });
const resolvedModel = resolved.resolvedModel;
const reasoningEffort = resolved.reasoning?.effort;
if (resolvedModel && resolvedModel !== openaiModel.toLowerCase()) {
// Show resolved model name
modelDisplay = resolvedModel;
} }
const geminiModel = process.env.GEMINI_MODEL; if (reasoningEffort) {
if (geminiModel) { modelDisplay = `${modelDisplay} (${reasoningEffort})`;
properties.push({
label: 'Model',
value: redactSecretValueForDisplay(geminiModel, process.env) ?? geminiModel
});
} }
properties.push({
label: 'Model',
value: redactSecretValueForDisplay(modelDisplay, process.env) ?? modelDisplay
});
}
} else if (apiProvider === 'gemini') {
const geminiBaseUrl = process.env.GEMINI_BASE_URL;
if (geminiBaseUrl) {
properties.push({
label: 'Gemini base URL',
value: redactSecretValueForDisplay(geminiBaseUrl, process.env) ?? geminiBaseUrl
});
}
const geminiModel = process.env.GEMINI_MODEL;
if (geminiModel) {
properties.push({
label: 'Model',
value: redactSecretValueForDisplay(geminiModel, process.env) ?? geminiModel
});
}
} }
const proxyUrl = getProxyUrl(); const proxyUrl = getProxyUrl();
if (proxyUrl) { if (proxyUrl) {

View File

@@ -286,6 +286,25 @@ function createCommandSuggestionItem(
} }
} }
/**
 * Ensure suggestion IDs are unique for React keys and selection logic.
 * If duplicates exist, append a stable numeric suffix to subsequent entries.
 */
function ensureUniqueSuggestionIds(items: SuggestionItem[]): SuggestionItem[] {
  // Tracks how many times each id has been emitted so far.
  const occurrences = new Map<string, number>()
  const deduped: SuggestionItem[] = []
  for (const item of items) {
    const priorCount = occurrences.get(item.id) ?? 0
    occurrences.set(item.id, priorCount + 1)
    // First occurrence keeps its original id; later ones get "#2", "#3", ...
    deduped.push(
      priorCount === 0 ? item : { ...item, id: `${item.id}#${priorCount + 1}` },
    )
  }
  return deduped
}
/** /**
* Generate command suggestions based on input * Generate command suggestions based on input
*/ */
@@ -369,14 +388,14 @@ export function generateCommandSuggestions(
// Combine with built-in commands prioritized after recently used, // Combine with built-in commands prioritized after recently used,
// so they remain visible even when many skills are installed // so they remain visible even when many skills are installed
return [ return ensureUniqueSuggestionIds([
...recentlyUsed, ...recentlyUsed,
...builtinCommands, ...builtinCommands,
...userCommands, ...userCommands,
...projectCommands, ...projectCommands,
...policyCommands, ...policyCommands,
...otherCommands, ...otherCommands,
].map(cmd => createCommandSuggestionItem(cmd)) ].map(cmd => createCommandSuggestionItem(cmd)))
} }
// The Fuse index filters isHidden at build time and is keyed on the // The Fuse index filters isHidden at build time and is keyed on the
@@ -491,10 +510,13 @@ export function generateCommandSuggestions(
if (hiddenExact) { if (hiddenExact) {
const hiddenId = getCommandId(hiddenExact) const hiddenId = getCommandId(hiddenExact)
if (!fuseSuggestions.some(s => s.id === hiddenId)) { if (!fuseSuggestions.some(s => s.id === hiddenId)) {
return [createCommandSuggestionItem(hiddenExact), ...fuseSuggestions] return ensureUniqueSuggestionIds([
createCommandSuggestionItem(hiddenExact),
...fuseSuggestions,
])
} }
} }
return fuseSuggestions return ensureUniqueSuggestionIds(fuseSuggestions)
} }
/** /**