feat: Refactor model handling & reasoning effort across navigation, typeahead, OpenAI/Codex providers, API shim, configs, and UI (adds EffortPicker, new mappings/options, unique suggestion IDs, effort utilities; removes deprecated aliases; defaults Codex to gpt-5.4; improves selection logic and status display)
@@ -84,29 +84,14 @@ const reducer = <T>(state: State<T>, action: Action<T>): State<T> => {
return state
}

// Wrap to first item if at the end
const next = item.next || state.optionMap.first

if (!next) {
return state
}

// When wrapping to first, reset viewport to start
if (!item.next && next === state.optionMap.first) {
return {
...state,
focusedValue: next.value,
visibleFromIndex: 0,
visibleToIndex: state.visibleOptionCount,
}
}

const needsToScroll = next.index >= state.visibleToIndex
// If there's a next item in the list, go to it
if (item.next) {
const needsToScroll = item.next.index >= state.visibleToIndex

if (!needsToScroll) {
return {
...state,
focusedValue: next.value,
focusedValue: item.next.value,
}
}

@@ -119,12 +104,27 @@ const reducer = <T>(state: State<T>, action: Action<T>): State<T> => {

return {
...state,
focusedValue: next.value,
focusedValue: item.next.value,
visibleFromIndex: nextVisibleFromIndex,
visibleToIndex: nextVisibleToIndex,
}
}

// No next item - wrap to first item
const firstItem = state.optionMap.first
if (!firstItem) {
return state
}

// When wrapping to first, reset viewport to start
return {
...state,
focusedValue: firstItem.value,
visibleFromIndex: 0,
visibleToIndex: state.visibleOptionCount,
}
}

case 'focus-previous-option': {
if (state.focusedValue === undefined) {
return state
@@ -136,15 +136,35 @@ const reducer = <T>(state: State<T>, action: Action<T>): State<T> => {
return state
}

// Wrap to last item if at the beginning
const previous = item.previous || state.optionMap.last
// If there's a previous item in the list, go to it
if (item.previous) {
const needsToScroll = item.previous.index < state.visibleFromIndex

if (!previous) {
if (!needsToScroll) {
return {
...state,
focusedValue: item.previous.value,
}
}

const nextVisibleFromIndex = Math.max(0, state.visibleFromIndex - 1)
const nextVisibleToIndex = nextVisibleFromIndex + state.visibleOptionCount

return {
...state,
focusedValue: item.previous.value,
visibleFromIndex: nextVisibleFromIndex,
visibleToIndex: nextVisibleToIndex,
}
}

// No previous item - wrap to last item
const lastItem = state.optionMap.last
if (!lastItem) {
return state
}

// When wrapping to last, reset viewport to end
if (!item.previous && previous === state.optionMap.last) {
const nextVisibleToIndex = state.optionMap.size
const nextVisibleFromIndex = Math.max(
0,
@@ -152,28 +172,7 @@ const reducer = <T>(state: State<T>, action: Action<T>): State<T> => {
)
return {
...state,
focusedValue: previous.value,
visibleFromIndex: nextVisibleFromIndex,
visibleToIndex: nextVisibleToIndex,
}
}

const needsToScroll = previous.index <= state.visibleFromIndex

if (!needsToScroll) {
return {
...state,
focusedValue: previous.value,
}
}

const nextVisibleFromIndex = Math.max(0, state.visibleFromIndex - 1)

const nextVisibleToIndex = nextVisibleFromIndex + state.visibleOptionCount

return {
...state,
focusedValue: previous.value,
focusedValue: lastItem.value,
visibleFromIndex: nextVisibleFromIndex,
visibleToIndex: nextVisibleToIndex,
}

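Both reducer branches above follow the same wrap-around pattern: step to the neighbouring option, scroll the viewport only when the focused index leaves it, and jump to the opposite end of the list when there is no neighbour. A minimal standalone sketch of that pattern, using plain indices instead of the linked option map (all names here are illustrative, not the component's API):

// Illustrative sketch of the focus/viewport rules the reducer implements.
// `focused` is the focused option's index; `from`/`to` bound the visible window.
type Viewport = { focused: number; from: number; to: number }

function focusNext(v: Viewport, count: number, visible: number): Viewport {
  if (count === 0) return v
  if (v.focused >= count - 1) {
    // No next item: wrap to the first option and reset the viewport to the start.
    return { focused: 0, from: 0, to: visible }
  }
  const focused = v.focused + 1
  if (focused < v.to) return { ...v, focused }
  // The new focus fell below the window: shift the window down by one row.
  return { focused, from: v.from + 1, to: v.to + 1 }
}

function focusPrevious(v: Viewport, count: number, visible: number): Viewport {
  if (count === 0) return v
  if (v.focused <= 0) {
    // No previous item: wrap to the last option and pin the viewport to the end.
    return { focused: count - 1, from: Math.max(0, count - visible), to: count }
  }
  const focused = v.focused - 1
  if (focused >= v.from) return { ...v, focused }
  // The new focus rose above the window: shift the window up by one row.
  const from = Math.max(0, v.from - 1)
  return { focused, from, to: from + visible }
}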
src/components/EffortPicker.tsx (new file, 152 lines)
@@ -0,0 +1,152 @@
import React, { useState } from 'react'
import { Box, Text } from '../ink.js'
import { useMainLoopModel } from '../hooks/useMainLoopModel.js'
import { useAppState, useSetAppState } from '../state/AppState.js'
import type { EffortLevel, OpenAIEffortLevel } from '../utils/effort.js'
import {
getAvailableEffortLevels,
getDisplayedEffortLevel,
getEffortLevelDescription,
getEffortLevelLabel,
getEffortValueDescription,
modelSupportsEffort,
modelUsesOpenAIEffort,
standardEffortToOpenAI,
isOpenAIEffortLevel,
} from '../utils/effort.js'
import { getAPIProvider } from '../utils/model/providers.js'
import { getReasoningEffortForModel } from '../services/api/providerConfig.js'
import { Select } from './CustomSelect/select.js'
import { effortLevelToSymbol } from './EffortIndicator.js'
import { KeyboardShortcutHint } from './design-system/KeyboardShortcutHint.js'
import { Byline } from './design-system/Byline.js'

type EffortOption = {
label: React.ReactNode
value: string
description: string
isAvailable: boolean
}

type Props = {
onSelect: (effort: EffortLevel | undefined) => void
onCancel?: () => void
}

export function EffortPicker({ onSelect, onCancel }: Props) {
const model = useMainLoopModel()
const appStateEffort = useAppState((s: any) => s.effortValue)
const setAppState = useSetAppState()
const provider = getAPIProvider()
const usesOpenAIEffort = modelUsesOpenAIEffort(model)
const availableLevels = getAvailableEffortLevels(model)
const currentDisplayedLevel = getDisplayedEffortLevel(model, appStateEffort)

// For OpenAI/Codex, get the model's default reasoning effort
const modelReasoningEffort = usesOpenAIEffort ? getReasoningEffortForModel(model) : undefined
const defaultEffortForModel = modelReasoningEffort || currentDisplayedLevel

const options: EffortOption[] = [
{
label: <EffortOptionLabel level="auto" text="Auto" isCurrent={false} />,
value: 'auto',
description: 'Use the default effort level for your model',
isAvailable: true,
},
...availableLevels.map(level => {
const displayLevel = usesOpenAIEffort
? (level === 'xhigh' ? 'max' : level)
: level
const isCurrent = currentDisplayedLevel === displayLevel
return {
label: (
<EffortOptionLabel
level={level as EffortLevel}
text={getEffortLevelLabel(level as EffortLevel)}
isCurrent={isCurrent}
/>
),
value: level,
description: getEffortLevelDescription(level as EffortLevel),
isAvailable: true,
}
}),
]

function handleSelect(value: string) {
if (value === 'auto') {
setAppState(prev => ({
...prev,
effortValue: undefined,
}))
onSelect(undefined)
} else {
const effortLevel = value as EffortLevel
setAppState(prev => ({
...prev,
effortValue: effortLevel,
}))
onSelect(effortLevel)
}
}

function handleCancel() {
onCancel?.()
}

const supportsEffort = modelSupportsEffort(model)
// For OpenAI/Codex, use the model's default reasoning effort as initial focus
// For Claude, use the displayed effort level or 'auto'
const initialFocus = usesOpenAIEffort
? (modelReasoningEffort || 'auto')
: (appStateEffort ? String(appStateEffort) : 'auto')

return (
<Box flexDirection="column">
<Box marginBottom={1} flexDirection="column">
<Text color="remember" bold={true}>Set effort level</Text>
<Text dimColor={true}>
{usesOpenAIEffort
? `OpenAI/Codex provider (${provider})`
: supportsEffort
? `Claude model · ${provider} provider`
: `Effort not supported for this model`
}
</Text>
</Box>

<Box marginBottom={1}>
<Select
options={options}
defaultValue={initialFocus}
onChange={handleSelect}
onCancel={handleCancel}
visibleOptionCount={Math.min(6, options.length)}
inlineDescriptions={true}
/>
</Box>

<Box marginBottom={1}>
<Text dimColor={true} italic={true}>
<Byline>
<KeyboardShortcutHint shortcut="Enter" action="confirm" />
<KeyboardShortcutHint shortcut="Esc" action="cancel" />
</Byline>
</Text>
</Box>
</Box>
)
}

function EffortOptionLabel({ level, text, isCurrent }: { level: EffortLevel | 'auto', text: string, isCurrent: boolean }) {
const symbol = level === 'auto' ? '⊘' : effortLevelToSymbol(level as EffortLevel)
const color = isCurrent ? 'remember' : level === 'auto' ? 'subtle' : 'suggestion'

return (
<>
<Text color={color}>{symbol} </Text>
<Text bold={isCurrent}>{text}</Text>
{isCurrent && <Text dimColor={true}> (current)</Text>}
</>
)
}
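EffortPicker keeps the same onSelect/onCancel contract as the other pickers: choosing 'auto' clears the stored effort (onSelect(undefined)) so the model's default applies, and any explicit level is persisted to app state and reported to the caller. A hedged usage sketch; the dialog wrapper and onClose handling are illustrative, only the import path and props come from the new file above:

import React from 'react'
import { EffortPicker } from './components/EffortPicker.js'

// Illustrative wrapper: mount the picker, log the choice, and close the dialog.
export function EffortDialog({ onClose }: { onClose: () => void }) {
  return (
    <EffortPicker
      onSelect={effort => {
        // `undefined` means "auto": fall back to the model's default reasoning effort.
        console.log('effort selected:', effort ?? 'auto')
        onClose()
      }}
      onCancel={onClose}
    />
  )
}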
@@ -97,21 +97,45 @@ function detectProvider(): { name: string; model: string; baseUrl: string; isLoc
}

if (useOpenAI) {
const model = process.env.OPENAI_MODEL || 'gpt-4o'
const rawModel = process.env.OPENAI_MODEL || 'gpt-4o'
const baseUrl = process.env.OPENAI_BASE_URL || 'https://api.openai.com/v1'
const isLocal = /localhost|127\.0\.0\.1|0\.0\.0\.0/.test(baseUrl)
let name = 'OpenAI'
if (/deepseek/i.test(baseUrl) || /deepseek/i.test(model)) name = 'DeepSeek'
if (/deepseek/i.test(baseUrl) || /deepseek/i.test(rawModel)) name = 'DeepSeek'
else if (/openrouter/i.test(baseUrl)) name = 'OpenRouter'
else if (/together/i.test(baseUrl)) name = 'Together AI'
else if (/groq/i.test(baseUrl)) name = 'Groq'
else if (/mistral/i.test(baseUrl) || /mistral/i.test(model)) name = 'Mistral'
else if (/mistral/i.test(baseUrl) || /mistral/i.test(rawModel)) name = 'Mistral'
else if (/azure/i.test(baseUrl)) name = 'Azure OpenAI'
else if (/localhost:11434/i.test(baseUrl)) name = 'Ollama'
else if (/localhost:1234/i.test(baseUrl)) name = 'LM Studio'
else if (/llama/i.test(model)) name = 'Meta Llama'
else if (/llama/i.test(rawModel)) name = 'Meta Llama'
else if (isLocal) name = 'Local'
return { name, model, baseUrl, isLocal }

// Resolve model alias to actual model name + reasoning effort
let displayModel = rawModel
const codexAliases: Record<string, { model: string; reasoningEffort?: string }> = {
codexplan: { model: 'gpt-5.4', reasoningEffort: 'high' },
'gpt-5.4': { model: 'gpt-5.4', reasoningEffort: 'high' },
'gpt-5.3-codex': { model: 'gpt-5.3-codex', reasoningEffort: 'high' },
'gpt-5.3-codex-spark': { model: 'gpt-5.3-codex-spark' },
codexspark: { model: 'gpt-5.3-codex-spark' },
'gpt-5.2-codex': { model: 'gpt-5.2-codex', reasoningEffort: 'high' },
'gpt-5.1-codex-max': { model: 'gpt-5.1-codex-max', reasoningEffort: 'high' },
'gpt-5.1-codex-mini': { model: 'gpt-5.1-codex-mini' },
'gpt-5.4-mini': { model: 'gpt-5.4-mini', reasoningEffort: 'medium' },
'gpt-5.2': { model: 'gpt-5.2', reasoningEffort: 'medium' },
}
const alias = rawModel.toLowerCase()
if (alias in codexAliases) {
const resolved = codexAliases[alias]
displayModel = resolved.model
if (resolved.reasoningEffort) {
displayModel = `${displayModel} (${resolved.reasoningEffort})`
}
}

return { name, model: displayModel, baseUrl, isLocal }
}

// Default: Anthropic

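The alias table above only affects the status display: a recognised alias is swapped for its concrete model name, and a default reasoning effort, when one exists, is appended in parentheses. A reduced sketch of that lookup (the table is trimmed to two entries for illustration):

// Sketch of the display-name resolution done in detectProvider(); the full
// alias table is the one shown above.
const aliases: Record<string, { model: string; reasoningEffort?: string }> = {
  codexplan: { model: 'gpt-5.4', reasoningEffort: 'high' },
  codexspark: { model: 'gpt-5.3-codex-spark' },
}

function displayModel(rawModel: string): string {
  const resolved = aliases[rawModel.toLowerCase()]
  if (!resolved) return rawModel
  return resolved.reasoningEffort
    ? `${resolved.model} (${resolved.reasoningEffort})` // e.g. "gpt-5.4 (high)"
    : resolved.model
}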
@@ -1242,17 +1242,25 @@ export function useTypeahead({
const handleAutocompletePrevious = useCallback(() => {
setSuggestionsState(prev => ({
...prev,
selectedSuggestion: prev.selectedSuggestion <= 0 ? suggestions.length - 1 : prev.selectedSuggestion - 1
selectedSuggestion: prev.suggestions.length === 0
? -1
: prev.selectedSuggestion <= 0
? prev.suggestions.length - 1
: Math.min(prev.selectedSuggestion - 1, prev.suggestions.length - 1)
}));
}, [suggestions.length, setSuggestionsState]);
}, [setSuggestionsState]);

// Handler for autocomplete:next - selects next suggestion
const handleAutocompleteNext = useCallback(() => {
setSuggestionsState(prev => ({
...prev,
selectedSuggestion: prev.selectedSuggestion >= suggestions.length - 1 ? 0 : prev.selectedSuggestion + 1
selectedSuggestion: prev.suggestions.length === 0
? -1
: prev.selectedSuggestion >= prev.suggestions.length - 1
? 0
: Math.max(0, prev.selectedSuggestion + 1)
}));
}, [suggestions.length, setSuggestionsState]);
}, [setSuggestionsState]);

// Autocomplete context keybindings - only active when suggestions are visible
const autocompleteHandlers = useMemo(() => ({

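The change above derives the wrap-around arithmetic from prev.suggestions, the list the state actually renders from, and clamps the result, so a stale outer suggestions array can no longer yield an out-of-range index. The selection rule in isolation, as a pair of pure functions (names are illustrative):

// Pure form of the selection update used by handleAutocompletePrevious/Next.
function selectPrevious(selected: number, length: number): number {
  if (length === 0) return -1                 // nothing to select
  if (selected <= 0) return length - 1        // wrap to the last suggestion
  return Math.min(selected - 1, length - 1)   // clamp against a shrunken list
}

function selectNext(selected: number, length: number): number {
  if (length === 0) return -1
  if (selected >= length - 1) return 0        // wrap to the first suggestion
  return Math.max(0, selected + 1)
}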
@@ -665,9 +665,11 @@ class OpenAIShimStream {

class OpenAIShimMessages {
private defaultHeaders: Record<string, string>
private reasoningEffort?: 'low' | 'medium' | 'high' | 'xhigh'

constructor(defaultHeaders: Record<string, string>) {
constructor(defaultHeaders: Record<string, string>, reasoningEffort?: 'low' | 'medium' | 'high' | 'xhigh') {
this.defaultHeaders = defaultHeaders
this.reasoningEffort = reasoningEffort
}

create(
@@ -679,7 +681,7 @@ class OpenAIShimMessages {
let httpResponse: Response | undefined

const promise = (async () => {
const request = resolveProviderRequest({ model: params.model })
const request = resolveProviderRequest({ model: params.model, reasoningEffortOverride: self.reasoningEffort })
const response = await self._doRequest(request, params, options)
httpResponse = response

@@ -1018,9 +1020,11 @@ class OpenAIShimMessages {

class OpenAIShimBeta {
messages: OpenAIShimMessages
reasoningEffort?: 'low' | 'medium' | 'high' | 'xhigh'

constructor(defaultHeaders: Record<string, string>) {
this.messages = new OpenAIShimMessages(defaultHeaders)
constructor(defaultHeaders: Record<string, string>, reasoningEffort?: 'low' | 'medium' | 'high' | 'xhigh') {
this.messages = new OpenAIShimMessages(defaultHeaders, reasoningEffort)
this.reasoningEffort = reasoningEffort
}
}

@@ -1028,6 +1032,7 @@ export function createOpenAIShimClient(options: {
defaultHeaders?: Record<string, string>
maxRetries?: number
timeout?: number
reasoningEffort?: 'low' | 'medium' | 'high' | 'xhigh'
}): unknown {
hydrateGithubModelsTokenFromSecureStorage()

@@ -1050,7 +1055,7 @@ export function createOpenAIShimClient(options: {

const beta = new OpenAIShimBeta({
...(options.defaultHeaders ?? {}),
})
}, options.reasoningEffort)

return {
beta,

@@ -20,13 +20,43 @@ const CODEX_ALIAS_MODELS: Record<
model: 'gpt-5.4',
reasoningEffort: 'high',
},
'gpt-5.4': {
model: 'gpt-5.4',
reasoningEffort: 'high',
},
'gpt-5.3-codex': {
model: 'gpt-5.3-codex',
reasoningEffort: 'high',
},
'gpt-5.3-codex-spark': {
model: 'gpt-5.3-codex-spark',
},
codexspark: {
model: 'gpt-5.3-codex-spark',
},
'gpt-5.2-codex': {
model: 'gpt-5.2-codex',
reasoningEffort: 'high',
},
'gpt-5.1-codex-max': {
model: 'gpt-5.1-codex-max',
reasoningEffort: 'high',
},
'gpt-5.1-codex-mini': {
model: 'gpt-5.1-codex-mini',
},
'gpt-5.4-mini': {
model: 'gpt-5.4-mini',
reasoningEffort: 'medium',
},
'gpt-5.2': {
model: 'gpt-5.2',
reasoningEffort: 'medium',
},
} as const

type CodexAlias = keyof typeof CODEX_ALIAS_MODELS
type ReasoningEffort = 'low' | 'medium' | 'high'
type ReasoningEffort = 'low' | 'medium' | 'high' | 'xhigh'

export type ProviderTransport = 'chat_completions' | 'codex_responses'

@@ -102,7 +132,7 @@ function decodeJwtPayload(token: string): Record<string, unknown> | undefined {
function parseReasoningEffort(value: string | undefined): ReasoningEffort | undefined {
if (!value) return undefined
const normalized = value.trim().toLowerCase()
if (normalized === 'low' || normalized === 'medium' || normalized === 'high') {
if (normalized === 'low' || normalized === 'medium' || normalized === 'high' || normalized === 'xhigh') {
return normalized
}
return undefined
@@ -193,6 +223,7 @@ export function resolveProviderRequest(options?: {
model?: string
baseUrl?: string
fallbackModel?: string
reasoningEffortOverride?: ReasoningEffort
}): ResolvedProviderRequest {
const isGithubMode = isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB)
const requestedModel =
@@ -217,6 +248,11 @@ export function resolveProviderRequest(options?: {
? normalizeGithubModelsApiModel(requestedModel)
: descriptor.baseModel

const reasoning = options?.reasoningEffortOverride
? { effort: options.reasoningEffortOverride }
: descriptor.reasoning

return {
transport,
requestedModel,
@@ -227,7 +263,7 @@ export function resolveProviderRequest(options?: {
? DEFAULT_CODEX_BASE_URL
: DEFAULT_OPENAI_BASE_URL)
).replace(/\/+$/, ''),
reasoning: descriptor.reasoning,
reasoning,
}
}

@@ -336,3 +372,11 @@ export function resolveCodexApiCredentials(
source: 'auth.json',
}
}

export function getReasoningEffortForModel(model: string): ReasoningEffort | undefined {
const normalized = model.trim().toLowerCase()
const base = normalized.split('?', 1)[0] ?? normalized
const alias = base as CodexAlias
const aliasConfig = CODEX_ALIAS_MODELS[alias]
return aliasConfig?.reasoningEffort
}

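resolveProviderRequest now gives an explicit reasoningEffortOverride precedence over the alias table's default. The precedence rule on its own, as a small sketch (the type mirrors the one in the diff; function and argument names are illustrative):

type ReasoningEffort = 'low' | 'medium' | 'high' | 'xhigh'

// An explicit override wins; otherwise the alias default (if any); otherwise
// no reasoning block is attached to the request.
function pickReasoning(
  aliasDefault: ReasoningEffort | undefined,
  override: ReasoningEffort | undefined,
): { effort: ReasoningEffort } | undefined {
  if (override) return { effort: override }
  return aliasDefault ? { effort: aliasDefault } : undefined
}

// pickReasoning('high', undefined)    -> { effort: 'high' }   (e.g. gpt-5.4 default)
// pickReasoning('high', 'xhigh')      -> { effort: 'xhigh' }  (caller override)
// pickReasoning(undefined, undefined) -> undefined            (e.g. gpt-5.3-codex-spark)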
@@ -17,6 +17,14 @@ export const EFFORT_LEVELS = [
'max',
] as const satisfies readonly EffortLevel[]

export const OPENAI_EFFORT_LEVELS = [
'low',
'medium',
'high',
'xhigh',
] as const

export type OpenAIEffortLevel = typeof OPENAI_EFFORT_LEVELS[number]
export type EffortValue = EffortLevel | number

// @[MODEL LAUNCH]: Add the new model to the allowlist if it supports the effort parameter.
@@ -68,6 +76,46 @@ export function isEffortLevel(value: string): value is EffortLevel {
return (EFFORT_LEVELS as readonly string[]).includes(value)
}

export function isOpenAIEffortLevel(value: string): value is OpenAIEffortLevel {
return (OPENAI_EFFORT_LEVELS as readonly string[]).includes(value)
}

export function modelUsesOpenAIEffort(model: string): boolean {
const provider = getAPIProvider()
return provider === 'openai' || provider === 'codex'
}

export function getAvailableEffortLevels(model: string): EffortLevel[] | OpenAIEffortLevel[] {
if (modelUsesOpenAIEffort(model)) {
return [...OPENAI_EFFORT_LEVELS] as OpenAIEffortLevel[]
}
const levels: EffortLevel[] = ['low', 'medium', 'high']
if (modelSupportsMaxEffort(model)) {
levels.push('max')
}
return levels
}

export function getEffortLevelLabel(level: EffortLevel | OpenAIEffortLevel): string {
if (level === 'xhigh') return 'Extra High'
if (level === 'max') return 'Max'
return capitalize(level)
}

export function openAIEffortToStandard(level: OpenAIEffortLevel): EffortLevel {
if (level === 'xhigh') return 'max'
return level
}

export function standardEffortToOpenAI(level: EffortLevel): OpenAIEffortLevel {
if (level === 'max') return 'xhigh'
return level as OpenAIEffortLevel
}

function capitalize(s: string): string {
return s.charAt(0).toUpperCase() + s.slice(1)
}

export function parseEffortValue(value: unknown): EffortValue | undefined {
if (value === undefined || value === null || value === '') {
return undefined
@@ -221,7 +269,7 @@ export function convertEffortValueToLevel(value: EffortValue): EffortLevel {
 * @param level The effort level to describe
 * @returns Human-readable description
 */
export function getEffortLevelDescription(level: EffortLevel): string {
export function getEffortLevelDescription(level: EffortLevel | OpenAIEffortLevel): string {
switch (level) {
case 'low':
return 'Quick, straightforward implementation with minimal overhead'
@@ -231,6 +279,8 @@ export function getEffortLevelDescription(level: EffortLevel): string {
return 'Comprehensive implementation with extensive testing and documentation'
case 'max':
return 'Maximum capability with deepest reasoning (Opus 4.6 only)'
case 'xhigh':
return 'Extra high reasoning effort for complex tasks (OpenAI/Codex)'
}
}

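The OpenAI effort vocabulary differs from the standard one only at the top end, so the two converters above are inverses of each other: 'max' maps to 'xhigh' and back, every other level passes through unchanged. A self-contained sketch of the mapping (local type declarations stand in for the exported ones):

type EffortLevel = 'low' | 'medium' | 'high' | 'max'
type OpenAIEffortLevel = 'low' | 'medium' | 'high' | 'xhigh'

// Mirrors standardEffortToOpenAI / openAIEffortToStandard from the diff above.
const toOpenAI = (level: EffortLevel): OpenAIEffortLevel =>
  level === 'max' ? 'xhigh' : level

const toStandard = (level: OpenAIEffortLevel): EffortLevel =>
  level === 'xhigh' ? 'max' : level

// The round trip holds for every level:
//   toStandard(toOpenAI('max'))   === 'max'
//   toOpenAI(toStandard('xhigh')) === 'xhigh'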
@@ -6,8 +6,6 @@ export const MODEL_ALIASES = [
'sonnet[1m]',
'opus[1m]',
'opusplan',
'codexplan',
'codexspark',
] as const
export type ModelAlias = (typeof MODEL_ALIASES)[number]

@@ -123,6 +123,10 @@ export function getDefaultOpusModel(): ModelName {
if (getAPIProvider() === 'openai') {
return process.env.OPENAI_MODEL || 'gpt-4o'
}
// Codex provider: use user-specified model or default to gpt-5.4
if (getAPIProvider() === 'codex') {
return process.env.OPENAI_MODEL || 'gpt-5.4'
}
// 3P providers (Bedrock, Vertex, Foundry) — kept as a separate branch
// even when values match, since 3P availability lags firstParty and
// these will diverge again at the next model launch.
@@ -145,6 +149,10 @@ export function getDefaultSonnetModel(): ModelName {
if (getAPIProvider() === 'openai') {
return process.env.OPENAI_MODEL || 'gpt-4o'
}
// Codex provider
if (getAPIProvider() === 'codex') {
return process.env.OPENAI_MODEL || 'gpt-5.4'
}
// Default to Sonnet 4.5 for 3P since they may not have 4.6 yet
if (getAPIProvider() !== 'firstParty') {
return getModelStrings().sonnet45
@@ -165,6 +173,10 @@ export function getDefaultHaikuModel(): ModelName {
if (getAPIProvider() === 'openai') {
return process.env.OPENAI_MODEL || 'gpt-4o-mini'
}
// Codex provider
if (getAPIProvider() === 'codex') {
return process.env.OPENAI_MODEL || 'gpt-5.4'
}

// Haiku 4.5 is available on all platforms (first-party, Foundry, Bedrock, Vertex)
return getModelStrings().haiku45
@@ -217,6 +229,10 @@ export function getDefaultMainLoopModelSetting(): ModelName | ModelAlias {
if (getAPIProvider() === 'openai') {
return process.env.OPENAI_MODEL || 'gpt-4o'
}
// Codex provider: always use the configured Codex model (default gpt-5.4)
if (getAPIProvider() === 'codex') {
return process.env.OPENAI_MODEL || 'gpt-5.4'
}

// Ants default to defaultModel from flag config, or Opus 1M if not configured
if (process.env.USER_TYPE === 'ant') {
@@ -343,12 +359,6 @@ export function renderDefaultModelSetting(
if (setting === 'opusplan') {
return 'Opus 4.6 in plan mode, else Sonnet 4.6'
}
if (setting === 'codexplan') {
return 'Codex Plan (GPT-5.4 high reasoning)'
}
if (setting === 'codexspark') {
return 'Codex Spark (GPT-5.3 Codex Spark)'
}
return renderModelName(parseUserSpecifiedModel(setting))
}

@@ -383,11 +393,12 @@ export function renderModelSetting(setting: ModelName | ModelAlias): string {
if (setting === 'opusplan') {
return 'Opus Plan'
}
// Handle Codex models - show actual model name + resolved model
if (setting === 'codexplan') {
return 'Codex Plan'
return 'codexplan (gpt-5.4)'
}
if (setting === 'codexspark') {
return 'Codex Spark'
return 'codexspark (gpt-5.3-codex-spark)'
}
if (isModelAlias(setting)) {
return capitalize(setting)
@@ -401,8 +412,8 @@ export function renderModelSetting(setting: ModelName | ModelAlias): string {
 * if the model is not recognized as a public model.
 */
export function getPublicModelDisplayName(model: ModelName): string | null {
// For OpenAI/Gemini providers, show the actual model name not a Claude alias
if (getAPIProvider() === 'openai' || getAPIProvider() === 'gemini') {
// For OpenAI/Gemini/Codex providers, show the actual model name not a Claude alias
if (getAPIProvider() === 'openai' || getAPIProvider() === 'gemini' || getAPIProvider() === 'codex') {
return null
}
switch (model) {
@@ -517,10 +528,6 @@ export function parseUserSpecifiedModel(

if (isModelAlias(modelString)) {
switch (modelString) {
case 'codexplan':
return modelInputTrimmed
case 'codexspark':
return modelInputTrimmed
case 'opusplan':
return getDefaultSonnetModel() + (has1mTag ? '[1m]' : '') // Sonnet is default, Opus in plan mode
case 'sonnet':
@@ -535,6 +542,14 @@ export function parseUserSpecifiedModel(
}
}

// Handle Codex aliases - map to actual model names
if (modelString === 'codexplan') {
return 'gpt-5.4'
}
if (modelString === 'codexspark') {
return 'gpt-5.3-codex-spark'
}

// Opus 4/4.1 are no longer available on the first-party API (same as
// Claude.ai) — silently remap to the current Opus default. The 'opus'
// alias already resolves to 4.6, so the only users on these explicit

@@ -268,20 +268,65 @@ function getOpusPlanOption(): ModelOption {

function getCodexPlanOption(): ModelOption {
return {
value: 'codexplan',
label: 'Codex Plan',
value: 'gpt-5.4',
label: 'gpt-5.4',
description: 'GPT-5.4 on the Codex backend with high reasoning',
}
}

function getCodexSparkOption(): ModelOption {
return {
value: 'codexspark',
label: 'Codex Spark',
value: 'gpt-5.3-codex-spark',
label: 'gpt-5.3-codex-spark',
description: 'GPT-5.3 Codex Spark on the Codex backend for fast tool loops',
}
}

function getCodexModelOptions(): ModelOption[] {
return [
{
value: 'gpt-5.4',
label: 'gpt-5.4',
description: 'GPT-5.4 with high reasoning',
},
{
value: 'gpt-5.3-codex',
label: 'gpt-5.3-codex',
description: 'GPT-5.3 Codex with high reasoning',
},
{
value: 'gpt-5.3-codex-spark',
label: 'gpt-5.3-codex-spark',
description: 'GPT-5.3 Codex Spark for fast tool loops',
},
{
value: 'codexspark',
label: 'codexspark',
description: 'GPT-5.3 Codex Spark alias for fast tool loops',
},
{
value: 'gpt-5.2-codex',
label: 'gpt-5.2-codex',
description: 'GPT-5.2 Codex with high reasoning',
},
{
value: 'gpt-5.1-codex-max',
label: 'gpt-5.1-codex-max',
description: 'GPT-5.1 Codex Max for deep reasoning',
},
{
value: 'gpt-5.1-codex-mini',
label: 'gpt-5.1-codex-mini',
description: 'GPT-5.1 Codex Mini - faster, cheaper',
},
{
value: 'gpt-5.4-mini',
label: 'gpt-5.4-mini',
description: 'GPT-5.4 Mini - faster, cheaper',
},
]
}

// @[MODEL LAUNCH]: Update the model picker lists below to include/reorder options for the new model.
// Each user tier (ant, Max/Team Premium, Pro/Team Standard/Enterprise, PAYG 1P, PAYG 3P) has its own list.
function getModelOptionsBase(fastMode = false): ModelOption[] {
@@ -360,8 +405,9 @@ function getModelOptionsBase(fastMode = false): ModelOption[] {
// PAYG 3P: Default (Sonnet 4.5) + Sonnet (3P custom) or Sonnet 4.6/1M + Opus (3P custom) or Opus 4.1/Opus 4.6/Opus1M + Haiku + Opus 4.1
const payg3pOptions = [getDefaultOptionForUser(fastMode)]

if (getAPIProvider() === 'openai') {
payg3pOptions.push(getCodexPlanOption(), getCodexSparkOption())
// Add Codex models for openai and codex providers
if (getAPIProvider() === 'openai' || getAPIProvider() === 'codex') {
payg3pOptions.push(...getCodexModelOptions())
}

const customSonnet = getCustomSonnetOption()
@@ -517,9 +563,9 @@ export function getModelOptions(fastMode = false): ModelOption[] {
return filterModelOptionsByAllowlist(options)
} else if (customModel === 'opusplan') {
return filterModelOptionsByAllowlist([...options, getOpusPlanOption()])
} else if (customModel === 'codexplan') {
} else if (customModel === 'gpt-5.4') {
return filterModelOptionsByAllowlist([...options, getCodexPlanOption()])
} else if (customModel === 'codexspark') {
} else if (customModel === 'gpt-5.3-codex-spark') {
return filterModelOptionsByAllowlist([...options, getCodexSparkOption()])
} else if (customModel === 'opus' && getAPIProvider() === 'firstParty') {
return filterModelOptionsByAllowlist([
@@ -554,11 +600,23 @@ export function getModelOptions(fastMode = false): ModelOption[] {
 */
function filterModelOptionsByAllowlist(options: ModelOption[]): ModelOption[] {
const settings = getSettings_DEPRECATED() || {}
if (!settings.availableModels) {
return options // No restrictions
}
return options.filter(
const filtered = !settings.availableModels
? options // No restrictions
: options.filter(
opt =>
opt.value === null || (opt.value !== null && isModelAllowed(opt.value)),
)

// Select state uses option values as identity keys. If two entries share the
// same value (e.g. provider-specific aliases collapsing to one model ID),
// navigation/focus can become inconsistent and appear as duplicate rendering.
const seen = new Set<string>()
return filtered.filter(opt => {
const key = String(opt.value)
if (seen.has(key)) {
return false
}
seen.add(key)
return true
})
}

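The comment in the diff explains why the extra pass matters: the Select keys its focus state on option values, so two picker entries that resolve to the same model ID corrupt navigation. A small standalone sketch of the dedup step and its effect (the ModelOption shape is reduced for illustration):

type ModelOption = { value: string | null; label: string }

// Keep only the first entry for each value, exactly as the filter above does.
function dedupeByValue(options: ModelOption[]): ModelOption[] {
  const seen = new Set<string>()
  return options.filter(opt => {
    const key = String(opt.value)
    if (seen.has(key)) return false
    seen.add(key)
    return true
  })
}

// dedupeByValue([
//   { value: 'gpt-5.4', label: 'gpt-5.4' },
//   { value: 'gpt-5.4', label: 'Codex Plan' },  // alias collapsing to the same ID
// ]) keeps only the first entry.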
@@ -23,9 +23,12 @@ export type ModelStrings = Record<ModelKey, string>
const MODEL_KEYS = Object.keys(ALL_MODEL_CONFIGS) as ModelKey[]

function getBuiltinModelStrings(provider: APIProvider): ModelStrings {
// Codex piggybacks on the OpenAI provider transport for Anthropic tier aliases.
// Reuse OpenAI mappings so model string lookups never return undefined.
const providerKey = provider === 'codex' ? 'openai' : provider
const out = {} as ModelStrings
for (const key of MODEL_KEYS) {
out[key] = ALL_MODEL_CONFIGS[key][provider]
out[key] = ALL_MODEL_CONFIGS[key][providerKey]
}
return out
}

@@ -9,6 +9,7 @@ export type APIProvider =
| 'openai'
| 'gemini'
| 'github'
| 'codex'

export function getAPIProvider(): APIProvider {
return isEnvTruthy(process.env.CLAUDE_CODE_USE_GEMINI)
@@ -16,7 +17,9 @@ export function getAPIProvider(): APIProvider {
: isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB)
? 'github'
: isEnvTruthy(process.env.CLAUDE_CODE_USE_OPENAI)
? 'openai'
? isCodexModel()
? 'codex'
: 'openai'
: isEnvTruthy(process.env.CLAUDE_CODE_USE_BEDROCK)
? 'bedrock'
: isEnvTruthy(process.env.CLAUDE_CODE_USE_VERTEX)
@@ -29,6 +32,19 @@ export function getAPIProvider(): APIProvider {
export function usesAnthropicAccountFlow(): boolean {
return getAPIProvider() === 'firstParty'
}
function isCodexModel(): boolean {
const model = (process.env.OPENAI_MODEL || '').toLowerCase()
return (
model === 'codexplan' ||
model === 'codexspark' ||
model === 'gpt-5.4' ||
model === 'gpt-5.3-codex' ||
model === 'gpt-5.3-codex-spark' ||
model === 'gpt-5.2-codex' ||
model === 'gpt-5.1-codex-max' ||
model === 'gpt-5.1-codex-mini'
)
}

export function getAPIProviderForStatsig(): AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS {
return getAPIProvider() as AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS

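Provider detection now splits the OpenAI path: with CLAUDE_CODE_USE_OPENAI set, the provider is reported as 'codex' when OPENAI_MODEL names one of the models listed in isCodexModel(), and as 'openai' otherwise. A reduced sketch of that decision (the set below copies the list from isCodexModel(); everything else is illustrative):

// Sketch of the openai/codex split inside getAPIProvider().
const CODEX_MODELS = new Set([
  'codexplan', 'codexspark', 'gpt-5.4', 'gpt-5.3-codex', 'gpt-5.3-codex-spark',
  'gpt-5.2-codex', 'gpt-5.1-codex-max', 'gpt-5.1-codex-mini',
])

function openAIProviderFor(model: string | undefined): 'openai' | 'codex' {
  return CODEX_MODELS.has((model ?? '').toLowerCase()) ? 'codex' : 'openai'
}

// openAIProviderFor('gpt-5.3-codex') -> 'codex'
// openAIProviderFor('gpt-4o')        -> 'openai'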
@@ -12,6 +12,7 @@ import { formatNumber } from './format.js';
import { getIdeClientName, type IDEExtensionInstallationStatus, isJetBrainsIde, toIDEDisplayName } from './ide.js';
import { getClaudeAiUserDefaultModelDescription, modelDisplayString } from './model/model.js';
import { getAPIProvider } from './model/providers.js';
import { resolveProviderRequest } from '../services/api/providerConfig.js';
import { getMTLSConfig } from './mtls.js';
import { checkInstall } from './nativeInstaller/index.js';
import { getProxyUrl } from './proxy.js';
@@ -247,6 +248,7 @@ export function buildAPIProviderProperties(): Property[] {
vertex: 'Google Vertex AI',
foundry: 'Microsoft Foundry',
openai: 'OpenAI-compatible',
codex: 'Codex',
gemini: 'Google Gemini',
}[apiProvider];
properties.push({
@@ -333,9 +335,48 @@ export function buildAPIProviderProperties(): Property[] {
}
const openaiModel = process.env.OPENAI_MODEL;
if (openaiModel) {
// Build display model string with resolved model + reasoning effort
let modelDisplay = openaiModel;
const resolved = resolveProviderRequest({ model: openaiModel });
const resolvedModel = resolved.resolvedModel;
const reasoningEffort = resolved.reasoning?.effort;
if (resolvedModel && resolvedModel !== openaiModel.toLowerCase()) {
// Show resolved model name
modelDisplay = resolvedModel;
}
if (reasoningEffort) {
modelDisplay = `${modelDisplay} (${reasoningEffort})`;
}
properties.push({
label: 'Model',
value: redactSecretValueForDisplay(openaiModel, process.env) ?? openaiModel
value: redactSecretValueForDisplay(modelDisplay, process.env) ?? modelDisplay
});
}
} else if (apiProvider === 'codex') {
const codexBaseUrl = process.env.OPENAI_BASE_URL;
if (codexBaseUrl) {
properties.push({
label: 'Codex base URL',
value: redactSecretValueForDisplay(codexBaseUrl, process.env) ?? codexBaseUrl
});
}
const openaiModel = process.env.OPENAI_MODEL;
if (openaiModel) {
// Build display model string with resolved model + reasoning effort
let modelDisplay = openaiModel;
const resolved = resolveProviderRequest({ model: openaiModel });
const resolvedModel = resolved.resolvedModel;
const reasoningEffort = resolved.reasoning?.effort;
if (resolvedModel && resolvedModel !== openaiModel.toLowerCase()) {
// Show resolved model name
modelDisplay = resolvedModel;
}
if (reasoningEffort) {
modelDisplay = `${modelDisplay} (${reasoningEffort})`;
}
properties.push({
label: 'Model',
value: redactSecretValueForDisplay(modelDisplay, process.env) ?? modelDisplay
});
}
} else if (apiProvider === 'gemini') {

@@ -286,6 +286,25 @@ function createCommandSuggestionItem(
}
}

/**
 * Ensure suggestion IDs are unique for React keys and selection logic.
 * If duplicates exist, append a stable numeric suffix to subsequent entries.
 */
function ensureUniqueSuggestionIds(items: SuggestionItem[]): SuggestionItem[] {
const counts = new Map<string, number>()
return items.map(item => {
const seen = counts.get(item.id) ?? 0
counts.set(item.id, seen + 1)
if (seen === 0) {
return item
}
return {
...item,
id: `${item.id}#${seen + 1}`,
}
})
}

/**
 * Generate command suggestions based on input
 */
@@ -369,14 +388,14 @@ export function generateCommandSuggestions(

// Combine with built-in commands prioritized after recently used,
// so they remain visible even when many skills are installed
return [
return ensureUniqueSuggestionIds([
...recentlyUsed,
...builtinCommands,
...userCommands,
...projectCommands,
...policyCommands,
...otherCommands,
].map(cmd => createCommandSuggestionItem(cmd))
].map(cmd => createCommandSuggestionItem(cmd)))
}

// The Fuse index filters isHidden at build time and is keyed on the
@@ -491,10 +510,13 @@ export function generateCommandSuggestions(
if (hiddenExact) {
const hiddenId = getCommandId(hiddenExact)
if (!fuseSuggestions.some(s => s.id === hiddenId)) {
return [createCommandSuggestionItem(hiddenExact), ...fuseSuggestions]
return ensureUniqueSuggestionIds([
createCommandSuggestionItem(hiddenExact),
...fuseSuggestions,
])
}
}
return fuseSuggestions
return ensureUniqueSuggestionIds(fuseSuggestions)
}

/**

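ensureUniqueSuggestionIds keeps the first occurrence of an ID untouched and suffixes later duplicates with '#2', '#3', and so on, which keeps React keys and the selected-suggestion index stable when two sources emit the same command. A small usage sketch (SuggestionItem is reduced to its id field for illustration):

type SuggestionItem = { id: string }

// Same strategy as ensureUniqueSuggestionIds above: first occurrence keeps its
// id, later duplicates get a stable numeric suffix.
function ensureUniqueIds(items: SuggestionItem[]): SuggestionItem[] {
  const counts = new Map<string, number>()
  return items.map(item => {
    const seen = counts.get(item.id) ?? 0
    counts.set(item.id, seen + 1)
    return seen === 0 ? item : { ...item, id: `${item.id}#${seen + 1}` }
  })
}

// ensureUniqueIds([{ id: 'model' }, { id: 'model' }, { id: 'model' }])
//   -> ids: 'model', 'model#2', 'model#3'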