feat: Refactor model handling & reasoning effort across navigation, typeahead, OpenAI/Codex providers, API shim, configs, and UI (adds EffortPicker, new mappings/options, unique suggestion IDs, effort utilities; removes deprecated aliases; defaults Codex to gpt-5.4; improves selection logic and status display)

This commit is contained in:
Meet Patel
2026-04-02 17:17:14 +05:30
parent 9f48bb4431
commit 8f50f17674
15 changed files with 612 additions and 139 deletions

View File

@@ -17,6 +17,14 @@ export const EFFORT_LEVELS = [
'max',
] as const satisfies readonly EffortLevel[]
// Reasoning-effort values accepted by OpenAI-style providers. Note the top
// tier here is 'xhigh', whereas the standard EFFORT_LEVELS list tops out at
// 'max'; openAIEffortToStandard/standardEffortToOpenAI translate between them.
export const OPENAI_EFFORT_LEVELS = [
'low',
'medium',
'high',
'xhigh',
] as const
// Union of the literal strings above: 'low' | 'medium' | 'high' | 'xhigh'.
export type OpenAIEffortLevel = typeof OPENAI_EFFORT_LEVELS[number]
// An effort setting: either a named level or a raw numeric value.
export type EffortValue = EffortLevel | number
// @[MODEL LAUNCH]: Add the new model to the allowlist if it supports the effort parameter.
@@ -68,6 +76,46 @@ export function isEffortLevel(value: string): value is EffortLevel {
return (EFFORT_LEVELS as readonly string[]).includes(value)
}
/**
 * Type guard: narrows an arbitrary string to the OpenAI reasoning-effort
 * union ('low' | 'medium' | 'high' | 'xhigh').
 */
export function isOpenAIEffortLevel(value: string): value is OpenAIEffortLevel {
  const known: readonly string[] = OPENAI_EFFORT_LEVELS
  return known.includes(value)
}
/**
 * Whether effort for the current session should be expressed in OpenAI terms.
 * NOTE(review): `model` is currently unused — the decision is provider-wide,
 * not per-model. Kept for signature stability with other model-* helpers.
 */
export function modelUsesOpenAIEffort(model: string): boolean {
  switch (getAPIProvider()) {
    case 'openai':
    case 'codex':
      return true
    default:
      return false
  }
}
/**
 * Returns the effort levels the UI should offer for `model`.
 * OpenAI/Codex providers get the OpenAI ladder (ending in 'xhigh');
 * everyone else gets low/medium/high, plus 'max' where the model supports it.
 */
export function getAvailableEffortLevels(model: string): EffortLevel[] | OpenAIEffortLevel[] {
  if (modelUsesOpenAIEffort(model)) {
    // Copy so callers can't mutate the shared readonly tuple.
    return Array.from(OPENAI_EFFORT_LEVELS)
  }
  return modelSupportsMaxEffort(model)
    ? ['low', 'medium', 'high', 'max']
    : ['low', 'medium', 'high']
}
/**
 * Human-readable label for an effort level: special-cases the two
 * non-trivial names, otherwise just uppercases the first letter.
 */
export function getEffortLevelLabel(level: EffortLevel | OpenAIEffortLevel): string {
  switch (level) {
    case 'xhigh':
      return 'Extra High'
    case 'max':
      return 'Max'
    default:
      return level.charAt(0).toUpperCase() + level.slice(1)
  }
}
/**
 * Map an OpenAI effort level onto the standard ladder.
 * 'xhigh' corresponds to 'max'; the remaining levels are shared verbatim.
 */
export function openAIEffortToStandard(level: OpenAIEffortLevel): EffortLevel {
  return level === 'xhigh' ? 'max' : level
}
/**
 * Map a standard effort level onto the OpenAI ladder.
 * 'max' corresponds to 'xhigh'; the remaining levels are shared verbatim.
 */
export function standardEffortToOpenAI(level: EffortLevel): OpenAIEffortLevel {
  return level === 'max' ? 'xhigh' : (level as OpenAIEffortLevel)
}
// Uppercase the first UTF-16 code unit of `s`; empty input passes through.
function capitalize(s: string): string {
  const head = s.charAt(0)
  const tail = s.slice(1)
  return head.toUpperCase() + tail
}
export function parseEffortValue(value: unknown): EffortValue | undefined {
if (value === undefined || value === null || value === '') {
return undefined
@@ -221,7 +269,7 @@ export function convertEffortValueToLevel(value: EffortValue): EffortLevel {
* @param level The effort level to describe
* @returns Human-readable description
*/
export function getEffortLevelDescription(level: EffortLevel): string {
export function getEffortLevelDescription(level: EffortLevel | OpenAIEffortLevel): string {
switch (level) {
case 'low':
return 'Quick, straightforward implementation with minimal overhead'
@@ -231,6 +279,8 @@ export function getEffortLevelDescription(level: EffortLevel): string {
return 'Comprehensive implementation with extensive testing and documentation'
case 'max':
return 'Maximum capability with deepest reasoning (Opus 4.6 only)'
case 'xhigh':
return 'Extra high reasoning effort for complex tasks (OpenAI/Codex)'
}
}

View File

@@ -6,8 +6,6 @@ export const MODEL_ALIASES = [
'sonnet[1m]',
'opus[1m]',
'opusplan',
'codexplan',
'codexspark',
] as const
export type ModelAlias = (typeof MODEL_ALIASES)[number]

View File

@@ -123,6 +123,10 @@ export function getDefaultOpusModel(): ModelName {
if (getAPIProvider() === 'openai') {
return process.env.OPENAI_MODEL || 'gpt-4o'
}
// Codex provider: use user-specified model or default to gpt-5.4
if (getAPIProvider() === 'codex') {
return process.env.OPENAI_MODEL || 'gpt-5.4'
}
// 3P providers (Bedrock, Vertex, Foundry) — kept as a separate branch
// even when values match, since 3P availability lags firstParty and
// these will diverge again at the next model launch.
@@ -145,6 +149,10 @@ export function getDefaultSonnetModel(): ModelName {
if (getAPIProvider() === 'openai') {
return process.env.OPENAI_MODEL || 'gpt-4o'
}
// Codex provider
if (getAPIProvider() === 'codex') {
return process.env.OPENAI_MODEL || 'gpt-5.4'
}
// Default to Sonnet 4.5 for 3P since they may not have 4.6 yet
if (getAPIProvider() !== 'firstParty') {
return getModelStrings().sonnet45
@@ -165,6 +173,10 @@ export function getDefaultHaikuModel(): ModelName {
if (getAPIProvider() === 'openai') {
return process.env.OPENAI_MODEL || 'gpt-4o-mini'
}
// Codex provider
if (getAPIProvider() === 'codex') {
return process.env.OPENAI_MODEL || 'gpt-5.4'
}
// Haiku 4.5 is available on all platforms (first-party, Foundry, Bedrock, Vertex)
return getModelStrings().haiku45
@@ -217,6 +229,10 @@ export function getDefaultMainLoopModelSetting(): ModelName | ModelAlias {
if (getAPIProvider() === 'openai') {
return process.env.OPENAI_MODEL || 'gpt-4o'
}
// Codex provider: always use the configured Codex model (default gpt-5.4)
if (getAPIProvider() === 'codex') {
return process.env.OPENAI_MODEL || 'gpt-5.4'
}
// Ants default to defaultModel from flag config, or Opus 1M if not configured
if (process.env.USER_TYPE === 'ant') {
@@ -343,12 +359,6 @@ export function renderDefaultModelSetting(
if (setting === 'opusplan') {
return 'Opus 4.6 in plan mode, else Sonnet 4.6'
}
if (setting === 'codexplan') {
return 'Codex Plan (GPT-5.4 high reasoning)'
}
if (setting === 'codexspark') {
return 'Codex Spark (GPT-5.3 Codex Spark)'
}
return renderModelName(parseUserSpecifiedModel(setting))
}
@@ -383,11 +393,12 @@ export function renderModelSetting(setting: ModelName | ModelAlias): string {
if (setting === 'opusplan') {
return 'Opus Plan'
}
// Handle Codex models - show actual model name + resolved model
if (setting === 'codexplan') {
return 'Codex Plan'
return 'codexplan (gpt-5.4)'
}
if (setting === 'codexspark') {
return 'Codex Spark'
return 'codexspark (gpt-5.3-codex-spark)'
}
if (isModelAlias(setting)) {
return capitalize(setting)
@@ -401,8 +412,8 @@ export function renderModelSetting(setting: ModelName | ModelAlias): string {
* if the model is not recognized as a public model.
*/
export function getPublicModelDisplayName(model: ModelName): string | null {
// For OpenAI/Gemini providers, show the actual model name not a Claude alias
if (getAPIProvider() === 'openai' || getAPIProvider() === 'gemini') {
// For OpenAI/Gemini/Codex providers, show the actual model name not a Claude alias
if (getAPIProvider() === 'openai' || getAPIProvider() === 'gemini' || getAPIProvider() === 'codex') {
return null
}
switch (model) {
@@ -517,10 +528,6 @@ export function parseUserSpecifiedModel(
if (isModelAlias(modelString)) {
switch (modelString) {
case 'codexplan':
return modelInputTrimmed
case 'codexspark':
return modelInputTrimmed
case 'opusplan':
return getDefaultSonnetModel() + (has1mTag ? '[1m]' : '') // Sonnet is default, Opus in plan mode
case 'sonnet':
@@ -535,6 +542,14 @@ export function parseUserSpecifiedModel(
}
}
// Handle Codex aliases - map to actual model names
if (modelString === 'codexplan') {
return 'gpt-5.4'
}
if (modelString === 'codexspark') {
return 'gpt-5.3-codex-spark'
}
// Opus 4/4.1 are no longer available on the first-party API (same as
// Claude.ai) — silently remap to the current Opus default. The 'opus'
// alias already resolves to 4.6, so the only users on these explicit

View File

@@ -268,20 +268,65 @@ function getOpusPlanOption(): ModelOption {
/**
 * Picker entry for the Codex "plan" model. The option's value is the real
 * model ID (gpt-5.4), not the old 'codexplan' alias, so select-state identity
 * keys stay consistent with the rest of the Codex options.
 */
function getCodexPlanOption(): ModelOption {
  return {
    value: 'gpt-5.4',
    label: 'gpt-5.4',
    description: 'GPT-5.4 on the Codex backend with high reasoning',
  }
}
/**
 * Picker entry for the Codex Spark model. As with getCodexPlanOption, the
 * value is the concrete model ID rather than the old 'codexspark' alias.
 */
function getCodexSparkOption(): ModelOption {
  return {
    value: 'gpt-5.3-codex-spark',
    label: 'gpt-5.3-codex-spark',
    description: 'GPT-5.3 Codex Spark on the Codex backend for fast tool loops',
  }
}
/**
 * Full list of Codex model picker options, in display order.
 * Each option's label equals its model-ID value; only the description varies,
 * so the list is kept as (id, description) pairs and expanded on return.
 * Note the 'codexspark' entry is an alias for gpt-5.3-codex-spark.
 */
function getCodexModelOptions(): ModelOption[] {
  const catalog: ReadonlyArray<readonly [string, string]> = [
    ['gpt-5.4', 'GPT-5.4 with high reasoning'],
    ['gpt-5.3-codex', 'GPT-5.3 Codex with high reasoning'],
    ['gpt-5.3-codex-spark', 'GPT-5.3 Codex Spark for fast tool loops'],
    ['codexspark', 'GPT-5.3 Codex Spark alias for fast tool loops'],
    ['gpt-5.2-codex', 'GPT-5.2 Codex with high reasoning'],
    ['gpt-5.1-codex-max', 'GPT-5.1 Codex Max for deep reasoning'],
    ['gpt-5.1-codex-mini', 'GPT-5.1 Codex Mini - faster, cheaper'],
    ['gpt-5.4-mini', 'GPT-5.4 Mini - faster, cheaper'],
  ]
  return catalog.map(([id, description]) => ({
    value: id,
    label: id,
    description,
  }))
}
// @[MODEL LAUNCH]: Update the model picker lists below to include/reorder options for the new model.
// Each user tier (ant, Max/Team Premium, Pro/Team Standard/Enterprise, PAYG 1P, PAYG 3P) has its own list.
function getModelOptionsBase(fastMode = false): ModelOption[] {
@@ -360,8 +405,9 @@ function getModelOptionsBase(fastMode = false): ModelOption[] {
// PAYG 3P: Default (Sonnet 4.5) + Sonnet (3P custom) or Sonnet 4.6/1M + Opus (3P custom) or Opus 4.1/Opus 4.6/Opus1M + Haiku + Opus 4.1
const payg3pOptions = [getDefaultOptionForUser(fastMode)]
if (getAPIProvider() === 'openai') {
payg3pOptions.push(getCodexPlanOption(), getCodexSparkOption())
// Add Codex models for openai and codex providers
if (getAPIProvider() === 'openai' || getAPIProvider() === 'codex') {
payg3pOptions.push(...getCodexModelOptions())
}
const customSonnet = getCustomSonnetOption()
@@ -517,9 +563,9 @@ export function getModelOptions(fastMode = false): ModelOption[] {
return filterModelOptionsByAllowlist(options)
} else if (customModel === 'opusplan') {
return filterModelOptionsByAllowlist([...options, getOpusPlanOption()])
} else if (customModel === 'codexplan') {
} else if (customModel === 'gpt-5.4') {
return filterModelOptionsByAllowlist([...options, getCodexPlanOption()])
} else if (customModel === 'codexspark') {
} else if (customModel === 'gpt-5.3-codex-spark') {
return filterModelOptionsByAllowlist([...options, getCodexSparkOption()])
} else if (customModel === 'opus' && getAPIProvider() === 'firstParty') {
return filterModelOptionsByAllowlist([
@@ -554,11 +600,23 @@ export function getModelOptions(fastMode = false): ModelOption[] {
*/
function filterModelOptionsByAllowlist(options: ModelOption[]): ModelOption[] {
const settings = getSettings_DEPRECATED() || {}
if (!settings.availableModels) {
return options // No restrictions
}
return options.filter(
const filtered = !settings.availableModels
? options // No restrictions
: options.filter(
opt =>
opt.value === null || (opt.value !== null && isModelAllowed(opt.value)),
)
// Select state uses option values as identity keys. If two entries share the
// same value (e.g. provider-specific aliases collapsing to one model ID),
// navigation/focus can become inconsistent and appear as duplicate rendering.
const seen = new Set<string>()
return filtered.filter(opt => {
const key = String(opt.value)
if (seen.has(key)) {
return false
}
seen.add(key)
return true
})
}

View File

@@ -23,9 +23,12 @@ export type ModelStrings = Record<ModelKey, string>
const MODEL_KEYS = Object.keys(ALL_MODEL_CONFIGS) as ModelKey[]
/**
 * Build the ModelStrings map for a provider from the builtin config table.
 *
 * Fixes: the previous rendering kept both the removed assignment
 * (`...[key][provider]`) and its replacement (`...[key][providerKey]`) in the
 * loop body; only the providerKey form is intended.
 */
function getBuiltinModelStrings(provider: APIProvider): ModelStrings {
  // Codex piggybacks on the OpenAI provider transport for Anthropic tier aliases.
  // Reuse OpenAI mappings so model string lookups never return undefined.
  const providerKey = provider === 'codex' ? 'openai' : provider
  const out = {} as ModelStrings
  for (const key of MODEL_KEYS) {
    out[key] = ALL_MODEL_CONFIGS[key][providerKey]
  }
  return out
}

View File

@@ -9,6 +9,7 @@ export type APIProvider =
| 'openai'
| 'gemini'
| 'github'
| 'codex'
export function getAPIProvider(): APIProvider {
return isEnvTruthy(process.env.CLAUDE_CODE_USE_GEMINI)
@@ -16,7 +17,9 @@ export function getAPIProvider(): APIProvider {
: isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB)
? 'github'
: isEnvTruthy(process.env.CLAUDE_CODE_USE_OPENAI)
? 'openai'
? isCodexModel()
? 'codex'
: 'openai'
: isEnvTruthy(process.env.CLAUDE_CODE_USE_BEDROCK)
? 'bedrock'
: isEnvTruthy(process.env.CLAUDE_CODE_USE_VERTEX)
@@ -29,6 +32,19 @@ export function getAPIProvider(): APIProvider {
/**
 * True when requests authenticate via the Anthropic first-party account flow
 * (i.e. no third-party or OpenAI-compatible provider is configured).
 */
export function usesAnthropicAccountFlow(): boolean {
  const provider = getAPIProvider()
  return provider === 'firstParty'
}
/**
 * Detect whether OPENAI_MODEL names a Codex-backend model (case-insensitive).
 *
 * Keep this list in sync with getCodexModelOptions(): any model offered in the
 * Codex picker must be recognized here, otherwise selecting it silently routes
 * through the plain 'openai' provider instead of 'codex'.
 * Fix: 'gpt-5.4-mini' is offered by getCodexModelOptions() but was missing
 * from this list — added for consistency.
 */
function isCodexModel(): boolean {
  const codexModels: readonly string[] = [
    'codexplan',
    'codexspark',
    'gpt-5.4',
    'gpt-5.3-codex',
    'gpt-5.3-codex-spark',
    'gpt-5.2-codex',
    'gpt-5.1-codex-max',
    'gpt-5.1-codex-mini',
    'gpt-5.4-mini',
  ]
  const model = (process.env.OPENAI_MODEL || '').toLowerCase()
  return codexModels.includes(model)
}
export function getAPIProviderForStatsig(): AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS {
return getAPIProvider() as AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS

View File

@@ -12,6 +12,7 @@ import { formatNumber } from './format.js';
import { getIdeClientName, type IDEExtensionInstallationStatus, isJetBrainsIde, toIDEDisplayName } from './ide.js';
import { getClaudeAiUserDefaultModelDescription, modelDisplayString } from './model/model.js';
import { getAPIProvider } from './model/providers.js';
import { resolveProviderRequest } from '../services/api/providerConfig.js';
import { getMTLSConfig } from './mtls.js';
import { checkInstall } from './nativeInstaller/index.js';
import { getProxyUrl } from './proxy.js';
@@ -247,6 +248,7 @@ export function buildAPIProviderProperties(): Property[] {
vertex: 'Google Vertex AI',
foundry: 'Microsoft Foundry',
openai: 'OpenAI-compatible',
codex: 'Codex',
gemini: 'Google Gemini',
}[apiProvider];
properties.push({
@@ -325,34 +327,73 @@ export function buildAPIProviderProperties(): Property[] {
}
} else if (apiProvider === 'openai') {
const openaiBaseUrl = process.env.OPENAI_BASE_URL;
if (openaiBaseUrl) {
properties.push({
label: 'OpenAI base URL',
value: redactSecretValueForDisplay(openaiBaseUrl, process.env) ?? openaiBaseUrl
});
if (openaiBaseUrl) {
properties.push({
label: 'OpenAI base URL',
value: redactSecretValueForDisplay(openaiBaseUrl, process.env) ?? openaiBaseUrl
});
}
const openaiModel = process.env.OPENAI_MODEL;
if (openaiModel) {
// Build display model string with resolved model + reasoning effort
let modelDisplay = openaiModel;
const resolved = resolveProviderRequest({ model: openaiModel });
const resolvedModel = resolved.resolvedModel;
const reasoningEffort = resolved.reasoning?.effort;
if (resolvedModel && resolvedModel !== openaiModel.toLowerCase()) {
// Show resolved model name
modelDisplay = resolvedModel;
}
const openaiModel = process.env.OPENAI_MODEL;
if (openaiModel) {
properties.push({
label: 'Model',
value: redactSecretValueForDisplay(openaiModel, process.env) ?? openaiModel
});
if (reasoningEffort) {
modelDisplay = `${modelDisplay} (${reasoningEffort})`;
}
} else if (apiProvider === 'gemini') {
const geminiBaseUrl = process.env.GEMINI_BASE_URL;
if (geminiBaseUrl) {
properties.push({
label: 'Gemini base URL',
value: redactSecretValueForDisplay(geminiBaseUrl, process.env) ?? geminiBaseUrl
});
properties.push({
label: 'Model',
value: redactSecretValueForDisplay(modelDisplay, process.env) ?? modelDisplay
});
}
} else if (apiProvider === 'codex') {
const codexBaseUrl = process.env.OPENAI_BASE_URL;
if (codexBaseUrl) {
properties.push({
label: 'Codex base URL',
value: redactSecretValueForDisplay(codexBaseUrl, process.env) ?? codexBaseUrl
});
}
const openaiModel = process.env.OPENAI_MODEL;
if (openaiModel) {
// Build display model string with resolved model + reasoning effort
let modelDisplay = openaiModel;
const resolved = resolveProviderRequest({ model: openaiModel });
const resolvedModel = resolved.resolvedModel;
const reasoningEffort = resolved.reasoning?.effort;
if (resolvedModel && resolvedModel !== openaiModel.toLowerCase()) {
// Show resolved model name
modelDisplay = resolvedModel;
}
const geminiModel = process.env.GEMINI_MODEL;
if (geminiModel) {
properties.push({
label: 'Model',
value: redactSecretValueForDisplay(geminiModel, process.env) ?? geminiModel
});
if (reasoningEffort) {
modelDisplay = `${modelDisplay} (${reasoningEffort})`;
}
properties.push({
label: 'Model',
value: redactSecretValueForDisplay(modelDisplay, process.env) ?? modelDisplay
});
}
} else if (apiProvider === 'gemini') {
const geminiBaseUrl = process.env.GEMINI_BASE_URL;
if (geminiBaseUrl) {
properties.push({
label: 'Gemini base URL',
value: redactSecretValueForDisplay(geminiBaseUrl, process.env) ?? geminiBaseUrl
});
}
const geminiModel = process.env.GEMINI_MODEL;
if (geminiModel) {
properties.push({
label: 'Model',
value: redactSecretValueForDisplay(geminiModel, process.env) ?? geminiModel
});
}
}
const proxyUrl = getProxyUrl();
if (proxyUrl) {

View File

@@ -286,6 +286,25 @@ function createCommandSuggestionItem(
}
}
/**
 * Ensure suggestion IDs are unique for React keys and selection logic.
 * The first occurrence of an ID is kept untouched; the nth duplicate is
 * re-keyed as `id#n` (so the second occurrence becomes `id#2`, etc.).
 */
function ensureUniqueSuggestionIds(items: SuggestionItem[]): SuggestionItem[] {
  const occurrences = new Map<string, number>()
  const result: SuggestionItem[] = []
  for (const item of items) {
    const priorCount = occurrences.get(item.id) ?? 0
    occurrences.set(item.id, priorCount + 1)
    result.push(
      priorCount === 0 ? item : { ...item, id: `${item.id}#${priorCount + 1}` },
    )
  }
  return result
}
/**
* Generate command suggestions based on input
*/
@@ -369,14 +388,14 @@ export function generateCommandSuggestions(
// Combine with built-in commands prioritized after recently used,
// so they remain visible even when many skills are installed
return [
return ensureUniqueSuggestionIds([
...recentlyUsed,
...builtinCommands,
...userCommands,
...projectCommands,
...policyCommands,
...otherCommands,
].map(cmd => createCommandSuggestionItem(cmd))
].map(cmd => createCommandSuggestionItem(cmd)))
}
// The Fuse index filters isHidden at build time and is keyed on the
@@ -491,10 +510,13 @@ export function generateCommandSuggestions(
if (hiddenExact) {
const hiddenId = getCommandId(hiddenExact)
if (!fuseSuggestions.some(s => s.id === hiddenId)) {
return [createCommandSuggestionItem(hiddenExact), ...fuseSuggestions]
return ensureUniqueSuggestionIds([
createCommandSuggestionItem(hiddenExact),
...fuseSuggestions,
])
}
}
return fuseSuggestions
return ensureUniqueSuggestionIds(fuseSuggestions)
}
/**