diff --git a/src/commands/effort/effort.tsx b/src/commands/effort/effort.tsx
index 0dadd606..1cbc83d1 100644
--- a/src/commands/effort/effort.tsx
+++ b/src/commands/effort/effort.tsx
@@ -4,7 +4,8 @@ import { useMainLoopModel } from '../../hooks/useMainLoopModel.js';
import { type AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS, logEvent } from '../../services/analytics/index.js';
import { useAppState, useSetAppState } from '../../state/AppState.js';
import type { LocalJSXCommandOnDone } from '../../types/command.js';
-import { type EffortValue, getDisplayedEffortLevel, getEffortEnvOverride, getEffortValueDescription, isEffortLevel, toPersistableEffort } from '../../utils/effort.js';
+import { type EffortValue, getDisplayedEffortLevel, getEffortEnvOverride, getEffortValueDescription, isEffortLevel, isOpenAIEffortLevel, modelUsesOpenAIEffort, toPersistableEffort } from '../../utils/effort.js';
+import { EffortPicker } from '../../components/EffortPicker.js';
import { updateSettingsForSource } from '../../utils/settings/settings.js';
const COMMON_HELP_ARGS = ['help', '-h', '--help'];
type EffortCommandResult = {
@@ -109,12 +110,15 @@ export function executeEffort(args: string): EffortCommandResult {
if (normalized === 'auto' || normalized === 'unset') {
return unsetEffortLevel();
}
- if (!isEffortLevel(normalized)) {
- return {
- message: `Invalid argument: ${args}. Valid options are: low, medium, high, max, auto`
- };
+ if (isEffortLevel(normalized)) {
+ return setEffortValue(normalized);
}
- return setEffortValue(normalized);
+ if (isOpenAIEffortLevel(normalized)) {
+ return setEffortValue(normalized);
+ }
+ return {
+ message: `Invalid argument: ${args}. Valid options are: low, medium, high, max, xhigh, auto`
+ };
}
function ShowCurrentEffort(t0) {
const {
@@ -174,10 +178,44 @@ export async function call(onDone: LocalJSXCommandOnDone, _context: unknown, arg
onDone('Usage: /effort [low|medium|high|max|auto]\n\nEffort levels:\n- low: Quick, straightforward implementation\n- medium: Balanced approach with standard testing\n- high: Comprehensive implementation with extensive testing\n- max: Maximum capability with deepest reasoning (Opus 4.6 only)\n- auto: Use the default effort level for your model');
return;
}
- if (!args || args === 'current' || args === 'status') {
+ if (args === 'current' || args === 'status') {
return <ShowCurrentEffort onDone={onDone} />;
}
+ if (!args) {
+ return <EffortPickerWrapper onDone={onDone} />;
+ }
const result = executeEffort(args);
return <ApplyEffortAndClose result={result} onDone={onDone} />;
}
+
+function EffortPickerWrapper({ onDone }: { onDone: LocalJSXCommandOnDone }) {
+ const setAppState = useSetAppState();
+ const model = useMainLoopModel();
+ const usesOpenAIEffort = modelUsesOpenAIEffort(model);
+
+ function handleSelect(effort: EffortValue | undefined) {
+ const persistable = toPersistableEffort(effort);
+ if (persistable !== undefined) {
+ updateSettingsForSource('userSettings', {
+ effortLevel: persistable
+ });
+ }
+ logEvent('tengu_effort_command', {
+ effort: (effort ?? 'auto') as AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS
+ });
+ setAppState(prev => ({
+ ...prev,
+ effortValue: effort
+ }));
+ const description = effort ? getEffortValueDescription(effort) : 'Use default effort level for your model';
+ const suffix = persistable !== undefined ? '' : ' (this session only)';
+ onDone(`Set effort level to ${effort ?? 'auto'}${suffix}: ${description}`);
+ }
+
+ function handleCancel() {
+ onDone('Cancelled');
+ }
+
+ return <EffortPicker onSelect={handleSelect} onCancel={handleCancel} />;
+}
//# sourceMappingURL=data:application/json;charset=utf-8;base64,{"version":3,"names":["React","useMainLoopModel","AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS","logEvent","useAppState","useSetAppState","LocalJSXCommandOnDone","EffortValue","getDisplayedEffortLevel","getEffortEnvOverride","getEffortValueDescription","isEffortLevel","toPersistableEffort","updateSettingsForSource","COMMON_HELP_ARGS","EffortCommandResult","message","effortUpdate","value","setEffortValue","effortValue","persistable","undefined","result","effortLevel","error","effort","envOverride","envRaw","process","env","CLAUDE_CODE_EFFORT_LEVEL","description","suffix","showCurrentEffort","appStateEffort","model","effectiveValue","level","unsetEffortLevel","executeEffort","args","normalized","toLowerCase","ShowCurrentEffort","t0","onDone","_temp","s","ApplyEffortAndClose","$","_c","setAppState","t1","t2","prev","useEffect","call","_context","Promise","ReactNode","trim","includes"],"sources":["effort.tsx"],"sourcesContent":["import * as React from 'react'\nimport { useMainLoopModel } from '../../hooks/useMainLoopModel.js'\nimport {\n  type AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS,\n  logEvent,\n} from '../../services/analytics/index.js'\nimport { useAppState, useSetAppState } from '../../state/AppState.js'\nimport type { LocalJSXCommandOnDone } from '../../types/command.js'\nimport {\n  type EffortValue,\n  getDisplayedEffortLevel,\n  getEffortEnvOverride,\n  getEffortValueDescription,\n  isEffortLevel,\n  toPersistableEffort,\n} from '../../utils/effort.js'\nimport { updateSettingsForSource } from '../../utils/settings/settings.js'\n\nconst COMMON_HELP_ARGS = ['help', '-h', '--help']\n\ntype EffortCommandResult = {\n  message: string\n  effortUpdate?: { value: EffortValue | undefined }\n}\n\nfunction setEffortValue(effortValue: EffortValue): EffortCommandResult {\n  const persistable = toPersistableEffort(effortValue)\n  if (persistable !== undefined) {\n    const 
result = updateSettingsForSource('userSettings', {\n      effortLevel: persistable,\n    })\n    if (result.error) {\n      return {\n        message: `Failed to set effort level: ${result.error.message}`,\n      }\n    }\n  }\n  logEvent('tengu_effort_command', {\n    effort:\n      effortValue as AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS,\n  })\n\n  // Env var wins at resolveAppliedEffort time. Only flag it when it actually\n  // conflicts — if env matches what the user just asked for, the outcome is\n  // the same, so \"Set effort to X\" is true and the note is noise.\n  const envOverride = getEffortEnvOverride()\n  if (envOverride !== undefined && envOverride !== effortValue) {\n    const envRaw = process.env.CLAUDE_CODE_EFFORT_LEVEL\n    if (persistable === undefined) {\n      return {\n        message: `Not applied: CLAUDE_CODE_EFFORT_LEVEL=${envRaw} overrides effort this session, and ${effortValue} is session-only (nothing saved)`,\n        effortUpdate: { value: effortValue },\n      }\n    }\n    return {\n      message: `CLAUDE_CODE_EFFORT_LEVEL=${envRaw} overrides this session — clear it and ${effortValue} takes over`,\n      effortUpdate: { value: effortValue },\n    }\n  }\n\n  const description = getEffortValueDescription(effortValue)\n  const suffix = persistable !== undefined ? '' : ' (this session only)'\n  return {\n    message: `Set effort level to ${effortValue}${suffix}: ${description}`,\n    effortUpdate: { value: effortValue },\n  }\n}\n\nexport function showCurrentEffort(\n  appStateEffort: EffortValue | undefined,\n  model: string,\n): EffortCommandResult {\n  const envOverride = getEffortEnvOverride()\n  const effectiveValue =\n    envOverride === null ? undefined : (envOverride ?? 
appStateEffort)\n  if (effectiveValue === undefined) {\n    const level = getDisplayedEffortLevel(model, appStateEffort)\n    return { message: `Effort level: auto (currently ${level})` }\n  }\n  const description = getEffortValueDescription(effectiveValue)\n  return {\n    message: `Current effort level: ${effectiveValue} (${description})`,\n  }\n}\n\nfunction unsetEffortLevel(): EffortCommandResult {\n  const result = updateSettingsForSource('userSettings', {\n    effortLevel: undefined,\n  })\n  if (result.error) {\n    return {\n      message: `Failed to set effort level: ${result.error.message}`,\n    }\n  }\n  logEvent('tengu_effort_command', {\n    effort:\n      'auto' as AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS,\n  })\n  // env=auto/unset (null) matches what /effort auto asks for, so only warn\n  // when env is pinning a specific level that will keep overriding.\n  const envOverride = getEffortEnvOverride()\n  if (envOverride !== undefined && envOverride !== null) {\n    const envRaw = process.env.CLAUDE_CODE_EFFORT_LEVEL\n    return {\n      message: `Cleared effort from settings, but CLAUDE_CODE_EFFORT_LEVEL=${envRaw} still controls this session`,\n      effortUpdate: { value: undefined },\n    }\n  }\n  return {\n    message: 'Effort level set to auto',\n    effortUpdate: { value: undefined },\n  }\n}\n\nexport function executeEffort(args: string): EffortCommandResult {\n  const normalized = args.toLowerCase()\n  if (normalized === 'auto' || normalized === 'unset') {\n    return unsetEffortLevel()\n  }\n\n  if (!isEffortLevel(normalized)) {\n    return {\n      message: `Invalid argument: ${args}. 
Valid options are: low, medium, high, max, auto`,\n    }\n  }\n\n  return setEffortValue(normalized)\n}\n\nfunction ShowCurrentEffort({\n  onDone,\n}: {\n  onDone: (result: string) => void\n}): React.ReactNode {\n  const effortValue = useAppState(s => s.effortValue)\n  const model = useMainLoopModel()\n  const { message } = showCurrentEffort(effortValue, model)\n  onDone(message)\n  return null\n}\n\nfunction ApplyEffortAndClose({\n  result,\n  onDone,\n}: {\n  result: EffortCommandResult\n  onDone: (result: string) => void\n}): React.ReactNode {\n  const setAppState = useSetAppState()\n  const { effortUpdate, message } = result\n  React.useEffect(() => {\n    if (effortUpdate) {\n      setAppState(prev => ({\n        ...prev,\n        effortValue: effortUpdate.value,\n      }))\n    }\n    onDone(message)\n  }, [setAppState, effortUpdate, message, onDone])\n  return null\n}\n\nexport async function call(\n  onDone: LocalJSXCommandOnDone,\n  _context: unknown,\n  args?: string,\n): Promise<React.ReactNode> {\n  args = args?.trim() || ''\n\n  if (COMMON_HELP_ARGS.includes(args)) {\n    onDone(\n      'Usage: /effort [low|medium|high|max|auto]\\n\\nEffort levels:\\n- low: Quick, straightforward implementation\\n- medium: Balanced approach with standard testing\\n- high: Comprehensive implementation with extensive testing\\n- max: Maximum capability with deepest reasoning (Opus 4.6 only)\\n- auto: Use the default effort level for your model',\n    )\n    return\n  }\n\n  if (!args || args === 'current' || args === 'status') {\n    return <ShowCurrentEffort onDone={onDone} />\n  }\n\n  const result = executeEffort(args)\n  return <ApplyEffortAndClose result={result} onDone={onDone} 
/>\n}\n"],"mappings":";AAAA,OAAO,KAAKA,KAAK,MAAM,OAAO;AAC9B,SAASC,gBAAgB,QAAQ,iCAAiC;AAClE,SACE,KAAKC,0DAA0D,EAC/DC,QAAQ,QACH,mCAAmC;AAC1C,SAASC,WAAW,EAAEC,cAAc,QAAQ,yBAAyB;AACrE,cAAcC,qBAAqB,QAAQ,wBAAwB;AACnE,SACE,KAAKC,WAAW,EAChBC,uBAAuB,EACvBC,oBAAoB,EACpBC,yBAAyB,EACzBC,aAAa,EACbC,mBAAmB,QACd,uBAAuB;AAC9B,SAASC,uBAAuB,QAAQ,kCAAkC;AAE1E,MAAMC,gBAAgB,GAAG,CAAC,MAAM,EAAE,IAAI,EAAE,QAAQ,CAAC;AAEjD,KAAKC,mBAAmB,GAAG;EACzBC,OAAO,EAAE,MAAM;EACfC,YAAY,CAAC,EAAE;IAAEC,KAAK,EAAEX,WAAW,GAAG,SAAS;EAAC,CAAC;AACnD,CAAC;AAED,SAASY,cAAcA,CAACC,WAAW,EAAEb,WAAW,CAAC,EAAEQ,mBAAmB,CAAC;EACrE,MAAMM,WAAW,GAAGT,mBAAmB,CAACQ,WAAW,CAAC;EACpD,IAAIC,WAAW,KAAKC,SAAS,EAAE;IAC7B,MAAMC,MAAM,GAAGV,uBAAuB,CAAC,cAAc,EAAE;MACrDW,WAAW,EAAEH;IACf,CAAC,CAAC;IACF,IAAIE,MAAM,CAACE,KAAK,EAAE;MAChB,OAAO;QACLT,OAAO,EAAE,+BAA+BO,MAAM,CAACE,KAAK,CAACT,OAAO;MAC9D,CAAC;IACH;EACF;EACAb,QAAQ,CAAC,sBAAsB,EAAE;IAC/BuB,MAAM,EACJN,WAAW,IAAIlB;EACnB,CAAC,CAAC;;EAEF;EACA;EACA;EACA,MAAMyB,WAAW,GAAGlB,oBAAoB,CAAC,CAAC;EAC1C,IAAIkB,WAAW,KAAKL,SAAS,IAAIK,WAAW,KAAKP,WAAW,EAAE;IAC5D,MAAMQ,MAAM,GAAGC,OAAO,CAACC,GAAG,CAACC,wBAAwB;IACnD,IAAIV,WAAW,KAAKC,SAAS,EAAE;MAC7B,OAAO;QACLN,OAAO,EAAE,yCAAyCY,MAAM,uCAAuCR,WAAW,kCAAkC;QAC5IH,YAAY,EAAE;UAAEC,KAAK,EAAEE;QAAY;MACrC,CAAC;IACH;IACA,OAAO;MACLJ,OAAO,EAAE,4BAA4BY,MAAM,0CAA0CR,WAAW,aAAa;MAC7GH,YAAY,EAAE;QAAEC,KAAK,EAAEE;MAAY;IACrC,CAAC;EACH;EAEA,MAAMY,WAAW,GAAGtB,yBAAyB,CAACU,WAAW,CAAC;EAC1D,MAAMa,MAAM,GAAGZ,WAAW,KAAKC,SAAS,GAAG,EAAE,GAAG,sBAAsB;EACtE,OAAO;IACLN,OAAO,EAAE,uBAAuBI,WAAW,GAAGa,MAAM,KAAKD,WAAW,EAAE;IACtEf,YAAY,EAAE;MAAEC,KAAK,EAAEE;IAAY;EACrC,CAAC;AACH;AAEA,OAAO,SAASc,iBAAiBA,CAC/BC,cAAc,EAAE5B,WAAW,GAAG,SAAS,EACvC6B,KAAK,EAAE,MAAM,CACd,EAAErB,mBAAmB,CAAC;EACrB,MAAMY,WAAW,GAAGlB,oBAAoB,CAAC,CAAC;EAC1C,MAAM4B,cAAc,GAClBV,WAAW,KAAK,IAAI,GAAGL,SAAS,GAAIK,WAAW,IAAIQ,cAAe;EACpE,IAAIE,cAAc,KAAKf,SAAS,EAAE;IAChC,MAAMgB,KAAK,GAAG9B,uBAAuB,CAAC4B,KAAK,EAAED,cAAc,CAAC;IAC5D,OAAO;MAAEnB,OAAO,EAAE,iCAAiCsB,KAAK;IAAI,CAAC;EAC/D;EACA,MAAMN,WAAW,GAAGtB,yBAAyB,CAAC2B,cAAc,CAAC;EAC7D
,OAAO;IACLrB,OAAO,EAAE,yBAAyBqB,cAAc,KAAKL,WAAW;EAClE,CAAC;AACH;AAEA,SAASO,gBAAgBA,CAAA,CAAE,EAAExB,mBAAmB,CAAC;EAC/C,MAAMQ,MAAM,GAAGV,uBAAuB,CAAC,cAAc,EAAE;IACrDW,WAAW,EAAEF;EACf,CAAC,CAAC;EACF,IAAIC,MAAM,CAACE,KAAK,EAAE;IAChB,OAAO;MACLT,OAAO,EAAE,+BAA+BO,MAAM,CAACE,KAAK,CAACT,OAAO;IAC9D,CAAC;EACH;EACAb,QAAQ,CAAC,sBAAsB,EAAE;IAC/BuB,MAAM,EACJ,MAAM,IAAIxB;EACd,CAAC,CAAC;EACF;EACA;EACA,MAAMyB,WAAW,GAAGlB,oBAAoB,CAAC,CAAC;EAC1C,IAAIkB,WAAW,KAAKL,SAAS,IAAIK,WAAW,KAAK,IAAI,EAAE;IACrD,MAAMC,MAAM,GAAGC,OAAO,CAACC,GAAG,CAACC,wBAAwB;IACnD,OAAO;MACLf,OAAO,EAAE,8DAA8DY,MAAM,8BAA8B;MAC3GX,YAAY,EAAE;QAAEC,KAAK,EAAEI;MAAU;IACnC,CAAC;EACH;EACA,OAAO;IACLN,OAAO,EAAE,0BAA0B;IACnCC,YAAY,EAAE;MAAEC,KAAK,EAAEI;IAAU;EACnC,CAAC;AACH;AAEA,OAAO,SAASkB,aAAaA,CAACC,IAAI,EAAE,MAAM,CAAC,EAAE1B,mBAAmB,CAAC;EAC/D,MAAM2B,UAAU,GAAGD,IAAI,CAACE,WAAW,CAAC,CAAC;EACrC,IAAID,UAAU,KAAK,MAAM,IAAIA,UAAU,KAAK,OAAO,EAAE;IACnD,OAAOH,gBAAgB,CAAC,CAAC;EAC3B;EAEA,IAAI,CAAC5B,aAAa,CAAC+B,UAAU,CAAC,EAAE;IAC9B,OAAO;MACL1B,OAAO,EAAE,qBAAqByB,IAAI;IACpC,CAAC;EACH;EAEA,OAAOtB,cAAc,CAACuB,UAAU,CAAC;AACnC;AAEA,SAAAE,kBAAAC,EAAA;EAA2B;IAAAC;EAAA,IAAAD,EAI1B;EACC,MAAAzB,WAAA,GAAoBhB,WAAW,CAAC2C,KAAkB,CAAC;EACnD,MAAAX,KAAA,GAAcnC,gBAAgB,CAAC,CAAC;EAChC;IAAAe;EAAA,IAAoBkB,iBAAiB,CAACd,WAAW,EAAEgB,KAAK,CAAC;EACzDU,MAAM,CAAC9B,OAAO,CAAC;EAAA,OACR,IAAI;AAAA;AATb,SAAA+B,MAAAC,CAAA;EAAA,OAKuCA,CAAC,CAAA5B,WAAY;AAAA;AAOpD,SAAA6B,oBAAAJ,EAAA;EAAA,MAAAK,CAAA,GAAAC,EAAA;EAA6B;IAAA5B,MAAA;IAAAuB;EAAA,IAAAD,EAM5B;EACC,MAAAO,WAAA,GAAoB/C,cAAc,CAAC,CAAC;EACpC;IAAAY,YAAA;IAAAD;EAAA,IAAkCO,MAAM;EAAA,IAAA8B,EAAA;EAAA,IAAAC,EAAA;EAAA,IAAAJ,CAAA,QAAAjC,YAAA,IAAAiC,CAAA,QAAAlC,OAAA,IAAAkC,CAAA,QAAAJ,MAAA,IAAAI,CAAA,QAAAE,WAAA;IACxBC,EAAA,GAAAA,CAAA;MACd,IAAIpC,YAAY;QACdmC,WAAW,CAACG,IAAA,KAAS;UAAA,GAChBA,IAAI;UAAAnC,WAAA,EACMH,YAAY,CAAAC;QAC3B,CAAC,CAAC,CAAC;MAAA;MAEL4B,MAAM,CAAC9B,OAAO,CAAC;IAAA,CAChB;IAAEsC,EAAA,IAACF,WAAW,EAAEnC,YAAY,EAAED,OAAO,EAAE8B,MAAM,CAAC;IAAAI,CAAA,MAAAjC,YAAA;IAAAiC,CAAA,MAAAlC,OAAA;IAAAkC,CAAA,MAAAJ,MAAA;IAAAI,CA
AA,MAAAE,WAAA;IAAAF,CAAA,MAAAG,EAAA;IAAAH,CAAA,MAAAI,EAAA;EAAA;IAAAD,EAAA,GAAAH,CAAA;IAAAI,EAAA,GAAAJ,CAAA;EAAA;EAR/ClD,KAAK,CAAAwD,SAAU,CAACH,EAQf,EAAEC,EAA4C,CAAC;EAAA,OACzC,IAAI;AAAA;AAGb,OAAO,eAAeG,IAAIA,CACxBX,MAAM,EAAExC,qBAAqB,EAC7BoD,QAAQ,EAAE,OAAO,EACjBjB,IAAa,CAAR,EAAE,MAAM,CACd,EAAEkB,OAAO,CAAC3D,KAAK,CAAC4D,SAAS,CAAC,CAAC;EAC1BnB,IAAI,GAAGA,IAAI,EAAEoB,IAAI,CAAC,CAAC,IAAI,EAAE;EAEzB,IAAI/C,gBAAgB,CAACgD,QAAQ,CAACrB,IAAI,CAAC,EAAE;IACnCK,MAAM,CACJ,kVACF,CAAC;IACD;EACF;EAEA,IAAI,CAACL,IAAI,IAAIA,IAAI,KAAK,SAAS,IAAIA,IAAI,KAAK,QAAQ,EAAE;IACpD,OAAO,CAAC,iBAAiB,CAAC,MAAM,CAAC,CAACK,MAAM,CAAC,GAAG;EAC9C;EAEA,MAAMvB,MAAM,GAAGiB,aAAa,CAACC,IAAI,CAAC;EAClC,OAAO,CAAC,mBAAmB,CAAC,MAAM,CAAC,CAAClB,MAAM,CAAC,CAAC,MAAM,CAAC,CAACuB,MAAM,CAAC,GAAG;AAChE","ignoreList":[]}
\ No newline at end of file
diff --git a/src/components/CustomSelect/use-select-navigation.ts b/src/components/CustomSelect/use-select-navigation.ts
index 7ecb4e71..544bbfa7 100644
--- a/src/components/CustomSelect/use-select-navigation.ts
+++ b/src/components/CustomSelect/use-select-navigation.ts
@@ -84,44 +84,44 @@ const reducer = (state: State, action: Action): State => {
return state
}
- // Wrap to first item if at the end
- const next = item.next || state.optionMap.first
+ // If there's a next item in the list, go to it
+ if (item.next) {
+ const needsToScroll = item.next.index >= state.visibleToIndex
- if (!next) {
+ if (!needsToScroll) {
+ return {
+ ...state,
+ focusedValue: item.next.value,
+ }
+ }
+
+ const nextVisibleToIndex = Math.min(
+ state.optionMap.size,
+ state.visibleToIndex + 1,
+ )
+
+ const nextVisibleFromIndex = nextVisibleToIndex - state.visibleOptionCount
+
+ return {
+ ...state,
+ focusedValue: item.next.value,
+ visibleFromIndex: nextVisibleFromIndex,
+ visibleToIndex: nextVisibleToIndex,
+ }
+ }
+
+ // No next item - wrap to first item
+ const firstItem = state.optionMap.first
+ if (!firstItem) {
return state
}
// When wrapping to first, reset viewport to start
- if (!item.next && next === state.optionMap.first) {
- return {
- ...state,
- focusedValue: next.value,
- visibleFromIndex: 0,
- visibleToIndex: state.visibleOptionCount,
- }
- }
-
- const needsToScroll = next.index >= state.visibleToIndex
-
- if (!needsToScroll) {
- return {
- ...state,
- focusedValue: next.value,
- }
- }
-
- const nextVisibleToIndex = Math.min(
- state.optionMap.size,
- state.visibleToIndex + 1,
- )
-
- const nextVisibleFromIndex = nextVisibleToIndex - state.visibleOptionCount
-
return {
...state,
- focusedValue: next.value,
- visibleFromIndex: nextVisibleFromIndex,
- visibleToIndex: nextVisibleToIndex,
+ focusedValue: firstItem.value,
+ visibleFromIndex: 0,
+ visibleToIndex: state.visibleOptionCount,
}
}
@@ -136,44 +136,43 @@ const reducer = (state: State, action: Action): State => {
return state
}
- // Wrap to last item if at the beginning
- const previous = item.previous || state.optionMap.last
+ // If there's a previous item in the list, go to it
+ if (item.previous) {
+ const needsToScroll = item.previous.index < state.visibleFromIndex
- if (!previous) {
- return state
- }
+ if (!needsToScroll) {
+ return {
+ ...state,
+ focusedValue: item.previous.value,
+ }
+ }
+
+ const nextVisibleFromIndex = Math.max(0, state.visibleFromIndex - 1)
+ const nextVisibleToIndex = nextVisibleFromIndex + state.visibleOptionCount
- // When wrapping to last, reset viewport to end
- if (!item.previous && previous === state.optionMap.last) {
- const nextVisibleToIndex = state.optionMap.size
- const nextVisibleFromIndex = Math.max(
- 0,
- nextVisibleToIndex - state.visibleOptionCount,
- )
return {
...state,
- focusedValue: previous.value,
+ focusedValue: item.previous.value,
visibleFromIndex: nextVisibleFromIndex,
visibleToIndex: nextVisibleToIndex,
}
}
- const needsToScroll = previous.index <= state.visibleFromIndex
-
- if (!needsToScroll) {
- return {
- ...state,
- focusedValue: previous.value,
- }
+ // No previous item - wrap to last item
+ const lastItem = state.optionMap.last
+ if (!lastItem) {
+ return state
}
- const nextVisibleFromIndex = Math.max(0, state.visibleFromIndex - 1)
-
- const nextVisibleToIndex = nextVisibleFromIndex + state.visibleOptionCount
-
+ // When wrapping to last, reset viewport to end
+ const nextVisibleToIndex = state.optionMap.size
+ const nextVisibleFromIndex = Math.max(
+ 0,
+ nextVisibleToIndex - state.visibleOptionCount,
+ )
return {
...state,
- focusedValue: previous.value,
+ focusedValue: lastItem.value,
visibleFromIndex: nextVisibleFromIndex,
visibleToIndex: nextVisibleToIndex,
}
diff --git a/src/components/EffortPicker.tsx b/src/components/EffortPicker.tsx
new file mode 100644
index 00000000..2e86509e
--- /dev/null
+++ b/src/components/EffortPicker.tsx
@@ -0,0 +1,152 @@
+import React, { useState } from 'react'
+import { Box, Text } from '../ink.js'
+import { useMainLoopModel } from '../hooks/useMainLoopModel.js'
+import { useAppState, useSetAppState } from '../state/AppState.js'
+import type { EffortLevel, OpenAIEffortLevel } from '../utils/effort.js'
+import {
+ getAvailableEffortLevels,
+ getDisplayedEffortLevel,
+ getEffortLevelDescription,
+ getEffortLevelLabel,
+ getEffortValueDescription,
+ modelSupportsEffort,
+ modelUsesOpenAIEffort,
+ standardEffortToOpenAI,
+ isOpenAIEffortLevel,
+} from '../utils/effort.js'
+import { getAPIProvider } from '../utils/model/providers.js'
+import { getReasoningEffortForModel } from '../services/api/providerConfig.js'
+import { Select } from './CustomSelect/select.js'
+import { effortLevelToSymbol } from './EffortIndicator.js'
+import { KeyboardShortcutHint } from './design-system/KeyboardShortcutHint.js'
+import { Byline } from './design-system/Byline.js'
+
+type EffortOption = {
+ label: React.ReactNode
+ value: string
+ description: string
+ isAvailable: boolean
+}
+
+type Props = {
+ onSelect: (effort: EffortLevel | undefined) => void
+ onCancel?: () => void
+}
+
+export function EffortPicker({ onSelect, onCancel }: Props) {
+ const model = useMainLoopModel()
+ const appStateEffort = useAppState((s: any) => s.effortValue)
+ const setAppState = useSetAppState()
+ const provider = getAPIProvider()
+ const usesOpenAIEffort = modelUsesOpenAIEffort(model)
+ const availableLevels = getAvailableEffortLevels(model)
+ const currentDisplayedLevel = getDisplayedEffortLevel(model, appStateEffort)
+
+ // For OpenAI/Codex, get the model's default reasoning effort
+ const modelReasoningEffort = usesOpenAIEffort ? getReasoningEffortForModel(model) : undefined
+ const defaultEffortForModel = modelReasoningEffort || currentDisplayedLevel
+
+ const options: EffortOption[] = [
+ {
+ label: <EffortOptionLabel level="auto" text="Auto" isCurrent={appStateEffort === undefined} />,
+ value: 'auto',
+ description: 'Use the default effort level for your model',
+ isAvailable: true,
+ },
+ ...availableLevels.map(level => {
+ const displayLevel = usesOpenAIEffort
+ ? (level === 'xhigh' ? 'max' : level)
+ : level
+ const isCurrent = currentDisplayedLevel === displayLevel
+ return {
+ label: (
+ <EffortOptionLabel level={displayLevel as EffortLevel | 'auto'} text={getEffortLevelLabel(level)} isCurrent={isCurrent} />
+ ),
+ value: level,
+ description: getEffortLevelDescription(level as EffortLevel),
+ isAvailable: true,
+ }
+ }),
+ ]
+
+ function handleSelect(value: string) {
+ if (value === 'auto') {
+ setAppState(prev => ({
+ ...prev,
+ effortValue: undefined,
+ }))
+ onSelect(undefined)
+ } else {
+ const effortLevel = value as EffortLevel
+ setAppState(prev => ({
+ ...prev,
+ effortValue: effortLevel,
+ }))
+ onSelect(effortLevel)
+ }
+ }
+
+ function handleCancel() {
+ onCancel?.()
+ }
+
+ const supportsEffort = modelSupportsEffort(model)
+ // For OpenAI/Codex, use the model's default reasoning effort as initial focus
+ // For Claude, use the displayed effort level or 'auto'
+ const initialFocus = usesOpenAIEffort
+ ? (modelReasoningEffort || 'auto')
+ : (appStateEffort ? String(appStateEffort) : 'auto')
+
+ return (
+ <Box flexDirection="column" gap={1}>
+ <Box flexDirection="column">
+ <Text bold>Set effort level</Text>
+ <Text dimColor>
+ {usesOpenAIEffort
+ ? `OpenAI/Codex provider (${provider})`
+ : supportsEffort
+ ? `Claude model · ${provider} provider`
+ : `Effort not supported for this model`
+ }
+ </Text>
+ </Box>
+ {/* NOTE(review): JSX was stripped in this paste — Select wiring reconstructed; verify prop names against CustomSelect/select.tsx */}
+ <Select
+ options={options}
+ defaultValue={initialFocus}
+ onChange={handleSelect}
+ onCancel={handleCancel}
+ />
+ <Box>
+ <KeyboardShortcutHint shortcut="esc" />
+ <Text dimColor> to cancel</Text>
+ </Box>
+ <Byline />
+ </Box>
+ )
+}
+
+function EffortOptionLabel({ level, text, isCurrent }: { level: EffortLevel | 'auto', text: string, isCurrent: boolean }) {
+ // NOTE(review): the 'auto' glyph was mojibake in this paste ('โ' = truncated UTF-8 multi-byte char) — '∅' assumed; confirm against EffortIndicator
+ const symbol = level === 'auto' ? '∅' : effortLevelToSymbol(level as EffortLevel)
+ const color = isCurrent ? 'remember' : level === 'auto' ? 'subtle' : 'suggestion'
+
+ return (
+ <>
+ <Text color={color}>{symbol} </Text>
+ <Text>{text}</Text>
+ {isCurrent && <Text dimColor> (current)</Text>}
+ </>
+ )
+}
diff --git a/src/components/StartupScreen.ts b/src/components/StartupScreen.ts
index b20d26c1..e38a4111 100644
--- a/src/components/StartupScreen.ts
+++ b/src/components/StartupScreen.ts
@@ -97,21 +97,45 @@ function detectProvider(): { name: string; model: string; baseUrl: string; isLoc
}
if (useOpenAI) {
- const model = process.env.OPENAI_MODEL || 'gpt-4o'
+ const rawModel = process.env.OPENAI_MODEL || 'gpt-4o'
const baseUrl = process.env.OPENAI_BASE_URL || 'https://api.openai.com/v1'
const isLocal = /localhost|127\.0\.0\.1|0\.0\.0\.0/.test(baseUrl)
let name = 'OpenAI'
- if (/deepseek/i.test(baseUrl) || /deepseek/i.test(model)) name = 'DeepSeek'
+ if (/deepseek/i.test(baseUrl) || /deepseek/i.test(rawModel)) name = 'DeepSeek'
else if (/openrouter/i.test(baseUrl)) name = 'OpenRouter'
else if (/together/i.test(baseUrl)) name = 'Together AI'
else if (/groq/i.test(baseUrl)) name = 'Groq'
- else if (/mistral/i.test(baseUrl) || /mistral/i.test(model)) name = 'Mistral'
+ else if (/mistral/i.test(baseUrl) || /mistral/i.test(rawModel)) name = 'Mistral'
else if (/azure/i.test(baseUrl)) name = 'Azure OpenAI'
else if (/localhost:11434/i.test(baseUrl)) name = 'Ollama'
else if (/localhost:1234/i.test(baseUrl)) name = 'LM Studio'
- else if (/llama/i.test(model)) name = 'Meta Llama'
+ else if (/llama/i.test(rawModel)) name = 'Meta Llama'
else if (isLocal) name = 'Local'
- return { name, model, baseUrl, isLocal }
+
+ // Resolve model alias to actual model name + reasoning effort
+ let displayModel = rawModel
+ const codexAliases: Record<string, { model: string; reasoningEffort?: string }> = {
+ codexplan: { model: 'gpt-5.4', reasoningEffort: 'high' },
+ 'gpt-5.4': { model: 'gpt-5.4', reasoningEffort: 'high' },
+ 'gpt-5.3-codex': { model: 'gpt-5.3-codex', reasoningEffort: 'high' },
+ 'gpt-5.3-codex-spark': { model: 'gpt-5.3-codex-spark' },
+ codexspark: { model: 'gpt-5.3-codex-spark' },
+ 'gpt-5.2-codex': { model: 'gpt-5.2-codex', reasoningEffort: 'high' },
+ 'gpt-5.1-codex-max': { model: 'gpt-5.1-codex-max', reasoningEffort: 'high' },
+ 'gpt-5.1-codex-mini': { model: 'gpt-5.1-codex-mini' },
+ 'gpt-5.4-mini': { model: 'gpt-5.4-mini', reasoningEffort: 'medium' },
+ 'gpt-5.2': { model: 'gpt-5.2', reasoningEffort: 'medium' },
+ }
+ const alias = rawModel.toLowerCase()
+ if (alias in codexAliases) {
+ const resolved = codexAliases[alias]
+ displayModel = resolved.model
+ if (resolved.reasoningEffort) {
+ displayModel = `${displayModel} (${resolved.reasoningEffort})`
+ }
+ }
+
+ return { name, model: displayModel, baseUrl, isLocal }
}
// Default: Anthropic
diff --git a/src/hooks/useTypeahead.tsx b/src/hooks/useTypeahead.tsx
index a269902b..8183a011 100644
--- a/src/hooks/useTypeahead.tsx
+++ b/src/hooks/useTypeahead.tsx
@@ -1242,17 +1242,25 @@ export function useTypeahead({
const handleAutocompletePrevious = useCallback(() => {
setSuggestionsState(prev => ({
...prev,
- selectedSuggestion: prev.selectedSuggestion <= 0 ? suggestions.length - 1 : prev.selectedSuggestion - 1
+ selectedSuggestion: prev.suggestions.length === 0
+ ? -1
+ : prev.selectedSuggestion <= 0
+ ? prev.suggestions.length - 1
+ : Math.min(prev.selectedSuggestion - 1, prev.suggestions.length - 1)
}));
- }, [suggestions.length, setSuggestionsState]);
+ }, [setSuggestionsState]);
// Handler for autocomplete:next - selects next suggestion
const handleAutocompleteNext = useCallback(() => {
setSuggestionsState(prev => ({
...prev,
- selectedSuggestion: prev.selectedSuggestion >= suggestions.length - 1 ? 0 : prev.selectedSuggestion + 1
+ selectedSuggestion: prev.suggestions.length === 0
+ ? -1
+ : prev.selectedSuggestion >= prev.suggestions.length - 1
+ ? 0
+ : Math.max(0, prev.selectedSuggestion + 1)
}));
- }, [suggestions.length, setSuggestionsState]);
+ }, [setSuggestionsState]);
// Autocomplete context keybindings - only active when suggestions are visible
const autocompleteHandlers = useMemo(() => ({
diff --git a/src/services/api/openaiShim.ts b/src/services/api/openaiShim.ts
index 1f99b7c4..c456e9e4 100644
--- a/src/services/api/openaiShim.ts
+++ b/src/services/api/openaiShim.ts
@@ -655,9 +655,11 @@ class OpenAIShimStream {
class OpenAIShimMessages {
private defaultHeaders: Record<string, string>
+ private reasoningEffort?: 'low' | 'medium' | 'high' | 'xhigh'
- constructor(defaultHeaders: Record<string, string>) {
+ constructor(defaultHeaders: Record<string, string>, reasoningEffort?: 'low' | 'medium' | 'high' | 'xhigh') {
this.defaultHeaders = defaultHeaders
+ this.reasoningEffort = reasoningEffort
}
create(
@@ -667,7 +669,7 @@ class OpenAIShimMessages {
const self = this
const promise = (async () => {
- const request = resolveProviderRequest({ model: params.model })
+ const request = resolveProviderRequest({ model: params.model, reasoningEffortOverride: self.reasoningEffort })
const response = await self._doRequest(request, params, options)
if (params.stream) {
@@ -993,9 +995,11 @@ class OpenAIShimMessages {
class OpenAIShimBeta {
messages: OpenAIShimMessages
+ reasoningEffort?: 'low' | 'medium' | 'high' | 'xhigh'
- constructor(defaultHeaders: Record<string, string>) {
- this.messages = new OpenAIShimMessages(defaultHeaders)
+ constructor(defaultHeaders: Record<string, string>, reasoningEffort?: 'low' | 'medium' | 'high' | 'xhigh') {
+ this.messages = new OpenAIShimMessages(defaultHeaders, reasoningEffort)
+ this.reasoningEffort = reasoningEffort
}
}
@@ -1003,6 +1007,7 @@ export function createOpenAIShimClient(options: {
defaultHeaders?: Record<string, string>
maxRetries?: number
timeout?: number
+ reasoningEffort?: 'low' | 'medium' | 'high' | 'xhigh'
}): unknown {
hydrateGithubModelsTokenFromSecureStorage()
@@ -1025,7 +1030,7 @@ export function createOpenAIShimClient(options: {
const beta = new OpenAIShimBeta({
...(options.defaultHeaders ?? {}),
- })
+ }, options.reasoningEffort)
return {
beta,
diff --git a/src/services/api/providerConfig.ts b/src/services/api/providerConfig.ts
index 90643aa1..1c3097db 100644
--- a/src/services/api/providerConfig.ts
+++ b/src/services/api/providerConfig.ts
@@ -20,13 +20,43 @@ const CODEX_ALIAS_MODELS: Record<
model: 'gpt-5.4',
reasoningEffort: 'high',
},
+ 'gpt-5.4': {
+ model: 'gpt-5.4',
+ reasoningEffort: 'high',
+ },
+ 'gpt-5.3-codex': {
+ model: 'gpt-5.3-codex',
+ reasoningEffort: 'high',
+ },
+ 'gpt-5.3-codex-spark': {
+ model: 'gpt-5.3-codex-spark',
+ },
codexspark: {
model: 'gpt-5.3-codex-spark',
},
+ 'gpt-5.2-codex': {
+ model: 'gpt-5.2-codex',
+ reasoningEffort: 'high',
+ },
+ 'gpt-5.1-codex-max': {
+ model: 'gpt-5.1-codex-max',
+ reasoningEffort: 'high',
+ },
+ 'gpt-5.1-codex-mini': {
+ model: 'gpt-5.1-codex-mini',
+ },
+ 'gpt-5.4-mini': {
+ model: 'gpt-5.4-mini',
+ reasoningEffort: 'medium',
+ },
+ 'gpt-5.2': {
+ model: 'gpt-5.2',
+ reasoningEffort: 'medium',
+ },
} as const
type CodexAlias = keyof typeof CODEX_ALIAS_MODELS
-type ReasoningEffort = 'low' | 'medium' | 'high'
+type ReasoningEffort = 'low' | 'medium' | 'high' | 'xhigh'
export type ProviderTransport = 'chat_completions' | 'codex_responses'
@@ -102,7 +132,7 @@ function decodeJwtPayload(token: string): Record | undefined {
function parseReasoningEffort(value: string | undefined): ReasoningEffort | undefined {
if (!value) return undefined
const normalized = value.trim().toLowerCase()
- if (normalized === 'low' || normalized === 'medium' || normalized === 'high') {
+ if (normalized === 'low' || normalized === 'medium' || normalized === 'high' || normalized === 'xhigh') {
return normalized
}
return undefined
@@ -193,6 +223,7 @@ export function resolveProviderRequest(options?: {
model?: string
baseUrl?: string
fallbackModel?: string
+ reasoningEffortOverride?: ReasoningEffort
}): ResolvedProviderRequest {
const isGithubMode = isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB)
const requestedModel =
@@ -217,6 +248,11 @@ export function resolveProviderRequest(options?: {
? normalizeGithubModelsApiModel(requestedModel)
: descriptor.baseModel
+ const reasoning = options?.reasoningEffortOverride
+ ? { effort: options.reasoningEffortOverride }
+ : descriptor.reasoning
+
+
return {
transport,
requestedModel,
@@ -227,7 +263,7 @@ export function resolveProviderRequest(options?: {
? DEFAULT_CODEX_BASE_URL
: DEFAULT_OPENAI_BASE_URL)
).replace(/\/+$/, ''),
- reasoning: descriptor.reasoning,
+ reasoning,
}
}
@@ -336,3 +372,11 @@ export function resolveCodexApiCredentials(
source: 'auth.json',
}
}
+
+export function getReasoningEffortForModel(model: string): ReasoningEffort | undefined {
+ const normalized = model.trim().toLowerCase()
+ const base = normalized.split('?', 1)[0] ?? normalized
+ const alias = base as CodexAlias
+ const aliasConfig = CODEX_ALIAS_MODELS[alias]
+ return aliasConfig?.reasoningEffort
+}
diff --git a/src/utils/effort.ts b/src/utils/effort.ts
index cafcf3de..2a391ee6 100644
--- a/src/utils/effort.ts
+++ b/src/utils/effort.ts
@@ -17,6 +17,14 @@ export const EFFORT_LEVELS = [
'max',
] as const satisfies readonly EffortLevel[]
+export const OPENAI_EFFORT_LEVELS = [
+ 'low',
+ 'medium',
+ 'high',
+ 'xhigh',
+] as const
+
+export type OpenAIEffortLevel = typeof OPENAI_EFFORT_LEVELS[number]
export type EffortValue = EffortLevel | number
// @[MODEL LAUNCH]: Add the new model to the allowlist if it supports the effort parameter.
@@ -68,6 +76,46 @@ export function isEffortLevel(value: string): value is EffortLevel {
return (EFFORT_LEVELS as readonly string[]).includes(value)
}
+export function isOpenAIEffortLevel(value: string): value is OpenAIEffortLevel {
+ return (OPENAI_EFFORT_LEVELS as readonly string[]).includes(value)
+}
+
+export function modelUsesOpenAIEffort(_model: string): boolean {
+ const provider = getAPIProvider()
+ return provider === 'openai' || provider === 'codex'
+}
+
+export function getAvailableEffortLevels(model: string): EffortLevel[] | OpenAIEffortLevel[] {
+ if (modelUsesOpenAIEffort(model)) {
+    return [...OPENAI_EFFORT_LEVELS]
+ }
+ const levels: EffortLevel[] = ['low', 'medium', 'high']
+ if (modelSupportsMaxEffort(model)) {
+ levels.push('max')
+ }
+ return levels
+}
+
+export function getEffortLevelLabel(level: EffortLevel | OpenAIEffortLevel): string {
+ if (level === 'xhigh') return 'Extra High'
+ if (level === 'max') return 'Max'
+ return capitalize(level)
+}
+
+export function openAIEffortToStandard(level: OpenAIEffortLevel): EffortLevel {
+ if (level === 'xhigh') return 'max'
+ return level
+}
+
+export function standardEffortToOpenAI(level: EffortLevel): OpenAIEffortLevel {
+ if (level === 'max') return 'xhigh'
+ return level as OpenAIEffortLevel
+}
+
+function capitalize(s: string): string {
+ return s.charAt(0).toUpperCase() + s.slice(1)
+}
+
export function parseEffortValue(value: unknown): EffortValue | undefined {
if (value === undefined || value === null || value === '') {
return undefined
@@ -221,7 +269,7 @@ export function convertEffortValueToLevel(value: EffortValue): EffortLevel {
* @param level The effort level to describe
* @returns Human-readable description
*/
-export function getEffortLevelDescription(level: EffortLevel): string {
+export function getEffortLevelDescription(level: EffortLevel | OpenAIEffortLevel): string {
switch (level) {
case 'low':
return 'Quick, straightforward implementation with minimal overhead'
@@ -231,6 +279,8 @@ export function getEffortLevelDescription(level: EffortLevel): string {
return 'Comprehensive implementation with extensive testing and documentation'
case 'max':
return 'Maximum capability with deepest reasoning (Opus 4.6 only)'
+ case 'xhigh':
+ return 'Extra high reasoning effort for complex tasks (OpenAI/Codex)'
}
}
diff --git a/src/utils/model/aliases.ts b/src/utils/model/aliases.ts
index 91514da1..75ae388c 100644
--- a/src/utils/model/aliases.ts
+++ b/src/utils/model/aliases.ts
@@ -6,8 +6,6 @@ export const MODEL_ALIASES = [
'sonnet[1m]',
'opus[1m]',
'opusplan',
- 'codexplan',
- 'codexspark',
] as const
export type ModelAlias = (typeof MODEL_ALIASES)[number]
diff --git a/src/utils/model/model.ts b/src/utils/model/model.ts
index 6c81a8ef..97a74d95 100644
--- a/src/utils/model/model.ts
+++ b/src/utils/model/model.ts
@@ -123,6 +123,10 @@ export function getDefaultOpusModel(): ModelName {
if (getAPIProvider() === 'openai') {
return process.env.OPENAI_MODEL || 'gpt-4o'
}
+ // Codex provider: use user-specified model or default to gpt-5.4
+ if (getAPIProvider() === 'codex') {
+ return process.env.OPENAI_MODEL || 'gpt-5.4'
+ }
// 3P providers (Bedrock, Vertex, Foundry) โ kept as a separate branch
// even when values match, since 3P availability lags firstParty and
// these will diverge again at the next model launch.
@@ -145,6 +149,10 @@ export function getDefaultSonnetModel(): ModelName {
if (getAPIProvider() === 'openai') {
return process.env.OPENAI_MODEL || 'gpt-4o'
}
+ // Codex provider
+ if (getAPIProvider() === 'codex') {
+ return process.env.OPENAI_MODEL || 'gpt-5.4'
+ }
// Default to Sonnet 4.5 for 3P since they may not have 4.6 yet
if (getAPIProvider() !== 'firstParty') {
return getModelStrings().sonnet45
@@ -165,6 +173,10 @@ export function getDefaultHaikuModel(): ModelName {
if (getAPIProvider() === 'openai') {
return process.env.OPENAI_MODEL || 'gpt-4o-mini'
}
+ // Codex provider
+ if (getAPIProvider() === 'codex') {
+ return process.env.OPENAI_MODEL || 'gpt-5.4'
+ }
// Haiku 4.5 is available on all platforms (first-party, Foundry, Bedrock, Vertex)
return getModelStrings().haiku45
@@ -217,6 +229,10 @@ export function getDefaultMainLoopModelSetting(): ModelName | ModelAlias {
if (getAPIProvider() === 'openai') {
return process.env.OPENAI_MODEL || 'gpt-4o'
}
+ // Codex provider: always use the configured Codex model (default gpt-5.4)
+ if (getAPIProvider() === 'codex') {
+ return process.env.OPENAI_MODEL || 'gpt-5.4'
+ }
// Ants default to defaultModel from flag config, or Opus 1M if not configured
if (process.env.USER_TYPE === 'ant') {
@@ -343,12 +359,6 @@ export function renderDefaultModelSetting(
if (setting === 'opusplan') {
return 'Opus 4.6 in plan mode, else Sonnet 4.6'
}
- if (setting === 'codexplan') {
- return 'Codex Plan (GPT-5.4 high reasoning)'
- }
- if (setting === 'codexspark') {
- return 'Codex Spark (GPT-5.3 Codex Spark)'
- }
return renderModelName(parseUserSpecifiedModel(setting))
}
@@ -383,11 +393,12 @@ export function renderModelSetting(setting: ModelName | ModelAlias): string {
if (setting === 'opusplan') {
return 'Opus Plan'
}
+ // Handle Codex models - show actual model name + resolved model
if (setting === 'codexplan') {
- return 'Codex Plan'
+ return 'codexplan (gpt-5.4)'
}
if (setting === 'codexspark') {
- return 'Codex Spark'
+ return 'codexspark (gpt-5.3-codex-spark)'
}
if (isModelAlias(setting)) {
return capitalize(setting)
@@ -401,8 +412,8 @@ export function renderModelSetting(setting: ModelName | ModelAlias): string {
* if the model is not recognized as a public model.
*/
export function getPublicModelDisplayName(model: ModelName): string | null {
- // For OpenAI/Gemini providers, show the actual model name not a Claude alias
- if (getAPIProvider() === 'openai' || getAPIProvider() === 'gemini') {
+ // For OpenAI/Gemini/Codex providers, show the actual model name not a Claude alias
+ if (getAPIProvider() === 'openai' || getAPIProvider() === 'gemini' || getAPIProvider() === 'codex') {
return null
}
switch (model) {
@@ -517,10 +528,6 @@ export function parseUserSpecifiedModel(
if (isModelAlias(modelString)) {
switch (modelString) {
- case 'codexplan':
- return modelInputTrimmed
- case 'codexspark':
- return modelInputTrimmed
case 'opusplan':
return getDefaultSonnetModel() + (has1mTag ? '[1m]' : '') // Sonnet is default, Opus in plan mode
case 'sonnet':
@@ -535,6 +542,14 @@ export function parseUserSpecifiedModel(
}
}
+ // Handle Codex aliases - map to actual model names
+ if (modelString === 'codexplan') {
+ return 'gpt-5.4'
+ }
+ if (modelString === 'codexspark') {
+ return 'gpt-5.3-codex-spark'
+ }
+
// Opus 4/4.1 are no longer available on the first-party API (same as
// Claude.ai) โ silently remap to the current Opus default. The 'opus'
// alias already resolves to 4.6, so the only users on these explicit
diff --git a/src/utils/model/modelOptions.ts b/src/utils/model/modelOptions.ts
index 0c464d6a..84371c84 100644
--- a/src/utils/model/modelOptions.ts
+++ b/src/utils/model/modelOptions.ts
@@ -268,20 +268,65 @@ function getOpusPlanOption(): ModelOption {
function getCodexPlanOption(): ModelOption {
return {
- value: 'codexplan',
- label: 'Codex Plan',
+ value: 'gpt-5.4',
+ label: 'gpt-5.4',
description: 'GPT-5.4 on the Codex backend with high reasoning',
}
}
function getCodexSparkOption(): ModelOption {
return {
- value: 'codexspark',
- label: 'Codex Spark',
+ value: 'gpt-5.3-codex-spark',
+ label: 'gpt-5.3-codex-spark',
description: 'GPT-5.3 Codex Spark on the Codex backend for fast tool loops',
}
}
+function getCodexModelOptions(): ModelOption[] {
+ return [
+ {
+ value: 'gpt-5.4',
+ label: 'gpt-5.4',
+ description: 'GPT-5.4 with high reasoning',
+ },
+ {
+ value: 'gpt-5.3-codex',
+ label: 'gpt-5.3-codex',
+ description: 'GPT-5.3 Codex with high reasoning',
+ },
+ {
+ value: 'gpt-5.3-codex-spark',
+ label: 'gpt-5.3-codex-spark',
+ description: 'GPT-5.3 Codex Spark for fast tool loops',
+ },
+ {
+ value: 'codexspark',
+ label: 'codexspark',
+ description: 'GPT-5.3 Codex Spark alias for fast tool loops',
+ },
+ {
+ value: 'gpt-5.2-codex',
+ label: 'gpt-5.2-codex',
+ description: 'GPT-5.2 Codex with high reasoning',
+ },
+ {
+ value: 'gpt-5.1-codex-max',
+ label: 'gpt-5.1-codex-max',
+ description: 'GPT-5.1 Codex Max for deep reasoning',
+ },
+ {
+ value: 'gpt-5.1-codex-mini',
+ label: 'gpt-5.1-codex-mini',
+ description: 'GPT-5.1 Codex Mini - faster, cheaper',
+ },
+ {
+ value: 'gpt-5.4-mini',
+ label: 'gpt-5.4-mini',
+ description: 'GPT-5.4 Mini - faster, cheaper',
+ },
+ ]
+}
+
// @[MODEL LAUNCH]: Update the model picker lists below to include/reorder options for the new model.
// Each user tier (ant, Max/Team Premium, Pro/Team Standard/Enterprise, PAYG 1P, PAYG 3P) has its own list.
function getModelOptionsBase(fastMode = false): ModelOption[] {
@@ -360,8 +405,9 @@ function getModelOptionsBase(fastMode = false): ModelOption[] {
// PAYG 3P: Default (Sonnet 4.5) + Sonnet (3P custom) or Sonnet 4.6/1M + Opus (3P custom) or Opus 4.1/Opus 4.6/Opus1M + Haiku + Opus 4.1
const payg3pOptions = [getDefaultOptionForUser(fastMode)]
- if (getAPIProvider() === 'openai') {
- payg3pOptions.push(getCodexPlanOption(), getCodexSparkOption())
+ // Add Codex models for openai and codex providers
+ if (getAPIProvider() === 'openai' || getAPIProvider() === 'codex') {
+ payg3pOptions.push(...getCodexModelOptions())
}
const customSonnet = getCustomSonnetOption()
@@ -517,9 +563,9 @@ export function getModelOptions(fastMode = false): ModelOption[] {
return filterModelOptionsByAllowlist(options)
} else if (customModel === 'opusplan') {
return filterModelOptionsByAllowlist([...options, getOpusPlanOption()])
- } else if (customModel === 'codexplan') {
+ } else if (customModel === 'gpt-5.4') {
return filterModelOptionsByAllowlist([...options, getCodexPlanOption()])
- } else if (customModel === 'codexspark') {
+ } else if (customModel === 'gpt-5.3-codex-spark') {
return filterModelOptionsByAllowlist([...options, getCodexSparkOption()])
} else if (customModel === 'opus' && getAPIProvider() === 'firstParty') {
return filterModelOptionsByAllowlist([
@@ -554,11 +600,23 @@ export function getModelOptions(fastMode = false): ModelOption[] {
*/
function filterModelOptionsByAllowlist(options: ModelOption[]): ModelOption[] {
const settings = getSettings_DEPRECATED() || {}
- if (!settings.availableModels) {
- return options // No restrictions
- }
- return options.filter(
+ const filtered = !settings.availableModels
+ ? options // No restrictions
+ : options.filter(
opt =>
opt.value === null || (opt.value !== null && isModelAllowed(opt.value)),
)
+
+ // Select state uses option values as identity keys. If two entries share the
+ // same value (e.g. provider-specific aliases collapsing to one model ID),
+ // navigation/focus can become inconsistent and appear as duplicate rendering.
+  const seen = new Set<string>()
+ return filtered.filter(opt => {
+ const key = String(opt.value)
+ if (seen.has(key)) {
+ return false
+ }
+ seen.add(key)
+ return true
+ })
}
diff --git a/src/utils/model/modelStrings.ts b/src/utils/model/modelStrings.ts
index 5b7be104..4d8399d1 100644
--- a/src/utils/model/modelStrings.ts
+++ b/src/utils/model/modelStrings.ts
@@ -23,9 +23,12 @@ export type ModelStrings = Record
const MODEL_KEYS = Object.keys(ALL_MODEL_CONFIGS) as ModelKey[]
function getBuiltinModelStrings(provider: APIProvider): ModelStrings {
+ // Codex piggybacks on the OpenAI provider transport for Anthropic tier aliases.
+ // Reuse OpenAI mappings so model string lookups never return undefined.
+ const providerKey = provider === 'codex' ? 'openai' : provider
const out = {} as ModelStrings
for (const key of MODEL_KEYS) {
- out[key] = ALL_MODEL_CONFIGS[key][provider]
+ out[key] = ALL_MODEL_CONFIGS[key][providerKey]
}
return out
}
diff --git a/src/utils/model/providers.ts b/src/utils/model/providers.ts
index 30a1f1c9..6b6d627e 100644
--- a/src/utils/model/providers.ts
+++ b/src/utils/model/providers.ts
@@ -9,6 +9,7 @@ export type APIProvider =
| 'openai'
| 'gemini'
| 'github'
+ | 'codex'
export function getAPIProvider(): APIProvider {
return isEnvTruthy(process.env.CLAUDE_CODE_USE_GEMINI)
@@ -16,7 +17,9 @@ export function getAPIProvider(): APIProvider {
: isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB)
? 'github'
: isEnvTruthy(process.env.CLAUDE_CODE_USE_OPENAI)
- ? 'openai'
+ ? isCodexModel()
+ ? 'codex'
+ : 'openai'
: isEnvTruthy(process.env.CLAUDE_CODE_USE_BEDROCK)
? 'bedrock'
: isEnvTruthy(process.env.CLAUDE_CODE_USE_VERTEX)
@@ -29,6 +32,19 @@ export function getAPIProvider(): APIProvider {
export function usesAnthropicAccountFlow(): boolean {
return getAPIProvider() === 'firstParty'
}
+function isCodexModel(): boolean {
+ const model = (process.env.OPENAI_MODEL || '').toLowerCase()
+ return (
+ model === 'codexplan' ||
+ model === 'codexspark' ||
+ model === 'gpt-5.4' ||
+ model === 'gpt-5.3-codex' ||
+ model === 'gpt-5.3-codex-spark' ||
+ model === 'gpt-5.2-codex' ||
+ model === 'gpt-5.1-codex-max' ||
+ model === 'gpt-5.1-codex-mini'
+ )
+}
export function getAPIProviderForStatsig(): AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS {
return getAPIProvider() as AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS
diff --git a/src/utils/status.tsx b/src/utils/status.tsx
index bc159cdb..97e8c742 100644
--- a/src/utils/status.tsx
+++ b/src/utils/status.tsx
@@ -12,6 +12,7 @@ import { formatNumber } from './format.js';
import { getIdeClientName, type IDEExtensionInstallationStatus, isJetBrainsIde, toIDEDisplayName } from './ide.js';
import { getClaudeAiUserDefaultModelDescription, modelDisplayString } from './model/model.js';
import { getAPIProvider } from './model/providers.js';
+import { resolveProviderRequest } from '../services/api/providerConfig.js';
import { getMTLSConfig } from './mtls.js';
import { checkInstall } from './nativeInstaller/index.js';
import { getProxyUrl } from './proxy.js';
@@ -246,6 +247,7 @@ export function buildAPIProviderProperties(): Property[] {
vertex: 'Google Vertex AI',
foundry: 'Microsoft Foundry',
openai: 'OpenAI-compatible',
+ codex: 'Codex',
gemini: 'Google Gemini',
}[apiProvider];
properties.push({
@@ -332,9 +334,46 @@ export function buildAPIProviderProperties(): Property[] {
}
const openaiModel = process.env.OPENAI_MODEL;
if (openaiModel) {
+ // Build display model string with resolved model + reasoning effort
+ let modelDisplay = openaiModel;
+      const { resolvedModel, reasoning } = resolveProviderRequest({ model: openaiModel });
+      const reasoningEffort = reasoning?.effort;
+ if (resolvedModel && resolvedModel !== openaiModel.toLowerCase()) {
+ // Show resolved model name
+ modelDisplay = resolvedModel;
+ }
+ if (reasoningEffort) {
+ modelDisplay = `${modelDisplay} (${reasoningEffort})`;
+ }
properties.push({
label: 'Model',
- value: openaiModel
+ value: modelDisplay
+ });
+ }
+ } else if (apiProvider === 'codex') {
+ const codexBaseUrl = process.env.OPENAI_BASE_URL;
+ if (codexBaseUrl) {
+ properties.push({
+ label: 'Codex base URL',
+ value: codexBaseUrl
+ });
+ }
+ const openaiModel = process.env.OPENAI_MODEL;
+ if (openaiModel) {
+ // Build display model string with resolved model + reasoning effort
+ let modelDisplay = openaiModel;
+      const { resolvedModel, reasoning } = resolveProviderRequest({ model: openaiModel });
+      const reasoningEffort = reasoning?.effort;
+ if (resolvedModel && resolvedModel !== openaiModel.toLowerCase()) {
+ // Show resolved model name
+ modelDisplay = resolvedModel;
+ }
+ if (reasoningEffort) {
+ modelDisplay = `${modelDisplay} (${reasoningEffort})`;
+ }
+ properties.push({
+ label: 'Model',
+ value: modelDisplay
});
}
} else if (apiProvider === 'gemini') {
diff --git a/src/utils/suggestions/commandSuggestions.ts b/src/utils/suggestions/commandSuggestions.ts
index 4a90db55..2f83ae6f 100644
--- a/src/utils/suggestions/commandSuggestions.ts
+++ b/src/utils/suggestions/commandSuggestions.ts
@@ -286,6 +286,25 @@ function createCommandSuggestionItem(
}
}
+/**
+ * Ensure suggestion IDs are unique for React keys and selection logic.
+ * If duplicates exist, append a stable numeric suffix to subsequent entries.
+ */
+function ensureUniqueSuggestionIds(items: SuggestionItem[]): SuggestionItem[] {
+  const counts = new Map<string, number>()
+ return items.map(item => {
+ const seen = counts.get(item.id) ?? 0
+ counts.set(item.id, seen + 1)
+ if (seen === 0) {
+ return item
+ }
+ return {
+ ...item,
+ id: `${item.id}#${seen + 1}`,
+ }
+ })
+}
+
/**
* Generate command suggestions based on input
*/
@@ -369,14 +388,14 @@ export function generateCommandSuggestions(
// Combine with built-in commands prioritized after recently used,
// so they remain visible even when many skills are installed
- return [
+ return ensureUniqueSuggestionIds([
...recentlyUsed,
...builtinCommands,
...userCommands,
...projectCommands,
...policyCommands,
...otherCommands,
- ].map(cmd => createCommandSuggestionItem(cmd))
+ ].map(cmd => createCommandSuggestionItem(cmd)))
}
// The Fuse index filters isHidden at build time and is keyed on the
@@ -491,10 +510,13 @@ export function generateCommandSuggestions(
if (hiddenExact) {
const hiddenId = getCommandId(hiddenExact)
if (!fuseSuggestions.some(s => s.id === hiddenId)) {
- return [createCommandSuggestionItem(hiddenExact), ...fuseSuggestions]
+ return ensureUniqueSuggestionIds([
+ createCommandSuggestionItem(hiddenExact),
+ ...fuseSuggestions,
+ ])
}
}
- return fuseSuggestions
+ return ensureUniqueSuggestionIds(fuseSuggestions)
}
/**