feat: Refactor model handling & reasoning effort across navigation, typeahead, OpenAI/Codex providers, API shim, configs, and UI (adds EffortPicker, new mappings/options, unique suggestion IDs, effort utilities; removes deprecated aliases; defaults Codex to gpt-5.4; improves selection logic and status display)

This commit is contained in:
Meet Patel
2026-04-02 17:17:14 +05:30
parent 9f48bb4431
commit 8f50f17674
15 changed files with 612 additions and 139 deletions

View File

@@ -84,44 +84,44 @@ const reducer = <T>(state: State<T>, action: Action<T>): State<T> => {
return state
}
// Wrap to first item if at the end
const next = item.next || state.optionMap.first
// If there's a next item in the list, go to it
if (item.next) {
const needsToScroll = item.next.index >= state.visibleToIndex
if (!next) {
if (!needsToScroll) {
return {
...state,
focusedValue: item.next.value,
}
}
const nextVisibleToIndex = Math.min(
state.optionMap.size,
state.visibleToIndex + 1,
)
const nextVisibleFromIndex = nextVisibleToIndex - state.visibleOptionCount
return {
...state,
focusedValue: item.next.value,
visibleFromIndex: nextVisibleFromIndex,
visibleToIndex: nextVisibleToIndex,
}
}
// No next item - wrap to first item
const firstItem = state.optionMap.first
if (!firstItem) {
return state
}
// When wrapping to first, reset viewport to start
if (!item.next && next === state.optionMap.first) {
return {
...state,
focusedValue: next.value,
visibleFromIndex: 0,
visibleToIndex: state.visibleOptionCount,
}
}
const needsToScroll = next.index >= state.visibleToIndex
if (!needsToScroll) {
return {
...state,
focusedValue: next.value,
}
}
const nextVisibleToIndex = Math.min(
state.optionMap.size,
state.visibleToIndex + 1,
)
const nextVisibleFromIndex = nextVisibleToIndex - state.visibleOptionCount
return {
...state,
focusedValue: next.value,
visibleFromIndex: nextVisibleFromIndex,
visibleToIndex: nextVisibleToIndex,
focusedValue: firstItem.value,
visibleFromIndex: 0,
visibleToIndex: state.visibleOptionCount,
}
}
@@ -136,44 +136,43 @@ const reducer = <T>(state: State<T>, action: Action<T>): State<T> => {
return state
}
// Wrap to last item if at the beginning
const previous = item.previous || state.optionMap.last
// If there's a previous item in the list, go to it
if (item.previous) {
const needsToScroll = item.previous.index < state.visibleFromIndex
if (!previous) {
return state
}
if (!needsToScroll) {
return {
...state,
focusedValue: item.previous.value,
}
}
const nextVisibleFromIndex = Math.max(0, state.visibleFromIndex - 1)
const nextVisibleToIndex = nextVisibleFromIndex + state.visibleOptionCount
// When wrapping to last, reset viewport to end
if (!item.previous && previous === state.optionMap.last) {
const nextVisibleToIndex = state.optionMap.size
const nextVisibleFromIndex = Math.max(
0,
nextVisibleToIndex - state.visibleOptionCount,
)
return {
...state,
focusedValue: previous.value,
focusedValue: item.previous.value,
visibleFromIndex: nextVisibleFromIndex,
visibleToIndex: nextVisibleToIndex,
}
}
const needsToScroll = previous.index <= state.visibleFromIndex
if (!needsToScroll) {
return {
...state,
focusedValue: previous.value,
}
// No previous item - wrap to last item
const lastItem = state.optionMap.last
if (!lastItem) {
return state
}
const nextVisibleFromIndex = Math.max(0, state.visibleFromIndex - 1)
const nextVisibleToIndex = nextVisibleFromIndex + state.visibleOptionCount
// When wrapping to last, reset viewport to end
const nextVisibleToIndex = state.optionMap.size
const nextVisibleFromIndex = Math.max(
0,
nextVisibleToIndex - state.visibleOptionCount,
)
return {
...state,
focusedValue: previous.value,
focusedValue: lastItem.value,
visibleFromIndex: nextVisibleFromIndex,
visibleToIndex: nextVisibleToIndex,
}

View File

@@ -0,0 +1,152 @@
import React, { useState } from 'react'
import { Box, Text } from '../ink.js'
import { useMainLoopModel } from '../hooks/useMainLoopModel.js'
import { useAppState, useSetAppState } from '../state/AppState.js'
import type { EffortLevel, OpenAIEffortLevel } from '../utils/effort.js'
import {
getAvailableEffortLevels,
getDisplayedEffortLevel,
getEffortLevelDescription,
getEffortLevelLabel,
getEffortValueDescription,
modelSupportsEffort,
modelUsesOpenAIEffort,
standardEffortToOpenAI,
isOpenAIEffortLevel,
} from '../utils/effort.js'
import { getAPIProvider } from '../utils/model/providers.js'
import { getReasoningEffortForModel } from '../services/api/providerConfig.js'
import { Select } from './CustomSelect/select.js'
import { effortLevelToSymbol } from './EffortIndicator.js'
import { KeyboardShortcutHint } from './design-system/KeyboardShortcutHint.js'
import { Byline } from './design-system/Byline.js'
// One row in the effort <Select>. `value` is the raw effort-level string
// (or the sentinel 'auto'); `label` is pre-rendered JSX so the symbol and
// "(current)" marker can be styled per option.
type EffortOption = {
label: React.ReactNode
value: string
description: string
// NOTE(review): every option below is created with isAvailable: true —
// confirm whether Select actually consumes this flag.
isAvailable: boolean
}
// Props for EffortPicker.
type Props = {
// Receives the chosen level, or undefined when the user picks 'auto'
// (i.e. "use the model's default effort").
onSelect: (effort: EffortLevel | undefined) => void
// Invoked when the user dismisses the picker (Esc).
onCancel?: () => void
}
/**
 * Interactive picker for the reasoning-effort level of the current model.
 *
 * Builds an 'auto' option plus one option per level reported by
 * getAvailableEffortLevels(model). Selecting 'auto' clears `effortValue`
 * in app state and calls onSelect(undefined); selecting a concrete level
 * stores it and passes it to onSelect. Esc triggers onCancel.
 */
export function EffortPicker({ onSelect, onCancel }: Props) {
  const model = useMainLoopModel()
  // NOTE(review): the selector parameter is untyped (`any`) — confirm the
  // AppState shape actually exposes `effortValue` and tighten this type.
  const appStateEffort = useAppState((s: any) => s.effortValue)
  const setAppState = useSetAppState()
  const provider = getAPIProvider()

  const usesOpenAIEffort = modelUsesOpenAIEffort(model)
  const availableLevels = getAvailableEffortLevels(model)
  const currentDisplayedLevel = getDisplayedEffortLevel(model, appStateEffort)

  // For OpenAI/Codex models the model's own default reasoning effort drives
  // the initially focused option (see `initialFocus` below).
  const modelReasoningEffort = usesOpenAIEffort
    ? getReasoningEffortForModel(model)
    : undefined

  const options: EffortOption[] = [
    {
      label: <EffortOptionLabel level="auto" text="Auto" isCurrent={false} />,
      value: 'auto',
      description: 'Use the default effort level for your model',
      isAvailable: true,
    },
    ...availableLevels.map(level => {
      // OpenAI surfaces 'xhigh' to users as 'max'; only the "current"
      // comparison uses the display alias — the stored value stays raw.
      const displayLevel = usesOpenAIEffort
        ? (level === 'xhigh' ? 'max' : level)
        : level
      const isCurrent = currentDisplayedLevel === displayLevel
      return {
        label: (
          <EffortOptionLabel
            level={level as EffortLevel}
            text={getEffortLevelLabel(level as EffortLevel)}
            isCurrent={isCurrent}
          />
        ),
        value: level,
        description: getEffortLevelDescription(level as EffortLevel),
        isAvailable: true,
      }
    }),
  ]

  // Persist the choice in app state, then notify the caller.
  // 'auto' is represented as `undefined` ("no explicit override").
  function handleSelect(value: string) {
    if (value === 'auto') {
      setAppState(prev => ({
        ...prev,
        effortValue: undefined,
      }))
      onSelect(undefined)
    } else {
      const effortLevel = value as EffortLevel
      setAppState(prev => ({
        ...prev,
        effortValue: effortLevel,
      }))
      onSelect(effortLevel)
    }
  }

  function handleCancel() {
    onCancel?.()
  }

  const supportsEffort = modelSupportsEffort(model)

  // Initial focus: for OpenAI/Codex prefer the model's default reasoning
  // effort; for Claude prefer the explicitly stored effort; fall back to
  // 'auto' in both cases.
  const initialFocus = usesOpenAIEffort
    ? (modelReasoningEffort || 'auto')
    : (appStateEffort ? String(appStateEffort) : 'auto')

  return (
    <Box flexDirection="column">
      <Box marginBottom={1} flexDirection="column">
        <Text color="remember" bold={true}>Set effort level</Text>
        <Text dimColor={true}>
          {usesOpenAIEffort
            ? `OpenAI/Codex provider (${provider})`
            : supportsEffort
              ? `Claude model · ${provider} provider`
              : `Effort not supported for this model`
          }
        </Text>
      </Box>
      <Box marginBottom={1}>
        <Select
          options={options}
          defaultValue={initialFocus}
          onChange={handleSelect}
          onCancel={handleCancel}
          visibleOptionCount={Math.min(6, options.length)}
          inlineDescriptions={true}
        />
      </Box>
      <Box marginBottom={1}>
        <Text dimColor={true} italic={true}>
          <Byline>
            <KeyboardShortcutHint shortcut="Enter" action="confirm" />
            <KeyboardShortcutHint shortcut="Esc" action="cancel" />
          </Byline>
        </Text>
      </Box>
    </Box>
  )
}
/**
 * Renders one effort option row: a colored symbol, the level's label, and a
 * dimmed "(current)" suffix when this level is the active one.
 */
function EffortOptionLabel({ level, text, isCurrent }: { level: EffortLevel | 'auto', text: string, isCurrent: boolean }) {
  const glyph = level === 'auto' ? '⊘' : effortLevelToSymbol(level as EffortLevel)

  // Highlight the active level; dim the 'auto' sentinel; otherwise use the
  // standard suggestion color.
  let glyphColor: string
  if (isCurrent) {
    glyphColor = 'remember'
  } else if (level === 'auto') {
    glyphColor = 'subtle'
  } else {
    glyphColor = 'suggestion'
  }

  return (
    <>
      <Text color={glyphColor}>{glyph} </Text>
      <Text bold={isCurrent}>{text}</Text>
      {isCurrent && <Text dimColor={true}> (current)</Text>}
    </>
  )
}

View File

@@ -97,21 +97,45 @@ function detectProvider(): { name: string; model: string; baseUrl: string; isLoc
}
if (useOpenAI) {
const model = process.env.OPENAI_MODEL || 'gpt-4o'
const rawModel = process.env.OPENAI_MODEL || 'gpt-4o'
const baseUrl = process.env.OPENAI_BASE_URL || 'https://api.openai.com/v1'
const isLocal = /localhost|127\.0\.0\.1|0\.0\.0\.0/.test(baseUrl)
let name = 'OpenAI'
if (/deepseek/i.test(baseUrl) || /deepseek/i.test(model)) name = 'DeepSeek'
if (/deepseek/i.test(baseUrl) || /deepseek/i.test(rawModel)) name = 'DeepSeek'
else if (/openrouter/i.test(baseUrl)) name = 'OpenRouter'
else if (/together/i.test(baseUrl)) name = 'Together AI'
else if (/groq/i.test(baseUrl)) name = 'Groq'
else if (/mistral/i.test(baseUrl) || /mistral/i.test(model)) name = 'Mistral'
else if (/mistral/i.test(baseUrl) || /mistral/i.test(rawModel)) name = 'Mistral'
else if (/azure/i.test(baseUrl)) name = 'Azure OpenAI'
else if (/localhost:11434/i.test(baseUrl)) name = 'Ollama'
else if (/localhost:1234/i.test(baseUrl)) name = 'LM Studio'
else if (/llama/i.test(model)) name = 'Meta Llama'
else if (/llama/i.test(rawModel)) name = 'Meta Llama'
else if (isLocal) name = 'Local'
return { name, model, baseUrl, isLocal }
// Resolve model alias to actual model name + reasoning effort
let displayModel = rawModel
const codexAliases: Record<string, { model: string; reasoningEffort?: string }> = {
codexplan: { model: 'gpt-5.4', reasoningEffort: 'high' },
'gpt-5.4': { model: 'gpt-5.4', reasoningEffort: 'high' },
'gpt-5.3-codex': { model: 'gpt-5.3-codex', reasoningEffort: 'high' },
'gpt-5.3-codex-spark': { model: 'gpt-5.3-codex-spark' },
codexspark: { model: 'gpt-5.3-codex-spark' },
'gpt-5.2-codex': { model: 'gpt-5.2-codex', reasoningEffort: 'high' },
'gpt-5.1-codex-max': { model: 'gpt-5.1-codex-max', reasoningEffort: 'high' },
'gpt-5.1-codex-mini': { model: 'gpt-5.1-codex-mini' },
'gpt-5.4-mini': { model: 'gpt-5.4-mini', reasoningEffort: 'medium' },
'gpt-5.2': { model: 'gpt-5.2', reasoningEffort: 'medium' },
}
const alias = rawModel.toLowerCase()
if (alias in codexAliases) {
const resolved = codexAliases[alias]
displayModel = resolved.model
if (resolved.reasoningEffort) {
displayModel = `${displayModel} (${resolved.reasoningEffort})`
}
}
return { name, model: displayModel, baseUrl, isLocal }
}
// Default: Anthropic