Merge origin/main into codex/provider-profile-recommendations

Preserve provider recommendation workflows while integrating Codex profile support, safer launch isolation, and updated docs/scripts from upstream main.
This commit is contained in:
Vasanthdev2004
2026-04-01 17:33:07 +05:30
21 changed files with 2141 additions and 188 deletions

View File

@@ -6,6 +6,8 @@ export const MODEL_ALIASES = [
'sonnet[1m]',
'opus[1m]',
'opusplan',
'codexplan',
'codexspark',
] as const
export type ModelAlias = (typeof MODEL_ALIASES)[number]

View File

@@ -193,6 +193,11 @@ export function getRuntimeMainLoopModel(params: {
* @returns The default model setting to use
*/
export function getDefaultMainLoopModelSetting(): ModelName | ModelAlias {
// OpenAI provider: always use the configured OpenAI model
if (getAPIProvider() === 'openai') {
return process.env.OPENAI_MODEL || 'gpt-4o'
}
// Ants default to defaultModel from flag config, or Opus 1M if not configured
if (process.env.USER_TYPE === 'ant') {
return (
@@ -318,6 +323,12 @@ export function renderDefaultModelSetting(
if (setting === 'opusplan') {
return 'Opus 4.6 in plan mode, else Sonnet 4.6'
}
if (setting === 'codexplan') {
return 'Codex Plan (GPT-5.4 high reasoning)'
}
if (setting === 'codexspark') {
return 'Codex Spark (GPT-5.3 Codex Spark)'
}
return renderModelName(parseUserSpecifiedModel(setting))
}
@@ -352,6 +363,12 @@ export function renderModelSetting(setting: ModelName | ModelAlias): string {
if (setting === 'opusplan') {
return 'Opus Plan'
}
if (setting === 'codexplan') {
return 'Codex Plan'
}
if (setting === 'codexspark') {
return 'Codex Spark'
}
if (isModelAlias(setting)) {
return capitalize(setting)
}
@@ -364,7 +381,15 @@ export function renderModelSetting(setting: ModelName | ModelAlias): string {
* if the model is not recognized as a public model.
*/
export function getPublicModelDisplayName(model: ModelName): string | null {
// For OpenAI provider, show the actual model name (e.g. 'gpt-4o') not a Claude alias
if (getAPIProvider() === 'openai') {
return null
}
switch (model) {
case 'gpt-5.4':
return 'GPT-5.4'
case 'gpt-5.3-codex-spark':
return 'GPT-5.3 Codex Spark'
case getModelStrings().opus46:
return 'Opus 4.6'
case getModelStrings().opus46 + '[1m]':
@@ -472,6 +497,10 @@ export function parseUserSpecifiedModel(
if (isModelAlias(modelString)) {
switch (modelString) {
case 'codexplan':
return modelInputTrimmed
case 'codexspark':
return modelInputTrimmed
case 'opusplan':
return getDefaultSonnetModel() + (has1mTag ? '[1m]' : '') // Sonnet is default, Opus in plan mode
case 'sonnet':

View File

@@ -266,6 +266,22 @@ function getOpusPlanOption(): ModelOption {
}
}
// Model-picker entry for the 'codexplan' alias (GPT-5.4, high reasoning,
// served from the Codex backend).
function getCodexPlanOption(): ModelOption {
  const planOption: ModelOption = {
    value: 'codexplan',
    label: 'Codex Plan',
    description: 'GPT-5.4 on the Codex backend with high reasoning',
  }
  return planOption
}
// Model-picker entry for the 'codexspark' alias (GPT-5.3 Codex Spark,
// tuned for fast tool loops on the Codex backend).
function getCodexSparkOption(): ModelOption {
  const sparkOption: ModelOption = {
    value: 'codexspark',
    label: 'Codex Spark',
    description: 'GPT-5.3 Codex Spark on the Codex backend for fast tool loops',
  }
  return sparkOption
}
// @[MODEL LAUNCH]: Update the model picker lists below to include/reorder options for the new model.
// Each user tier (ant, Max/Team Premium, Pro/Team Standard/Enterprise, PAYG 1P, PAYG 3P) has its own list.
function getModelOptionsBase(fastMode = false): ModelOption[] {
@@ -344,6 +360,10 @@ function getModelOptionsBase(fastMode = false): ModelOption[] {
// PAYG 3P: Default (Sonnet 4.5) + Sonnet (3P custom) or Sonnet 4.6/1M + Opus (3P custom) or Opus 4.1/Opus 4.6/Opus1M + Haiku + Opus 4.1
const payg3pOptions = [getDefaultOptionForUser(fastMode)]
if (getAPIProvider() === 'openai') {
payg3pOptions.push(getCodexPlanOption(), getCodexSparkOption())
}
const customSonnet = getCustomSonnetOption()
if (customSonnet !== undefined) {
payg3pOptions.push(customSonnet)
@@ -497,6 +517,10 @@ export function getModelOptions(fastMode = false): ModelOption[] {
return filterModelOptionsByAllowlist(options)
} else if (customModel === 'opusplan') {
return filterModelOptionsByAllowlist([...options, getOpusPlanOption()])
} else if (customModel === 'codexplan') {
return filterModelOptionsByAllowlist([...options, getCodexPlanOption()])
} else if (customModel === 'codexspark') {
return filterModelOptionsByAllowlist([...options, getCodexSparkOption()])
} else if (customModel === 'opus' && getAPIProvider() === 'firstParty') {
return filterModelOptionsByAllowlist([
...options,

View File

@@ -1,36 +1,22 @@
export type ModifierKey = 'shift' | 'command' | 'control' | 'option'
let prewarmed = false
/**
 * Pre-warm the native module by loading it in advance.
 * Call this early to avoid delay on first use.
 *
 * NOTE: The `modifiers-napi` package is an Anthropic-internal native addon
 * that is not shipped with the open-source build. All calls are no-ops here
 * to avoid supply-chain risk from unverified npm packages with the same name.
 */
export function prewarmModifiers(): void {
  // No-op in open-source build — native modifier detection is not available.
  // Deliberately does NOT require('modifiers-napi'): loading an unverified
  // npm package of that name is exactly the risk the note above describes.
}
/**
* Check if a specific modifier key is currently pressed (synchronous).
*
* Always returns false in the open-source build since the native addon
* is not available.
*/
export function isModifierPressed(modifier: ModifierKey): boolean {
if (process.platform !== 'darwin') {
return false
}
// Dynamic import to avoid loading native module at top level
const { isModifierPressed: nativeIsModifierPressed } =
// eslint-disable-next-line @typescript-eslint/no-require-imports
require('modifiers-napi') as { isModifierPressed: (m: string) => boolean }
return nativeIsModifierPressed(modifier)
export function isModifierPressed(_modifier: ModifierKey): boolean {
return false
}

View File

@@ -1,9 +1,14 @@
import assert from 'node:assert/strict'
import { mkdtempSync, rmSync, writeFileSync } from 'node:fs'
import { tmpdir } from 'node:os'
import { join } from 'node:path'
import test from 'node:test'
import {
buildCodexProfileEnv,
buildLaunchEnv,
buildOllamaProfileEnv,
buildOpenAIProfileEnv,
selectAutoProfile,
type ProfileFile,
} from './providerProfile.ts'
@@ -16,6 +21,8 @@ function profile(profile: ProfileFile['profile'], env: ProfileFile['env']): Prof
}
}
// Path to an auth file that is never created by these tests; passed as
// CODEX_AUTH_JSON_PATH to exercise the "no readable auth.json" branch.
const missingCodexAuthPath = join(tmpdir(), 'openclaude-missing-codex-auth.json')
test('matching persisted ollama env is reused for ollama launch', async () => {
const env = await buildLaunchEnv({
profile: 'ollama',
@@ -45,6 +52,9 @@ test('ollama launch ignores mismatched persisted openai env and shell model fall
processEnv: {
OPENAI_BASE_URL: 'https://api.deepseek.com/v1',
OPENAI_MODEL: 'gpt-4o-mini',
OPENAI_API_KEY: 'sk-live',
CODEX_API_KEY: 'codex-live',
CHATGPT_ACCOUNT_ID: 'acct_live',
},
getOllamaChatBaseUrl: () => 'http://localhost:11434/v1',
resolveOllamaDefaultModel: async () => 'qwen2.5-coder:7b',
@@ -52,6 +62,9 @@ test('ollama launch ignores mismatched persisted openai env and shell model fall
assert.equal(env.OPENAI_BASE_URL, 'http://localhost:11434/v1')
assert.equal(env.OPENAI_MODEL, 'qwen2.5-coder:7b')
assert.equal(env.OPENAI_API_KEY, undefined)
assert.equal(env.CODEX_API_KEY, undefined)
assert.equal(env.CHATGPT_ACCOUNT_ID, undefined)
})
test('openai launch ignores mismatched persisted ollama env', async () => {
@@ -64,6 +77,8 @@ test('openai launch ignores mismatched persisted ollama env', async () => {
goal: 'latency',
processEnv: {
OPENAI_API_KEY: 'sk-live',
CODEX_API_KEY: 'codex-live',
CHATGPT_ACCOUNT_ID: 'acct_live',
},
getOllamaChatBaseUrl: () => 'http://localhost:11434/v1',
resolveOllamaDefaultModel: async () => 'llama3.1:8b',
@@ -72,6 +87,159 @@ test('openai launch ignores mismatched persisted ollama env', async () => {
assert.equal(env.OPENAI_BASE_URL, 'https://api.openai.com/v1')
assert.equal(env.OPENAI_MODEL, 'gpt-4o-mini')
assert.equal(env.OPENAI_API_KEY, 'sk-live')
assert.equal(env.CODEX_API_KEY, undefined)
assert.equal(env.CHATGPT_ACCOUNT_ID, undefined)
})
// An explicit 'openai' launch must strip Codex transport hints leaking in
// from the shell (Codex base URL, 'codexplan' model alias), falling back
// to the OpenAI defaults while keeping the live OPENAI_API_KEY.
test('openai launch ignores codex shell transport hints', async () => {
  const env = await buildLaunchEnv({
    profile: 'openai',
    persisted: null,
    goal: 'balanced',
    processEnv: {
      OPENAI_API_KEY: 'sk-live',
      OPENAI_BASE_URL: 'https://chatgpt.com/backend-api/codex',
      OPENAI_MODEL: 'codexplan',
    },
  })
  assert.equal(env.OPENAI_BASE_URL, 'https://api.openai.com/v1')
  assert.equal(env.OPENAI_MODEL, 'gpt-4o')
  assert.equal(env.OPENAI_API_KEY, 'sk-live')
})
// Codex transport hints persisted from a previous run must likewise be
// discarded on an 'openai' launch: base URL and model reset to OpenAI
// defaults, and the live shell key wins over the persisted one.
test('openai launch ignores codex persisted transport hints', async () => {
  const env = await buildLaunchEnv({
    profile: 'openai',
    persisted: profile('openai', {
      OPENAI_BASE_URL: 'https://chatgpt.com/backend-api/codex',
      OPENAI_MODEL: 'codexplan',
      OPENAI_API_KEY: 'sk-persisted',
    }),
    goal: 'balanced',
    processEnv: {
      OPENAI_API_KEY: 'sk-live',
    },
  })
  assert.equal(env.OPENAI_BASE_URL, 'https://api.openai.com/v1')
  assert.equal(env.OPENAI_MODEL, 'gpt-4o')
  assert.equal(env.OPENAI_API_KEY, 'sk-live')
})
// A 'codex' launch reuses a matching persisted codex profile as-is —
// including its persisted credentials — even when CODEX_AUTH_JSON_PATH
// points at a file that does not exist.
test('matching persisted codex env is reused for codex launch', async () => {
  const env = await buildLaunchEnv({
    profile: 'codex',
    persisted: profile('codex', {
      OPENAI_BASE_URL: 'https://chatgpt.com/backend-api/codex',
      OPENAI_MODEL: 'codexspark',
      CODEX_API_KEY: 'codex-persisted',
      CHATGPT_ACCOUNT_ID: 'acct_persisted',
    }),
    goal: 'balanced',
    processEnv: {
      CODEX_AUTH_JSON_PATH: missingCodexAuthPath,
    },
  })
  assert.equal(env.OPENAI_BASE_URL, 'https://chatgpt.com/backend-api/codex')
  assert.equal(env.OPENAI_MODEL, 'codexspark')
  assert.equal(env.CODEX_API_KEY, 'codex-persisted')
  assert.equal(env.CHATGPT_ACCOUNT_ID, 'acct_persisted')
})
// A persisted codex profile whose base URL was overwritten with a
// non-Codex endpoint must be normalized back to the Codex backend URL
// while the persisted model is kept.
test('codex launch normalizes poisoned persisted base urls', async () => {
  const env = await buildLaunchEnv({
    profile: 'codex',
    persisted: profile('codex', {
      OPENAI_BASE_URL: 'https://api.openai.com/v1',
      OPENAI_MODEL: 'codexspark',
      CHATGPT_ACCOUNT_ID: 'acct_persisted',
    }),
    goal: 'balanced',
    processEnv: {
      CODEX_AUTH_JSON_PATH: missingCodexAuthPath,
    },
  })
  assert.equal(env.OPENAI_BASE_URL, 'https://chatgpt.com/backend-api/codex')
  assert.equal(env.OPENAI_MODEL, 'codexspark')
})
// A 'codex' launch ignores a persisted *openai* profile entirely:
// transport falls back to codex defaults, OPENAI_API_KEY is dropped, and
// live codex credentials from the shell are kept.
test('codex launch ignores mismatched persisted openai env', async () => {
  const env = await buildLaunchEnv({
    profile: 'codex',
    persisted: profile('openai', {
      OPENAI_BASE_URL: 'https://api.openai.com/v1',
      OPENAI_MODEL: 'gpt-4o',
      OPENAI_API_KEY: 'sk-persisted',
    }),
    goal: 'balanced',
    processEnv: {
      OPENAI_BASE_URL: 'https://api.openai.com/v1',
      OPENAI_MODEL: 'gpt-4o-mini',
      OPENAI_API_KEY: 'sk-live',
      CODEX_API_KEY: 'codex-live',
      CHATGPT_ACCOUNT_ID: 'acct_live',
    },
  })
  assert.equal(env.OPENAI_BASE_URL, 'https://chatgpt.com/backend-api/codex')
  assert.equal(env.OPENAI_MODEL, 'codexplan')
  assert.equal(env.OPENAI_API_KEY, undefined)
  assert.equal(env.CODEX_API_KEY, 'codex-live')
  assert.equal(env.CHATGPT_ACCOUNT_ID, 'acct_live')
})
// A placeholder key in the shell (the 'SUA_CHAVE' template value) must
// not displace real persisted codex credentials.
test('codex launch ignores placeholder codex env keys', async () => {
  const env = await buildLaunchEnv({
    profile: 'codex',
    persisted: profile('codex', {
      OPENAI_BASE_URL: 'https://chatgpt.com/backend-api/codex',
      OPENAI_MODEL: 'codexspark',
      CODEX_API_KEY: 'codex-persisted',
      CHATGPT_ACCOUNT_ID: 'acct_persisted',
    }),
    goal: 'balanced',
    processEnv: {
      CODEX_API_KEY: 'SUA_CHAVE',
      CODEX_AUTH_JSON_PATH: missingCodexAuthPath,
    },
  })
  assert.equal(env.CODEX_API_KEY, 'codex-persisted')
  assert.equal(env.CHATGPT_ACCOUNT_ID, 'acct_persisted')
})
// When a readable Codex auth.json is present (located via CODEX_HOME),
// the account id it contains wins over a stale persisted
// CHATGPT_ACCOUNT_ID.
test('codex launch prefers auth account id over stale persisted value', async () => {
  // Real temp dir with a real auth.json, so credential resolution can
  // actually read the file.
  const codexHome = mkdtempSync(join(tmpdir(), 'openclaude-codex-'))
  try {
    writeFileSync(
      join(codexHome, 'auth.json'),
      JSON.stringify({
        access_token: 'codex-live',
        account_id: 'acct_auth',
      }),
      'utf8',
    )
    const env = await buildLaunchEnv({
      profile: 'codex',
      persisted: profile('codex', {
        OPENAI_BASE_URL: 'https://chatgpt.com/backend-api/codex',
        OPENAI_MODEL: 'codexspark',
        CHATGPT_ACCOUNT_ID: 'acct_persisted',
      }),
      goal: 'balanced',
      processEnv: {
        CODEX_HOME: codexHome,
      },
    })
    assert.equal(env.CHATGPT_ACCOUNT_ID, 'acct_auth')
  } finally {
    // Always remove the temp CODEX_HOME, even if the assertion fails.
    rmSync(codexHome, { recursive: true, force: true })
  }
})
test('ollama profiles never persist openai api keys', () => {
@@ -86,6 +254,53 @@ test('ollama profiles never persist openai api keys', () => {
assert.equal('OPENAI_API_KEY' in env, false)
})
// buildCodexProfileEnv keeps an explicitly supplied API key, picks up the
// shell's ChatGPT account id, and pins the Codex backend transport.
test('codex profiles accept explicit codex credentials', () => {
  const env = buildCodexProfileEnv({
    model: 'codexspark',
    apiKey: 'codex-live',
    processEnv: {
      CHATGPT_ACCOUNT_ID: 'acct_123',
    },
  })
  assert.deepEqual(env, {
    OPENAI_BASE_URL: 'https://chatgpt.com/backend-api/codex',
    OPENAI_MODEL: 'codexspark',
    CODEX_API_KEY: 'codex-live',
    CHATGPT_ACCOUNT_ID: 'acct_123',
  })
})
// buildCodexProfileEnv must refuse to build an env when no ChatGPT account
// id can be resolved (auth file missing, nothing in the shell env).
test('codex profiles require a chatgpt account id', () => {
  const result = buildCodexProfileEnv({
    model: 'codexspark',
    apiKey: 'codex-live',
    processEnv: {
      CODEX_AUTH_JSON_PATH: missingCodexAuthPath,
    },
  })
  assert.equal(result, null)
})
// The openai profile builder must not inherit Codex transport settings
// (base URL / model alias) leaking in from the shell environment.
test('openai profiles ignore codex shell transport hints', () => {
  const built = buildOpenAIProfileEnv({
    goal: 'balanced',
    apiKey: 'sk-live',
    processEnv: {
      OPENAI_BASE_URL: 'https://chatgpt.com/backend-api/codex',
      OPENAI_MODEL: 'codexplan',
      OPENAI_API_KEY: 'sk-live',
    },
  })
  assert.deepEqual(built, {
    OPENAI_BASE_URL: 'https://api.openai.com/v1',
    OPENAI_MODEL: 'gpt-4o',
    OPENAI_API_KEY: 'sk-live',
  })
})
test('auto profile falls back to openai when no viable ollama model exists', () => {
assert.equal(selectAutoProfile(null), 'openai')
assert.equal(selectAutoProfile('qwen2.5-coder:7b'), 'ollama')

View File

@@ -1,14 +1,24 @@
import {
DEFAULT_CODEX_BASE_URL,
DEFAULT_OPENAI_BASE_URL,
isCodexBaseUrl,
resolveCodexApiCredentials,
resolveProviderRequest,
} from '../services/api/providerConfig.ts'
import {
getGoalDefaultOpenAIModel,
type RecommendationGoal,
} from './providerRecommendation.ts'
// Provider backends a launch profile can target. (The two-member alias
// that previously shadowed this declaration was unmerged diff residue —
// duplicate type-alias declarations are a compile error.)
export type ProviderProfile = 'openai' | 'ollama' | 'codex'
// Environment variables a provider profile may contribute to a launched
// session. The OPENAI_* transport variables are shared by all profiles;
// the CODEX_*/CHATGPT_* entries carry Codex credentials.
export type ProfileEnv = {
  OPENAI_BASE_URL?: string
  OPENAI_MODEL?: string
  OPENAI_API_KEY?: string
  CODEX_API_KEY?: string
  CHATGPT_ACCOUNT_ID?: string
  // Alternate account-id variable: read only as a fallback source for
  // CHATGPT_ACCOUNT_ID and always deleted from the final launch env.
  CODEX_ACCOUNT_ID?: string
}
export type ProfileFile = {
@@ -40,6 +50,7 @@ export function buildOllamaProfileEnv(
export function buildOpenAIProfileEnv(options: {
goal: RecommendationGoal
model?: string | null
baseUrl?: string | null
apiKey?: string | null
processEnv?: NodeJS.ProcessEnv
}): ProfileEnv | null {
@@ -49,13 +60,57 @@ export function buildOpenAIProfileEnv(options: {
return null
}
const defaultModel = getGoalDefaultOpenAIModel(options.goal)
const shellOpenAIRequest = resolveProviderRequest({
model: processEnv.OPENAI_MODEL,
baseUrl: processEnv.OPENAI_BASE_URL,
fallbackModel: defaultModel,
})
const useShellOpenAIConfig = shellOpenAIRequest.transport === 'chat_completions'
return {
OPENAI_BASE_URL: processEnv.OPENAI_BASE_URL || 'https://api.openai.com/v1',
OPENAI_MODEL: options.model || getGoalDefaultOpenAIModel(options.goal),
OPENAI_BASE_URL:
options.baseUrl ||
(useShellOpenAIConfig ? processEnv.OPENAI_BASE_URL : undefined) ||
DEFAULT_OPENAI_BASE_URL,
OPENAI_MODEL:
options.model ||
(useShellOpenAIConfig ? processEnv.OPENAI_MODEL : undefined) ||
defaultModel,
OPENAI_API_KEY: key,
}
}
export function buildCodexProfileEnv(options: {
model?: string | null
baseUrl?: string | null
apiKey?: string | null
processEnv?: NodeJS.ProcessEnv
}): ProfileEnv | null {
const processEnv = options.processEnv ?? process.env
const key = sanitizeApiKey(options.apiKey ?? processEnv.CODEX_API_KEY)
const credentialEnv = key
? ({ ...processEnv, CODEX_API_KEY: key } as NodeJS.ProcessEnv)
: processEnv
const credentials = resolveCodexApiCredentials(credentialEnv)
if (!credentials.apiKey || !credentials.accountId) {
return null
}
const env: ProfileEnv = {
OPENAI_BASE_URL: options.baseUrl || DEFAULT_CODEX_BASE_URL,
OPENAI_MODEL: options.model || 'codexplan',
}
if (key) {
env.CODEX_API_KEY = key
}
env.CHATGPT_ACCOUNT_ID = credentials.accountId
return env
}
export function createProfileFile(
profile: ProviderProfile,
env: ProfileEnv,
@@ -103,21 +158,75 @@ export async function buildLaunchEnv(options: {
persistedEnv.OPENAI_MODEL ||
(await resolveOllamaModel(options.goal))
if (!processEnv.OPENAI_API_KEY || processEnv.OPENAI_API_KEY === 'SUA_CHAVE') {
delete env.OPENAI_API_KEY
}
delete env.OPENAI_API_KEY
delete env.CODEX_API_KEY
delete env.CHATGPT_ACCOUNT_ID
delete env.CODEX_ACCOUNT_ID
return env
}
if (options.profile === 'codex') {
env.OPENAI_BASE_URL =
persistedEnv.OPENAI_BASE_URL && isCodexBaseUrl(persistedEnv.OPENAI_BASE_URL)
? persistedEnv.OPENAI_BASE_URL
: DEFAULT_CODEX_BASE_URL
env.OPENAI_MODEL = persistedEnv.OPENAI_MODEL || 'codexplan'
delete env.OPENAI_API_KEY
const codexKey =
sanitizeApiKey(processEnv.CODEX_API_KEY) ||
sanitizeApiKey(persistedEnv.CODEX_API_KEY)
const liveCodexCredentials = resolveCodexApiCredentials(processEnv)
const codexAccountId =
processEnv.CHATGPT_ACCOUNT_ID ||
processEnv.CODEX_ACCOUNT_ID ||
liveCodexCredentials.accountId ||
persistedEnv.CHATGPT_ACCOUNT_ID ||
persistedEnv.CODEX_ACCOUNT_ID
if (codexKey) {
env.CODEX_API_KEY = codexKey
} else {
delete env.CODEX_API_KEY
}
if (codexAccountId) {
env.CHATGPT_ACCOUNT_ID = codexAccountId
} else {
delete env.CHATGPT_ACCOUNT_ID
}
delete env.CODEX_ACCOUNT_ID
return env
}
const defaultOpenAIModel = getGoalDefaultOpenAIModel(options.goal)
const shellOpenAIRequest = resolveProviderRequest({
model: processEnv.OPENAI_MODEL,
baseUrl: processEnv.OPENAI_BASE_URL,
fallbackModel: defaultOpenAIModel,
})
const persistedOpenAIRequest = resolveProviderRequest({
model: persistedEnv.OPENAI_MODEL,
baseUrl: persistedEnv.OPENAI_BASE_URL,
fallbackModel: defaultOpenAIModel,
})
const useShellOpenAIConfig = shellOpenAIRequest.transport === 'chat_completions'
const usePersistedOpenAIConfig =
(!persistedEnv.OPENAI_MODEL && !persistedEnv.OPENAI_BASE_URL) ||
persistedOpenAIRequest.transport === 'chat_completions'
env.OPENAI_BASE_URL =
processEnv.OPENAI_BASE_URL ||
persistedEnv.OPENAI_BASE_URL ||
'https://api.openai.com/v1'
(useShellOpenAIConfig ? processEnv.OPENAI_BASE_URL : undefined) ||
(usePersistedOpenAIConfig ? persistedEnv.OPENAI_BASE_URL : undefined) ||
DEFAULT_OPENAI_BASE_URL
env.OPENAI_MODEL =
processEnv.OPENAI_MODEL ||
persistedEnv.OPENAI_MODEL ||
getGoalDefaultOpenAIModel(options.goal)
(useShellOpenAIConfig ? processEnv.OPENAI_MODEL : undefined) ||
(usePersistedOpenAIConfig ? persistedEnv.OPENAI_MODEL : undefined) ||
defaultOpenAIModel
env.OPENAI_API_KEY = processEnv.OPENAI_API_KEY || persistedEnv.OPENAI_API_KEY
delete env.CODEX_API_KEY
delete env.CHATGPT_ACCOUNT_ID
delete env.CODEX_ACCOUNT_ID
return env
}